import warnings

from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor


logger = logging.get_logger(__name__)


class GLPNFeatureExtractor(GLPNImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use GLPNImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
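# A minimal migration sketch (assumes only the public GLPNImageProcessor API;
# the checkpoint name below is illustrative, not taken from this file):
#
#     from transformers import GLPNImageProcessor
#     image_processor = GLPNImageProcessor.from_pretrained("vinvino02/glpn-kitti")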
# ---------------------------------------------------------------------------
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_mobilenet_v2": [
        "MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "MobileNetV2Config",
        "MobileNetV2OnnxConfig",
    ],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_mobilenet_v2"] = ["MobileNetV2FeatureExtractor"]
    _import_structure["image_processing_mobilenet_v2"] = ["MobileNetV2ImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mobilenet_v2"] = [
        "MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MobileNetV2ForImageClassification",
        "MobileNetV2ForSemanticSegmentation",
        "MobileNetV2Model",
        "MobileNetV2PreTrainedModel",
        "load_tf_weights_in_mobilenet_v2",
    ]


if TYPE_CHECKING:
    from .configuration_mobilenet_v2 import (
        MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        MobileNetV2Config,
        MobileNetV2OnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_mobilenet_v2 import MobileNetV2FeatureExtractor
        from .image_processing_mobilenet_v2 import MobileNetV2ImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mobilenet_v2 import (
            MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileNetV2ForImageClassification,
            MobileNetV2ForSemanticSegmentation,
            MobileNetV2Model,
            MobileNetV2PreTrainedModel,
            load_tf_weights_in_mobilenet_v2,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
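# A minimal usage sketch (assumes the public transformers API; not part of the
# original file): once the lazy module is installed in sys.modules, submodules
# are only imported on first attribute access.
#
#     from transformers import MobileNetV2Config
#     config = MobileNetV2Config()  # triggers the real import of configuration_mobilenet_v2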
# ---------------------------------------------------------------------------
"""simple docstring"""
def __A ( __lowerCamelCase ) -> Dict:
if len(__lowerCamelCase ) <= 1:
return [tuple(__lowerCamelCase )]
a = []
def generate(__lowerCamelCase , __lowerCamelCase ):
if k == 1:
res.append(tuple(arr[:] ) )
return
generate(k - 1 , __lowerCamelCase )
for i in range(k - 1 ):
if k % 2 == 0: # k is even
a = arr[k - 1], arr[i]
else: # k is odd
a = arr[k - 1], arr[0]
generate(k - 1 , __lowerCamelCase )
generate(len(__lowerCamelCase ) , __lowerCamelCase )
return res
if __name__ == "__main__":
__UpperCamelCase : Optional[int] = input("Enter numbers separated by a comma:\n").strip()
__UpperCamelCase : Any = [int(item) for item in user_input.split(",")]
print(heaps(arr))
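# A quick cross-check sketch (not part of the original sample): Heap's algorithm
# must produce the same multiset of permutations as itertools.permutations.
#
#     from itertools import permutations
#     assert sorted(heaps([0, 1, 2])) == sorted(permutations([0, 1, 2]))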
# ---------------------------------------------------------------------------
def is_palindrome(num: int) -> bool:
    """
    Return True if `num` reads the same forwards and backwards.
    >>> is_palindrome(121)
    True
    >>> is_palindrome(-121)
    False
    """
    if num < 0:
        return False

    num_copy = num
    rev_num = 0
    while num > 0:
        # peel off the last digit and append it to the reversed number
        rev_num = rev_num * 10 + (num % 10)
        num //= 10

    return num_copy == rev_num


if __name__ == "__main__":
    import doctest

    doctest.testmod()
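# An alternative-formulation sketch (not part of the original sample): string
# reversal gives the same answer and is a handy cross-check for the digit loop.
#
#     def is_palindrome_str(num: int) -> bool:
#         return num >= 0 and str(num) == str(num)[::-1]
#
#     assert is_palindrome_str(121) == is_palindrome(121)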
# ---------------------------------------------------------------------------
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple

import numpy as np

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "emoji_file": "emoji.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt",
    },
    "emoji_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "abeja/gpt-neox-japanese-2.7b": 2048,
}


def load_vocab_and_emoji(vocab_file, emoji_file):
    """Loads a vocabulary file and an emoji file into dictionaries."""
    with open(emoji_file, "r", encoding="utf-8") as f:
        emoji = json.loads(f.read())

    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as f:
        token = f.readlines()
    token = [[t.rstrip("\n")] if (t == "," or "," not in t) else t.rstrip("\n").split(",") for t in token]
    for idx, b in enumerate(token):
        ids_to_tokens[idx] = b
        raw_vocab[",".join(b)] = idx
        for wd in b:
            vocab[wd] = idx

    return vocab, raw_vocab, ids_to_tokens, emoji


class GPTNeoXJapaneseTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        emoji_file,
        unk_token="<|endoftext|>",
        pad_token="<|endoftext|>",
        bos_token="<|startoftext|>",
        eos_token="<|endoftext|>",
        do_clean_text=False,
        **kwargs,
    ):
        super().__init__(
            unk_token=unk_token,
            pad_token=pad_token,
            bos_token=bos_token,
            eos_token=eos_token,
            do_clean_text=do_clean_text,
            **kwargs,
        )
        if not os.path.isfile(vocab_file):
            raise ValueError(
                f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
                " model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        if not os.path.isfile(emoji_file):
            raise ValueError(
                f"Can't find an emoji file at path '{emoji_file}'. To load the emoji information from a Google"
                " pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        self.do_clean_text = do_clean_text
        self.vocab, self.raw_vocab, self.ids_to_tokens, self.emoji = load_vocab_and_emoji(vocab_file, emoji_file)
        self.subword_tokenizer = SubWordJapaneseTokenizer(
            vocab=self.vocab, ids_to_tokens=self.ids_to_tokens, emoji=self.emoji
        )

    @property
    def vocab_size(self):
        return len(self.raw_vocab)

    def get_vocab(self):
        return dict(self.raw_vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        return self.subword_tokenizer.tokenize(text, clean=self.do_clean_text)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.subword_tokenizer.convert_id_to_token(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) into a single string."""
        out_string = "".join(tokens).strip()
        return out_string

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"]
            )
        else:
            vocab_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
            )
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token_index, token in self.ids_to_tokens.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(",".join(token) + "\n")
                index += 1
        with open(emoji_file, "w", encoding="utf-8") as writer:
            json.dump(self.emoji, writer)
        return vocab_file, emoji_file


class SubWordJapaneseTokenizer(object):
    def __init__(self, vocab, ids_to_tokens, emoji):
        self.vocab = vocab  # same as swe
        self.ids_to_tokens = ids_to_tokens  # same as bpe
        self.emoji = emoji
        self.maxlen = np.max([len(w) for w in self.vocab.keys()])
        self.content_repatter1 = re.compile(r"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)")
        self.content_repatter2 = re.compile(r"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*")
        self.content_repatter3 = re.compile(r"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}")
        self.content_repatter4 = re.compile(
            r"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*"
        )
        self.content_repatter5 = re.compile(
            r"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*"
        )
        self.content_repatter6 = re.compile(
            r"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*"
        )
        keisen = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
        blocks = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
        self.content_trans1 = str.maketrans({k: "<BLOCK>" for k in keisen + blocks})

    def __len__(self):
        return len(self.ids_to_tokens)

    def clean_text(self, content):
        content = self.content_repatter1.sub("<URL>", content)
        content = self.content_repatter2.sub("<EMAIL>", content)
        content = self.content_repatter3.sub("<TEL>", content)
        content = self.content_repatter4.sub("<DATE>", content)
        content = self.content_repatter5.sub("<DATE>", content)
        content = self.content_repatter6.sub("<PRICE>", content)
        content = content.translate(self.content_trans1)
        while "<BLOCK><BLOCK>" in content:
            content = content.replace("<BLOCK><BLOCK>", "<BLOCK>")
        return content

    def tokenize(self, text, clean=False):
        text = text.replace(" ", "<SP>")
        text = text.replace("　", "<SP>")
        text = text.replace("\r\n", "<BR>")
        text = text.replace("\n", "<BR>")
        text = text.replace("\r", "<BR>")
        text = text.replace("\t", "<TAB>")
        text = text.replace("—", "ー")
        text = text.replace("−", "ー")
        for k, v in self.emoji["emoji"].items():
            if k in text:
                text = text.replace(k, v)
        if clean:
            text = self.clean_text(text)

        def check_simbol(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 2:
                c = (int(e[0]) << 8) + int(e[1])
                if (
                    (c >= 0xC2A1 and c <= 0xC2BF)
                    or (c >= 0xC780 and c <= 0xC783)
                    or (c >= 0xCAB9 and c <= 0xCBBF)
                    or (c >= 0xCC80 and c <= 0xCDA2)
                ):
                    return True
            return False

        def checku2e(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 3:
                c = (int(e[0]) << 16) + (int(e[1]) << 8) + int(e[2])
                if c >= 0xE28080 and c <= 0xE2B07F:
                    return True
            return False

        pos = 0
        result = []
        while pos < len(text):
            end = min(len(text), pos + self.maxlen + 1) if text[pos] == "<" else pos + 3
            candidates = []  # (token_id, token, pos)
            for e in range(end, pos, -1):
                wd = text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(wd) > 2:
                        candidates = [(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e))
            if len(candidates) > 0:
                # the smallest token_id is adopted
                _, wd, e = sorted(candidates, key=lambda x: x[0])[0]
                result.append(wd)
                pos = e
            else:
                end = pos + 1
                wd = text[pos:end]
                if check_simbol(wd):
                    result.append("<KIGOU>")
                elif checku2e(wd):
                    result.append("<U2000U2BFF>")
                else:
                    # byte fallback for anything not covered by the vocab
                    for i in wd.encode("utf-8"):
                        result.append("<|byte%d|>" % i)
                pos = end
        return result

    def convert_id_to_token(self, index, breakline="\n"):
        words = []
        byte_tokens = []
        word = self.ids_to_tokens[index][0]
        if word[:6] == "<|byte" and word[-2:] == "|>":
            byte_tokens.append(int(word[6:-2]))
        else:
            if len(byte_tokens) > 0:
                words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
                byte_tokens = []
            if word[:7] == "<|emoji" and word[-2:] == "|>":
                words.append(self.emoji["emoji_inv"][word])
            elif word == "<SP>":
                words.append(" ")
            elif word == "<BR>":
                words.append(breakline)
            elif word == "<TAB>":
                words.append("\t")
            elif word == "<BLOCK>":
                words.append("▀")
            elif word == "<KIGOU>":
                words.append("ǀ")
            elif word == "<U2000U2BFF>":
                words.append("‖")
            else:
                words.append(word)
        if len(byte_tokens) > 0:
            words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
        text = "".join(words)
        return text
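# A behavior sketch of the byte fallback above (hypothetical input, not from the
# original file): "あ".encode("utf-8") == b"\xe3\x81\x82", so an out-of-vocabulary
# "あ" would tokenize to ["<|byte227|>", "<|byte129|>", "<|byte130|>"] and be
# reassembled later by convert_id_to_token via bytearray(...).decode("utf-8").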
# ---------------------------------------------------------------------------
import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class CanineTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CanineTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = CanineTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def canine_tokenizer(self):
        return CanineTokenizer.from_pretrained("google/canine-s")

    def get_tokenizer(self, **kwargs) -> CanineTokenizer:
        tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
        tokenizer._unicode_vocab_size = 1024
        return tokenizer
@require_torch
def lowerCamelCase__ ( self :int ):
'''simple docstring'''
a = self.canine_tokenizer
a = ["""Life is like a box of chocolates.""", """You never know what you're gonna get."""]
# fmt: off
a = [57344, 76, 105, 102, 101, 32, 105, 115, 32, 108, 105, 107, 101, 32, 97, 32, 98, 111, 120, 32, 111, 102, 32, 99, 104, 111, 99, 111, 108, 97, 116, 101, 115, 46, 57345, 0, 0, 0, 0]
# fmt: on
a = tokenizer(__magic_name__ , padding=__magic_name__ , return_tensors="""pt""" )
self.assertIsInstance(__magic_name__ , __magic_name__ )
a = list(batch.input_ids.numpy()[0] )
self.assertListEqual(__magic_name__ , __magic_name__ )
self.assertEqual((2, 39) , batch.input_ids.shape )
self.assertEqual((2, 39) , batch.attention_mask.shape )
@require_torch
def lowerCamelCase__ ( self :Any ):
'''simple docstring'''
a = self.canine_tokenizer
a = ["""Once there was a man.""", """He wrote a test in HuggingFace Tranformers."""]
a = tokenizer(__magic_name__ , padding=__magic_name__ , return_tensors="""pt""" )
# check if input_ids, attention_mask and token_type_ids are returned
self.assertIn("""input_ids""" , __magic_name__ )
self.assertIn("""attention_mask""" , __magic_name__ )
self.assertIn("""token_type_ids""" , __magic_name__ )
@require_torch
def lowerCamelCase__ ( self :Any ):
'''simple docstring'''
a = self.canine_tokenizer
a = [
"""What's the weater?""",
"""It's about 25 degrees.""",
]
a = tokenizer(
text_target=__magic_name__ , max_length=32 , padding="""max_length""" , truncation=__magic_name__ , return_tensors="""pt""" )
self.assertEqual(32 , targets["""input_ids"""].shape[1] )
def lowerCamelCase__ ( self :Union[str, Any] ):
'''simple docstring'''
a = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
a = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
# Isolate this from the other tests because we save additional tokens/etc
a = tempfile.mkdtemp()
a = """ He is very happy, UNwant\u00E9d,running"""
a = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
tokenizer.save_pretrained(__magic_name__ )
a = tokenizer.__class__.from_pretrained(__magic_name__ )
a = after_tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
self.assertListEqual(__magic_name__ , __magic_name__ )
shutil.rmtree(__magic_name__ )
a = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
# Isolate this from the other tests because we save additional tokens/etc
a = tempfile.mkdtemp()
a = """ He is very happy, UNwant\u00E9d,running"""
a = tokenizer.additional_special_tokens
# We can add a new special token for Canine as follows:
a = chr(0xE007)
additional_special_tokens.append(__magic_name__ )
tokenizer.add_special_tokens({"""additional_special_tokens""": additional_special_tokens} )
a = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
tokenizer.save_pretrained(__magic_name__ )
a = tokenizer.__class__.from_pretrained(__magic_name__ )
a = after_tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
self.assertListEqual(__magic_name__ , __magic_name__ )
self.assertIn(__magic_name__ , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
a = tokenizer.__class__.from_pretrained(__magic_name__ , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(__magic_name__ )
def lowerCamelCase__ ( self :List[Any] ):
'''simple docstring'''
a = self.get_tokenizers(do_lower_case=__magic_name__ )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
a , a = self.get_clean_sequence(__magic_name__ )
# a special token for Canine can be defined as follows:
a = 0xE005
a = chr(__magic_name__ )
tokenizer.add_special_tokens({"""cls_token""": special_token} )
a = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
self.assertEqual(len(__magic_name__ ) , 1 )
a = tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=__magic_name__ )
a = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
a = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
a = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
self.assertEqual(__magic_name__ , input_encoded + special_token_id )
a = tokenizer.decode(__magic_name__ , skip_special_tokens=__magic_name__ )
self.assertTrue(special_token not in decoded )
def lowerCamelCase__ ( self :int ):
'''simple docstring'''
a = self.get_tokenizers(do_lower_case=__magic_name__ )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
a = chr(0xE005)
a = chr(0xE006)
# `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=__magic_name__ )
# `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
# which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
tokenizer.add_special_tokens({"""additional_special_tokens""": [SPECIAL_TOKEN_2]} )
a = tokenizer.tokenize(__magic_name__ )
a = tokenizer.tokenize(__magic_name__ )
self.assertEqual(len(__magic_name__ ) , 1 )
self.assertEqual(len(__magic_name__ ) , 1 )
self.assertEqual(token_a[0] , __magic_name__ )
self.assertEqual(token_a[0] , __magic_name__ )
@require_tokenizers
def lowerCamelCase__ ( self :Dict ):
'''simple docstring'''
a = self.get_tokenizers(do_lower_case=__magic_name__ )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
# a special token for Canine can be defined as follows:
a = 0xE006
a = chr(__magic_name__ )
a = AddedToken(__magic_name__ , lstrip=__magic_name__ )
tokenizer.add_special_tokens({"""additional_special_tokens""": [new_token]} )
with tempfile.TemporaryDirectory() as tmp_dir_name:
tokenizer.save_pretrained(__magic_name__ )
tokenizer.from_pretrained(__magic_name__ )
def lowerCamelCase__ ( self :int ):
'''simple docstring'''
a = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(__magic_name__ )
with open(os.path.join(__magic_name__ , """special_tokens_map.json""" ) , encoding="""utf-8""" ) as json_file:
a = json.load(__magic_name__ )
with open(os.path.join(__magic_name__ , """tokenizer_config.json""" ) , encoding="""utf-8""" ) as json_file:
a = json.load(__magic_name__ )
# a special token for Canine can be defined as follows:
a = 0xE006
a = chr(__magic_name__ )
a = [new_token_a]
a = [new_token_a]
with open(os.path.join(__magic_name__ , """special_tokens_map.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
json.dump(__magic_name__ , __magic_name__ )
with open(os.path.join(__magic_name__ , """tokenizer_config.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
json.dump(__magic_name__ , __magic_name__ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
a = tokenizer_class.from_pretrained(__magic_name__ , extra_ids=0 )
self.assertIn(__magic_name__ , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , )
a = 0xE007
a = chr(__magic_name__ )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
a = [AddedToken(__magic_name__ , lstrip=__magic_name__ )]
a = tokenizer_class.from_pretrained(
__magic_name__ , additional_special_tokens=__magic_name__ , extra_ids=0 )
self.assertIn(__magic_name__ , tokenizer.additional_special_tokens )
# self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) )
@require_tokenizers
def lowerCamelCase__ ( self :Union[str, Any] ):
'''simple docstring'''
a = self.get_tokenizers(do_lower_case=__magic_name__ )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
a = """hello world"""
if self.space_between_special_tokens:
a = """[CLS] hello world [SEP]"""
else:
a = input
a = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
a = tokenizer.decode(__magic_name__ , spaces_between_special_tokens=self.space_between_special_tokens )
self.assertIn(__magic_name__ , [output, output.lower()] )
def lowerCamelCase__ ( self :List[str] ):
'''simple docstring'''
a = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
a = [
"""bos_token""",
"""eos_token""",
"""unk_token""",
"""sep_token""",
"""pad_token""",
"""cls_token""",
"""mask_token""",
]
a = """a"""
a = ord(__magic_name__ )
for attr in attributes_list:
setattr(__magic_name__ , attr + """_id""" , __magic_name__ )
self.assertEqual(getattr(__magic_name__ , __magic_name__ ) , __magic_name__ )
self.assertEqual(getattr(__magic_name__ , attr + """_id""" ) , __magic_name__ )
setattr(__magic_name__ , attr + """_id""" , __magic_name__ )
self.assertEqual(getattr(__magic_name__ , __magic_name__ ) , __magic_name__ )
self.assertEqual(getattr(__magic_name__ , attr + """_id""" ) , __magic_name__ )
setattr(__magic_name__ , """additional_special_tokens_ids""" , [] )
self.assertListEqual(getattr(__magic_name__ , """additional_special_tokens""" ) , [] )
self.assertListEqual(getattr(__magic_name__ , """additional_special_tokens_ids""" ) , [] )
a = 0xE006
a = chr(__magic_name__ )
setattr(__magic_name__ , """additional_special_tokens_ids""" , [additional_special_token_id] )
self.assertListEqual(getattr(__magic_name__ , """additional_special_tokens""" ) , [additional_special_token] )
self.assertListEqual(getattr(__magic_name__ , """additional_special_tokens_ids""" ) , [additional_special_token_id] )
def lowerCamelCase__ ( self :Any ):
'''simple docstring'''
pass
def lowerCamelCase__ ( self :str ):
'''simple docstring'''
pass
def lowerCamelCase__ ( self :Optional[int] ):
'''simple docstring'''
pass
def lowerCamelCase__ ( self :List[Any] ):
'''simple docstring'''
pass
def lowerCamelCase__ ( self :List[str] ):
'''simple docstring'''
pass
def lowerCamelCase__ ( self :List[str] ):
'''simple docstring'''
pass
def lowerCamelCase__ ( self :Any ):
'''simple docstring'''
pass
def lowerCamelCase__ ( self :List[str] ):
'''simple docstring'''
pass
# ---------------------------------------------------------------------------
def solution(n: int = 10) -> str:
    """
    Return the last `n` digits of 28433 * 2**7830457 + 1.
    >>> solution()
    '8739992577'
    >>> solution(8)
    '39992577'
    """
    if not isinstance(n, int) or n < 0:
        raise ValueError("Invalid input")
    modulus = 10**n
    number = 28433 * (pow(2, 7830457, modulus)) + 1
    return str(number % modulus)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print(f"{solution(10) = }")
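# Why this works, sketched (not part of the original sample): three-argument
# pow() performs modular exponentiation, so only the last n digits of
# 2**7830457 are ever materialized.
#
#     assert pow(2, 7830457, 10**10) == 2**7830457 % 10**10
# ---------------------------------------------------------------------------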
def is_even(number: int) -> bool:
    """Return True if `number` is even, using a bitwise AND with 1.
    >>> is_even(4)
    True
    """
    return number & 1 == 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
# ---------------------------------------------------------------------------
from __future__ import annotations

from sys import maxsize
from typing import Generic, TypeVar

T = TypeVar("T")


def get_parent_position(position: int) -> int:
    return (position - 1) // 2


def get_child_left_position(position: int) -> int:
    return (2 * position) + 1


def get_child_right_position(position: int) -> int:
    return (2 * position) + 2


class MinPriorityQueue(Generic[T]):
    def __init__(self) -> None:
        self.heap: list[tuple[T, int]] = []
        self.position_map: dict[T, int] = {}
        self.elements: int = 0

    def __len__(self) -> int:
        return self.elements

    def __repr__(self) -> str:
        return str(self.heap)

    def is_empty(self) -> bool:
        return self.elements == 0

    def push(self, elem: T, weight: int) -> None:
        self.heap.append((elem, weight))
        self.position_map[elem] = self.elements
        self.elements += 1
        self._bubble_up(elem)

    def extract_min(self) -> T:
        if self.elements > 1:
            self._swap_nodes(0, self.elements - 1)
        elem, _ = self.heap.pop()
        del self.position_map[elem]
        self.elements -= 1
        if self.elements > 0:
            bubble_down_elem, _ = self.heap[0]
            self._bubble_down(bubble_down_elem)
        return elem

    def update_key(self, elem: T, weight: int) -> None:
        position = self.position_map[elem]
        self.heap[position] = (elem, weight)
        if position > 0:
            parent_position = get_parent_position(position)
            _, parent_weight = self.heap[parent_position]
            if parent_weight > weight:
                self._bubble_up(elem)
            else:
                self._bubble_down(elem)
        else:
            self._bubble_down(elem)

    def _bubble_up(self, elem: T) -> None:
        curr_pos = self.position_map[elem]
        if curr_pos == 0:
            return None
        parent_position = get_parent_position(curr_pos)
        _, weight = self.heap[curr_pos]
        _, parent_weight = self.heap[parent_position]
        if parent_weight > weight:
            self._swap_nodes(parent_position, curr_pos)
            return self._bubble_up(elem)
        return None

    def _bubble_down(self, elem: T) -> None:
        curr_pos = self.position_map[elem]
        _, weight = self.heap[curr_pos]
        child_left_position = get_child_left_position(curr_pos)
        child_right_position = get_child_right_position(curr_pos)
        if child_left_position < self.elements and child_right_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < child_left_weight and child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        if child_left_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            if child_left_weight < weight:
                self._swap_nodes(child_left_position, curr_pos)
                return self._bubble_down(elem)
        else:
            return None
        if child_right_position < self.elements:
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        return None

    def _swap_nodes(self, node1_pos: int, node2_pos: int) -> None:
        node1_elem = self.heap[node1_pos][0]
        node2_elem = self.heap[node2_pos][0]
        self.heap[node1_pos], self.heap[node2_pos] = (
            self.heap[node2_pos],
            self.heap[node1_pos],
        )
        self.position_map[node1_elem] = node2_pos
        self.position_map[node2_elem] = node1_pos


class GraphUndirectedWeighted(Generic[T]):
    def __init__(self) -> None:
        self.connections: dict[T, dict[T, int]] = {}
        self.nodes: int = 0

    def __repr__(self) -> str:
        return str(self.connections)

    def __len__(self) -> int:
        return self.nodes

    def add_node(self, node: T) -> None:
        if node not in self.connections:
            self.connections[node] = {}
            self.nodes += 1

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight


def prims_algo(
    graph: GraphUndirectedWeighted[T],
) -> tuple[dict[T, int], dict[T, T | None]]:
    dist: dict[T, int] = {node: maxsize for node in graph.connections}
    parent: dict[T, T | None] = {node: None for node in graph.connections}

    priority_queue: MinPriorityQueue[T] = MinPriorityQueue()
    for node, weight in dist.items():
        priority_queue.push(node, weight)

    if priority_queue.is_empty():
        return dist, parent

    # initialization
    node = priority_queue.extract_min()
    dist[node] = 0
    for neighbour in graph.connections[node]:
        if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
            dist[neighbour] = dist[node] + graph.connections[node][neighbour]
            priority_queue.update_key(neighbour, dist[neighbour])
            parent[neighbour] = node

    # running prim's algorithm
    while not priority_queue.is_empty():
        node = priority_queue.extract_min()
        for neighbour in graph.connections[node]:
            if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
                dist[neighbour] = dist[node] + graph.connections[node][neighbour]
                priority_queue.update_key(neighbour, dist[neighbour])
                parent[neighbour] = node
    return dist, parent
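# A minimal usage sketch (the graph below is illustrative, not from the original
# file); following the update rule exactly as written above, dist accumulates
# path cost and parent records each node's predecessor:
#
#     graph = GraphUndirectedWeighted[int]()
#     graph.add_edge(1, 2, 1)
#     graph.add_edge(2, 3, 2)
#     graph.add_edge(1, 3, 10)
#     dist, parent = prims_algo(graph)
#     # dist == {1: 0, 2: 1, 3: 3}; parent == {1: None, 2: 1, 3: 2}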
# ---------------------------------------------------------------------------
def max_product_subarray(numbers: list[int]) -> int:
    """
    Return the maximum product obtainable from a contiguous subarray.
    >>> max_product_subarray([2, 3, -2, 4])
    6
    >>> max_product_subarray([-2, 0, -1])
    0
    """
    if not numbers:
        return 0

    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers
    ):
        raise ValueError("numbers must be an iterable of integers")

    max_till_now = min_till_now = max_prod = numbers[0]

    for i in range(1, len(numbers)):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            max_till_now, min_till_now = min_till_now, max_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)

        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)

    return max_prod
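# A quick sanity-check sketch (not part of the original sample): with an
# all-negative input the best product pairs the two smallest-magnitude signs.
#
#     assert max_product_subarray([-4, -5, -2]) == 20  # (-4) * (-5)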
# ---------------------------------------------------------------------------
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
__UpperCamelCase : Union[str, Any] = logging.getLogger(__name__)
class NER(TokenClassificationTask):
    def __init__(self, label_idx=-1):
        # in NER datasets, the last column is usually reserved for the NER label
        self.label_idx = label_idx
def lowerCamelCase__ ( self :Optional[Any] , __magic_name__ :Optional[Any] , __magic_name__ :Dict ):
'''simple docstring'''
if isinstance(lowercase_ , lowercase_ ):
a = mode.value
a = os.path.join(lowercase_ , F'{mode}.txt' )
a = 1
a = []
with open(lowercase_ , encoding="""utf-8""" ) as f:
a = []
a = []
for line in f:
if line.startswith("""-DOCSTART-""" ) or line == "" or line == "\n":
if words:
examples.append(InputExample(guid=F'{mode}-{guid_index}' , words=lowercase_ , labels=lowercase_ ) )
guid_index += 1
a = []
a = []
else:
a = line.split(""" """ )
words.append(splits[0] )
if len(lowercase_ ) > 1:
labels.append(splits[self.label_idx].replace("""\n""" , """""" ) )
else:
# Examples could have no label for mode = "test"
labels.append("""O""" )
if words:
examples.append(InputExample(guid=F'{mode}-{guid_index}' , words=lowercase_ , labels=lowercase_ ) )
return examples
def lowerCamelCase__ ( self :Optional[Any] , __magic_name__ :Union[str, Any] , __magic_name__ :Union[str, Any] , __magic_name__ :Dict ):
'''simple docstring'''
a = 0
for line in test_input_reader:
if line.startswith("""-DOCSTART-""" ) or line == "" or line == "\n":
writer.write(lowercase_ )
if not preds_list[example_id]:
example_id += 1
elif preds_list[example_id]:
a = line.split()[0] + """ """ + preds_list[example_id].pop(0 ) + """\n"""
writer.write(lowercase_ )
else:
logger.warning("""Maximum sequence length exceeded: No prediction for \'%s\'.""" , line.split()[0] )
def lowerCamelCase__ ( self :Union[str, Any] , __magic_name__ :Optional[int] ):
'''simple docstring'''
if path:
with open(lowercase_ , """r""" ) as f:
a = f.read().splitlines()
if "O" not in labels:
a = ["""O"""] + labels
return labels
else:
return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class Chunk(NER):
    def __init__(self):
        # in the CoNLL-2003 dataset, the chunk column is second-to-last
        super().__init__(label_idx=-2)
def lowerCamelCase__ ( self :str , __magic_name__ :List[str] ):
'''simple docstring'''
if path:
with open(lowercase_ , """r""" ) as f:
a = f.read().splitlines()
if "O" not in labels:
a = ["""O"""] + labels
return labels
else:
return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class POS(TokenClassificationTask):
def lowerCamelCase__ ( self :Union[str, Any] , __magic_name__ :Tuple , __magic_name__ :Dict ):
'''simple docstring'''
if isinstance(lowercase_ , lowercase_ ):
a = mode.value
a = os.path.join(lowercase_ , F'{mode}.txt' )
a = 1
a = []
with open(lowercase_ , encoding="""utf-8""" ) as f:
for sentence in parse_incr(lowercase_ ):
a = []
a = []
for token in sentence:
words.append(token["""form"""] )
labels.append(token["""upos"""] )
assert len(lowercase_ ) == len(lowercase_ )
if words:
examples.append(InputExample(guid=F'{mode}-{guid_index}' , words=lowercase_ , labels=lowercase_ ) )
guid_index += 1
return examples
def lowerCamelCase__ ( self :Optional[int] , __magic_name__ :Dict , __magic_name__ :Any , __magic_name__ :Optional[Any] ):
'''simple docstring'''
a = 0
for sentence in parse_incr(lowercase_ ):
a = preds_list[example_id]
a = """"""
for token in sentence:
out += F'{token["form"]} ({token["upos"]}|{s_p.pop(0 )}) '
out += "\n"
writer.write(lowercase_ )
example_id += 1
def lowerCamelCase__ ( self :List[str] , __magic_name__ :List[str] ):
'''simple docstring'''
if path:
with open(lowercase_ , """r""" ) as f:
return f.read().splitlines()
else:
return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
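# A data-format sketch (hypothetical lines, not from the original file): these
# tasks read CoNLL-style space-separated columns, one token per line, with
# blank lines between sentences; `label_idx` picks the label column (-1 for
# NER, -2 for chunking), e.g.
#
#     U.N. NNP I-NP I-ORG
#     official NN I-NP O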
# ---------------------------------------------------------------------------
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {
    "configuration_wav2vec2": ["WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Wav2Vec2Config"],
    "feature_extraction_wav2vec2": ["Wav2Vec2FeatureExtractor"],
    "processing_wav2vec2": ["Wav2Vec2Processor"],
    "tokenization_wav2vec2": ["Wav2Vec2CTCTokenizer", "Wav2Vec2Tokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_wav2vec2"] = [
        "WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Wav2Vec2ForAudioFrameClassification",
        "Wav2Vec2ForCTC",
        "Wav2Vec2ForMaskedLM",
        "Wav2Vec2ForPreTraining",
        "Wav2Vec2ForSequenceClassification",
        "Wav2Vec2ForXVector",
        "Wav2Vec2Model",
        "Wav2Vec2PreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_wav2vec2"] = [
        "TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFWav2Vec2ForCTC",
        "TFWav2Vec2Model",
        "TFWav2Vec2PreTrainedModel",
        "TFWav2Vec2ForSequenceClassification",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_wav2vec2"] = [
        "FlaxWav2Vec2ForCTC",
        "FlaxWav2Vec2ForPreTraining",
        "FlaxWav2Vec2Model",
        "FlaxWav2Vec2PreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_wav2vec2 import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, Wav2Vec2Config
    from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
    from .processing_wav2vec2 import Wav2Vec2Processor
    from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer, Wav2Vec2Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_wav2vec2 import (
            WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Wav2Vec2ForAudioFrameClassification,
            Wav2Vec2ForCTC,
            Wav2Vec2ForMaskedLM,
            Wav2Vec2ForPreTraining,
            Wav2Vec2ForSequenceClassification,
            Wav2Vec2ForXVector,
            Wav2Vec2Model,
            Wav2Vec2PreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_wav2vec2 import (
            TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFWav2Vec2ForCTC,
            TFWav2Vec2ForSequenceClassification,
            TFWav2Vec2Model,
            TFWav2Vec2PreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_wav2vec2 import (
            FlaxWav2Vec2ForCTC,
            FlaxWav2Vec2ForPreTraining,
            FlaxWav2Vec2Model,
            FlaxWav2Vec2PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
# ---------------------------------------------------------------------------
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "AI-Sweden/gpt-sw3-126m": "https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-350m": "https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-1.6b": "https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-6.7b": "https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-20b": "https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "AI-Sweden/gpt-sw3-126m": 2048,
    "AI-Sweden/gpt-sw3-350m": 2048,
    "AI-Sweden/gpt-sw3-1.6b": 2048,
    "AI-Sweden/gpt-sw3-6.7b": 2048,
    "AI-Sweden/gpt-sw3-20b": 2048,
}
class GPTSw3Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=False,
        keep_accents=False,
        pad_token=None,
        unk_token=None,
        eos_token=None,
        bos_token=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
'''simple docstring'''
a = {} if sp_model_kwargs is None else sp_model_kwargs
a = kwargs.get("""name_or_path""" )
if name_or_path is None:
logger.warning(
"""name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"""
""" you are testing the model, this can safely be ignored""" )
a = '''None'''
# Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
a = '''<|endoftext|>''' if eos_token is None else eos_token
a = '''<unk>''' if unk_token is None else unk_token
if "gpt-sw3-7b" in name_or_path:
a = unk_token if pad_token is None else pad_token
a = eos_token if bos_token is None else bos_token
else:
a = '''<pad>''' if pad_token is None else pad_token
a = '''<s>''' if bos_token is None else bos_token
super().__init__(
do_lower_case=_a , remove_space=_a , keep_accents=_a , bos_token=_a , eos_token=_a , unk_token=_a , pad_token=_a , sp_model_kwargs=self.sp_model_kwargs , **_a , )
a = do_lower_case
a = remove_space
a = keep_accents
a = vocab_file
a = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_a )
# Used for whitespace normalization in input texts
# fmt: off
a = {''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', '''''', ''''''}
# fmt: on
# Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
a = re.compile(
F'[{"".join(map(_a , list(range(0 , 9 ) ) + list(range(11 , 32 ) ) + list(range(127 , 160 ) ) + [160, 173, 8203] ) )}]' )
def __getstate__( self :Optional[Any] ):
'''simple docstring'''
a = self.__dict__.copy()
a = None
return state
def __setstate__( self :Tuple , __magic_name__ :Any ):
'''simple docstring'''
a = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
a = {}
a = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
def lowerCamelCase__ ( self :Optional[Any] ):
'''simple docstring'''
return len(self.sp_model )
def lowerCamelCase__ ( self :Union[str, Any] , __magic_name__ :int ):
'''simple docstring'''
a = self.non_printing_characters_re.sub("""""" , _a )
# Normalize whitespaces
a = ''''''.join([char if char not in self.whitespaces else """ """ for char in text] )
# NFC Unicode normalization
a = unicodedata.normalize("""NFC""" , _a )
return text
def lowerCamelCase__ ( self :Optional[Any] , __magic_name__ :Optional[int] , **__magic_name__ :List[str] ):
'''simple docstring'''
a = self.preprocess_text(_a )
return self.sp_model.encode(_a , out_type=_a )
def lowerCamelCase__ ( self :List[Any] , __magic_name__ :int ):
'''simple docstring'''
return self.sp_model.PieceToId(_a )
def lowerCamelCase__ ( self :Optional[int] , __magic_name__ :Optional[Any] ):
'''simple docstring'''
return self.sp_model.IdToPiece(_a )
@staticmethod
def lowerCamelCase__ ( __magic_name__ :int ):
'''simple docstring'''
return out_string
def lowerCamelCase__ ( self :str , __magic_name__ :str ):
'''simple docstring'''
a = []
a = ''''''
a = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
# TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(_a ) + token
a = True
a = []
else:
current_sub_tokens.append(_a )
a = False
out_string += self.sp_model.decode(_a )
return out_string
def lowerCamelCase__ ( self :Tuple ):
'''simple docstring'''
a = {self.convert_ids_to_tokens(_a ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowerCamelCase__ ( self :int , __magic_name__ :Optional[Any] , __magic_name__ :Optional[Any] = None ):
'''simple docstring'''
if not os.path.isdir(_a ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
a = os.path.join(
_a , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _a )
elif not os.path.isfile(self.vocab_file ):
with open(_a , """wb""" ) as fi:
a = self.sp_model.serialized_model_proto()
fi.write(_a )
return (out_vocab_file,)
def lowerCamelCase__ ( self :str , __magic_name__ :List[Any] , __magic_name__ :Tuple = False ):
'''simple docstring'''
if isinstance(_a , _a ):
a = self.preprocess_text(_a )
a = self.sp_model.encode(_a )
else:
a = [self.preprocess_text(_a ) for t in text]
a = self.sp_model.encode(_a )
if return_tensors is True or return_tensors == "pt":
a = torch.tensor(_a )
return token_ids
def lowerCamelCase__ ( self :List[str] , __magic_name__ :Union[str, Any] ):
'''simple docstring'''
return self.sp_model.decode(_a )
def lowerCamelCase__ ( self :Dict , __magic_name__ :Dict ):
'''simple docstring'''
a = [F'User: {text}' if is_user else F'Bot: {text}' for is_user, text in conversation.iter_texts()]
a = (
F'{self.eos_token}{self.bos_token}' + F'{self.bos_token}'.join(_a ) + F'{self.bos_token}Bot:'
)
return self.encode(text=_a )
# ---------------------------------------------------------------------------
import pytest

import datasets.config
from datasets.utils.info_utils import is_small_dataset


@pytest.mark.parametrize("dataset_size", [None, 400 * 2**20, 600 * 2**20])
@pytest.mark.parametrize("input_in_memory_max_size", ["default", 0, 100 * 2**20, 900 * 2**20])
def test_is_small_dataset(dataset_size, input_in_memory_max_size, monkeypatch):
    if input_in_memory_max_size != "default":
        monkeypatch.setattr(datasets.config, "IN_MEMORY_MAX_SIZE", input_in_memory_max_size)
    in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE
    if input_in_memory_max_size == "default":
        assert in_memory_max_size == 0
    else:
        assert in_memory_max_size == input_in_memory_max_size
    if dataset_size and in_memory_max_size:
        expected = dataset_size < in_memory_max_size
    else:
        expected = False
    result = is_small_dataset(dataset_size)
    assert result == expected
# ---------------------------------------------------------------------------
def get_highest_set_bit_position(number: int) -> int:
    """
    Return the position of the highest set bit of `number`.
    >>> get_highest_set_bit_position(25)
    5
    """
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")

    position = 0
    while number:
        position += 1
        number >>= 1

    return position


if __name__ == "__main__":
    import doctest

    doctest.testmod()
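# A quick equivalence sketch (not part of the original sample): the returned
# position equals int.bit_length for non-negative inputs.
#
#     assert get_highest_set_bit_position(25) == 5 == (25).bit_length()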
# ---------------------------------------------------------------------------
from __future__ import annotations

from fractions import Fraction
from math import gcd, sqrt


def is_sq(number: int) -> bool:
    """Check if `number` is a perfect square."""
    sq = int(number**0.5)
    return number == sq * sq


def add_three(
    x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int
) -> tuple[int, int]:
    """Return x/x_den + y/y_den + z/z_den as a reduced fraction (top, bottom)."""
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom


def solution(order: int = 35) -> int:
    unique_s: set = set()
    hcf: int
    total: Fraction = Fraction(0)
    fraction_sum: tuple[int, int]

    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)

                    # n=2
                    z_num = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)

                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)

                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)

    for num, den in unique_s:
        total += Fraction(num, den)

    return total.denominator + total.numerator


if __name__ == "__main__":
    print(f"{solution() = }")
# ---------------------------------------------------------------------------
from __future__ import annotations

from collections.abc import Iterator
from typing import Any


class Node:
    def __init__(self, data: Any):
        self.data: Any = data
        self.next: Node | None = None


class CircularLinkedList:
    def __init__(self):
        self.head = None
        self.tail = None

    def __iter__(self) -> Iterator[Any]:
        node = self.head
        while self.head:
            yield node.data
            node = node.next
            if node == self.head:
                break

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self):
        return "->".join(str(item) for item in iter(self))

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        if index < 0 or index > len(self):
            raise IndexError("list index out of range.")
        new_node = Node(data)
        if self.head is None:
            new_node.next = new_node  # first node points to itself
            self.tail = self.head = new_node
        elif index == 0:  # insert at head
            new_node.next = self.head
            self.head = self.tail.next = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
            if index == len(self) - 1:  # insert at tail
                self.tail = new_node

    def delete_front(self):
        return self.delete_nth(0)

    def delete_tail(self) -> Any:
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index < len(self):
            raise IndexError("list index out of range.")
        delete_node = self.head
        if self.head == self.tail:  # just one node
            self.head = self.tail = None
        elif index == 0:  # delete head node
            self.tail.next = self.tail.next.next
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
            if index == len(self) - 1:  # delete at tail
                self.tail = temp
        return delete_node.data

    def is_empty(self) -> bool:
        return len(self) == 0


def test_circular_linked_list() -> None:
    circular_linked_list = CircularLinkedList()
    assert len(circular_linked_list) == 0
    assert circular_linked_list.is_empty() is True
    assert str(circular_linked_list) == ""

    try:
        circular_linked_list.delete_front()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_tail()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_nth(-1)
        raise AssertionError
    except IndexError:
        assert True

    try:
        circular_linked_list.delete_nth(0)
        raise AssertionError
    except IndexError:
        assert True

    assert circular_linked_list.is_empty() is True
    for i in range(5):
        assert len(circular_linked_list) == i
        circular_linked_list.insert_nth(i, i + 1)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    circular_linked_list.insert_tail(6)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 7))
    circular_linked_list.insert_head(0)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(0, 7))

    assert circular_linked_list.delete_front() == 0
    assert circular_linked_list.delete_tail() == 6
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    assert circular_linked_list.delete_nth(2) == 3

    circular_linked_list.insert_nth(2, 3)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    assert circular_linked_list.is_empty() is False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
# ---------------------------------------------------------------------------
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class FlaxRoFormerModelTester(unittest.TestCase):
def __init__( self :List[str] , __magic_name__ :List[str] , __magic_name__ :List[Any]=13 , __magic_name__ :Any=7 , __magic_name__ :Optional[int]=True , __magic_name__ :List[Any]=True , __magic_name__ :Optional[int]=True , __magic_name__ :Union[str, Any]=True , __magic_name__ :Any=99 , __magic_name__ :List[str]=32 , __magic_name__ :List[str]=5 , __magic_name__ :str=4 , __magic_name__ :str=37 , __magic_name__ :Optional[int]="gelu" , __magic_name__ :int=0.1 , __magic_name__ :Dict=0.1 , __magic_name__ :List[str]=512 , __magic_name__ :Tuple=16 , __magic_name__ :Tuple=2 , __magic_name__ :List[str]=0.02 , __magic_name__ :Any=4 , ):
'''simple docstring'''
a = parent
a = batch_size
a = seq_length
a = is_training
a = use_attention_mask
a = use_token_type_ids
a = use_labels
a = vocab_size
a = hidden_size
a = num_hidden_layers
a = num_attention_heads
a = intermediate_size
a = hidden_act
a = hidden_dropout_prob
a = attention_probs_dropout_prob
a = max_position_embeddings
a = type_vocab_size
a = type_sequence_label_size
a = initializer_range
a = num_choices
def lowerCamelCase__ ( self :Optional[int] ):
'''simple docstring'''
a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
a = None
if self.use_attention_mask:
a = random_attention_mask([self.batch_size, self.seq_length] )
a = None
if self.use_token_type_ids:
a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
a = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__magic_name__ , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def lowerCamelCase__ ( self :Optional[Any] ):
'''simple docstring'''
a = self.prepare_config_and_inputs()
a , a , a , a = config_and_inputs
a = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
@require_flax
class FlaxRoFormerModelTest(FlaxModelTesterMixin, unittest.TestCase):
UpperCamelCase__ = True
all_model_classes = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
def setUp(self) -> None:
    self.model_tester = FlaxRoFormerModelTester(self)
@slow
def lowerCamelCase__ ( self :List[str] ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
a = model_class_name.from_pretrained("""junnyu/roformer_chinese_small""" , from_pt=__magic_name__ )
a = model(np.ones((1, 1) ) )
self.assertIsNotNone(__magic_name__ )
@require_flax
class __lowerCAmelCase ( unittest.TestCase ):
@slow
def lowerCamelCase__ ( self :Union[str, Any] ):
'''simple docstring'''
a = FlaxRoFormerForMaskedLM.from_pretrained("""junnyu/roformer_chinese_base""" )
a = jnp.array([[0, 1, 2, 3, 4, 5]] )
a = model(__magic_name__ )[0]
a = 50000
a = (1, 6, vocab_size)
self.assertEqual(output.shape , __magic_name__ )
a = jnp.array(
[[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]] )
self.assertTrue(jnp.allclose(output[:, :3, :3] , __magic_name__ , atol=1E-4 ) )
# ---------------------------------------------------------------------------
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets


def sigmoid_function(z):
    return 1 / (1 + np.exp(-z))


def cost_function(h, y):
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()


def log_likelihood(x, y, weights):
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))


def logistic_reg(alpha, x, y, max_iterations=70000):
    theta = np.zeros(x.shape[1])

    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f"loss: {j} \t")  # printing the loss after every 100 iterations
    return theta


# In[68]:

if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1
    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70000)
    print("theta: ", theta)  # printing the theta i.e our weights vector

    def predict_prob(x):
        return sigmoid_function(
            np.dot(x, theta)
        )  # predicting the value of probability from the logistic regression algorithm

    plt.figure(figsize=(10, 6))
    plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="b", label="0")
    plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="r", label="1")
    (x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
    (x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
    (xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
    grid = np.c_[xx1.ravel(), xx2.ravel()]
    probs = predict_prob(grid).reshape(xx1.shape)
    plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors="black")
    plt.legend()
    plt.show()
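
# --- Added sketch (not part of the original script): a tiny sanity check for the
# helpers above. The 0.5 contour drawn by plt.contour is exactly the set of points
# where theta . x = 0, because sigmoid(0) = 0.5.
def _sigmoid_sanity_check():
    z = np.array([-2.0, 0.0, 2.0])
    p = sigmoid_function(z)
    assert abs(p[1] - 0.5) < 1e-12  # sigmoid(0) is exactly 0.5
    assert np.all((p > 0) & (p < 1))  # outputs are valid probabilities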
| 368 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_blenderbot": [
"BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlenderbotConfig",
"BlenderbotOnnxConfig",
],
"tokenization_blenderbot": ["BlenderbotTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_blenderbot_fast"] = ["BlenderbotTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blenderbot"] = [
"BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotForCausalLM",
"BlenderbotForConditionalGeneration",
"BlenderbotModel",
"BlenderbotPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blenderbot"] = [
"TFBlenderbotForConditionalGeneration",
"TFBlenderbotModel",
"TFBlenderbotPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_blenderbot"] = [
"FlaxBlenderbotForConditionalGeneration",
"FlaxBlenderbotModel",
"FlaxBlenderbotPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
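
# --- Added note: a simplified sketch of the lazy-import pattern used above (an
# illustration, not the real `_LazyModule`). Attribute access triggers the import
# of the owning submodule, which is what keeps `import transformers` cheap even
# though several frameworks are registered in `_import_structure`.
import importlib
import types


class _TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._attr_to_submodule = {
            attr: submodule for submodule, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        # The real class turns a missing key into AttributeError; kept simple here.
        submodule = self._attr_to_submodule[attr]
        value = getattr(importlib.import_module(f"{self.__name__}.{submodule}"), attr)
        setattr(self, attr, value)  # cache so each submodule is imported only once
        return value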
| 347 | 0 |
"""simple docstring"""
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
    bleu_data = json.load(f)


@require_torch
class ModelEvalTester(unittest.TestCase):
    def get_tokenizer(self, mname):
        return FSMTTokenizer.from_pretrained(mname)

    def get_model(self, mname):
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model

    @parameterized.expand(
        [
            ["en-ru", 26.0],
            ["ru-en", 22.0],
            ["en-de", 22.0],
            ["de-en", 29.0],
        ]
    )
    @slow
    def test_bleu_scores(self, pair, min_bleu_score):
        mname = f"facebook/wmt19-{pair}"
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)

        src_sentences = bleu_data[pair]["src"]
        tgt_sentences = bleu_data[pair]["tgt"]

        batch = tokenizer(src_sentences, return_tensors="pt", truncation=True, padding="longest").to(torch_device)
        outputs = model.generate(
            input_ids=batch.input_ids,
            num_beams=8,
        )
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False
        )
        scores = calculate_bleu(decoded_sentences, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores["bleu"], min_bleu_score)
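
# --- Added usage sketch (illustrative; downloads pretrained weights, so it is not
# part of the test suite itself):
#
#   tokenizer = FSMTTokenizer.from_pretrained("facebook/wmt19-en-ru")
#   model = FSMTForConditionalGeneration.from_pretrained("facebook/wmt19-en-ru")
#   batch = tokenizer(["Machine learning is great"], return_tensors="pt")
#   generated = model.generate(input_ids=batch.input_ids, num_beams=8)
#   print(tokenizer.batch_decode(generated, skip_special_tokens=True))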
| 369 |
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class IPNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (IPNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {"num_train_timesteps": 1000}
        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**kwargs)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residuals (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps, time_step=None)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=None)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 2540529) < 10
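
# --- Added sketch of the inference loop the tests above exercise. `model` is assumed
# to be any callable `model(sample, t)` that predicts the residual; this is an
# illustration, not part of the test suite.
def denoise_with_ipndm(model, sample, num_inference_steps=50):
    scheduler = IPNDMScheduler(num_train_timesteps=1000)
    scheduler.set_timesteps(num_inference_steps)
    for t in scheduler.timesteps:
        residual = model(sample, t)  # network prediction for this timestep
        sample = scheduler.step(residual, t, sample).prev_sample  # one solver step
    return sample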
| 347 | 0 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_trajectory_transformer": [
"TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"TrajectoryTransformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_trajectory_transformer"] = [
"TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TrajectoryTransformerModel",
"TrajectoryTransformerPreTrainedModel",
"load_tf_weights_in_trajectory_transformer",
]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 370 |
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"


def main() -> None:
    message = input("Enter message: ")
    key = input("Enter key [alphanumeric]: ")
    mode = input("Encrypt/Decrypt [e/d]: ")

    if mode.lower().startswith("e"):
        mode = "encrypt"
        translated = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        mode = "decrypt"
        translated = decrypt_message(key, message)

    print(f"\n{mode.title()}ed message:")
    print(translated)


def encrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "encrypt")


def decrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "decrypt")


def translate_message(key: str, message: str, mode: str) -> str:
    translated = []
    key_index = 0
    key = key.upper()

    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index])
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index])

            num %= len(LETTERS)

            if symbol.isupper():
                translated.append(LETTERS[num])
            elif symbol.islower():
                translated.append(LETTERS[num].lower())

            key_index += 1
            if key_index == len(key):
                key_index = 0
        else:
            translated.append(symbol)

    return "".join(translated)


if __name__ == "__main__":
    main()
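
# --- Added round-trip check (illustrative): decrypting with the same key must
# recover the original message, including case and non-letter characters.
def _round_trip_demo():
    message = "Attack at dawn!"
    key = "LEMON"
    assert decrypt_message(key, encrypt_message(key, message)) == message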
| 347 | 0 |
import os
import string
import sys
ARROW_KEY_FLAG = 1 << 8

KEYMAP = {
    "tab": ord("\t"),
    "newline": ord("\r"),
    "esc": 27,
    "up": 65 + ARROW_KEY_FLAG,
    "down": 66 + ARROW_KEY_FLAG,
    "right": 67 + ARROW_KEY_FLAG,
    "left": 68 + ARROW_KEY_FLAG,
    "mod_int": 91,
    "undefined": sys.maxsize,
    "interrupt": 3,
    "insert": 50,
    "delete": 51,
    "pg_up": 53,
    "pg_down": 54,
}

KEYMAP["arrow_begin"] = KEYMAP["up"]
KEYMAP["arrow_end"] = KEYMAP["left"]

if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
        b"\xe0H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\x00H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\xe0P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\x00P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\xe0M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\x00M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\xe0K": KEYMAP["left"] - ARROW_KEY_FLAG,
        b"\x00K": KEYMAP["left"] - ARROW_KEY_FLAG,
    }

for i in range(10):
    KEYMAP[str(i)] = ord(str(i))
def get_raw_chars():
    "Gets raw characters from inputs"
    if os.name == "nt":
        import msvcrt

        encoding = "mbcs"
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                ch2 = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[ch2])
                    WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"]))
                    WIN_CH_BUFFER.append(chx)
                    if ord(chx) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126))
                    ch = chr(KEYMAP["esc"])
                except KeyError:
                    ch = ch2[1]
            else:
                ch = ch.decode(encoding)
        else:
            ch = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty

        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch


def get_character():
    "Gets a character from the keyboard and returns the key code"
    char = get_raw_chars()
    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char
    elif ord(char) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()
    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
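
# --- Added helper (illustrative): arrow keys arrive as the escape sequence
# ESC [ A..D; get_character() folds them back into single characters by adding
# ARROW_KEY_FLAG (1 << 8), so callers can compare against KEYMAP["up"] etc.
def _is_arrow_key(char_or_code):
    code = char_or_code if isinstance(char_or_code, int) else ord(char_or_code)
    return KEYMAP["arrow_begin"] <= code <= KEYMAP["arrow_end"]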
| 371 |
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class FlaxRobertaModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )


@require_flax
class FlaxRobertaModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRobertaModel,
            FlaxRobertaForCausalLM,
            FlaxRobertaForMaskedLM,
            FlaxRobertaForSequenceClassification,
            FlaxRobertaForTokenClassification,
            FlaxRobertaForMultipleChoice,
            FlaxRobertaForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRobertaModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("roberta-base", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
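
# --- Added usage note (illustrative): `from_pt=True` converts a PyTorch checkpoint
# to Flax weights on the fly, e.g.
#   model = FlaxRobertaModel.from_pretrained("roberta-base", from_pt=True)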
| 347 | 0 |
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }

        return inputs
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2,
        )
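
# --- Added sketch (illustrative): the dummy inputs above pin every source of
# randomness to `seed`, which is what makes the fast pipeline tests reproducible.
def _seeded_generator(device, seed=0):
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)  # MPS does not support device-local generators
    return torch.Generator(device=device).manual_seed(seed)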
| 350 |
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class JsonConfig(datasets.BuilderConfig):
    """BuilderConfig for JSON."""

    features: Optional[datasets.Features] = None
    encoding: str = "utf-8"
    encoding_errors: Optional[str] = None
    field: Optional[str] = None
    use_threads: bool = True  # deprecated
    block_size: Optional[int] = None  # deprecated
    chunksize: int = 10 << 20  # 10MB
    newlines_in_values: Optional[int] = None


class Json(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = JsonConfig

    def _info(self):
        if self.config.block_size is not None:
            logger.warning("The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead")
            self.config.chunksize = self.config.block_size
        if self.config.use_threads is not True:
            logger.warning(
                "The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore."
            )
        if self.config.newlines_in_values is not None:
            raise ValueError("The JSON loader parameter `newlines_in_values` is no longer supported")
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # adding missing columns
            for column_name in set(self.config.features) - set(pa_table.column_names):
                type = self.config.features.arrow_schema.field(column_name).type
                pa_table = pa_table.append_column(column_name, pa.array([None] * len(pa_table), type=type))
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            # If the file is one json object and if we need to look at the list of items in one specific field
            if self.config.field is not None:
                with open(file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f:
                    dataset = json.load(f)

                # We keep only the field we are interested in
                dataset = dataset[self.config.field]

                # We accept two format: a list of dicts or a dict of lists
                if isinstance(dataset, (list, tuple)):
                    keys = set().union(*[row.keys() for row in dataset])
                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                else:
                    mapping = dataset
                pa_table = pa.Table.from_pydict(mapping)
                yield file_idx, self._cast_table(pa_table)

            # If the file has one json object per line
            else:
                with open(file, "rb") as f:
                    batch_idx = 0
                    # Use block_size equal to the chunk size divided by 32 to leverage multithreading
                    # Set a default minimum value of 16kB if the chunk size is really small
                    block_size = max(self.config.chunksize // 32, 16 << 10)
                    encoding_errors = (
                        self.config.encoding_errors if self.config.encoding_errors is not None else "strict"
                    )
                    while True:
                        batch = f.read(self.config.chunksize)
                        if not batch:
                            break
                        # Finish current line
                        try:
                            batch += f.readline()
                        except (AttributeError, io.UnsupportedOperation):
                            batch += readline(f)
                        # PyArrow only accepts utf-8 encoded bytes
                        if self.config.encoding != "utf-8":
                            batch = batch.decode(self.config.encoding, errors=encoding_errors).encode("utf-8")
                        try:
                            while True:
                                try:
                                    pa_table = paj.read_json(
                                        io.BytesIO(batch), read_options=paj.ReadOptions(block_size=block_size)
                                    )
                                    break
                                except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
                                    if (
                                        isinstance(e, pa.ArrowInvalid)
                                        and "straddling" not in str(e)
                                        or block_size > len(batch)
                                    ):
                                        raise
                                    else:
                                        # Increase the block size in case it was too small.
                                        # The block size will be reset for the next file.
                                        logger.debug(
                                            f"Batch of {len(batch)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}."
                                        )
                                        block_size *= 2
                        except pa.ArrowInvalid as e:
                            try:
                                with open(
                                    file, encoding=self.config.encoding, errors=self.config.encoding_errors
                                ) as f:
                                    dataset = json.load(f)
                            except json.JSONDecodeError:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise e
                            # If possible, parse the file as a list of json objects and exit the loop
                            if isinstance(dataset, list):  # list is the only sequence type supported in JSON
                                try:
                                    keys = set().union(*[row.keys() for row in dataset])
                                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                                    pa_table = pa.Table.from_pydict(mapping)
                                except (pa.ArrowInvalid, AttributeError) as e:
                                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                    raise ValueError(f"Not able to read records in the JSON file at {file}.") from None
                                yield file_idx, self._cast_table(pa_table)
                                break
                            else:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise ValueError(
                                    f"Not able to read records in the JSON file at {file}. "
                                    f"You should probably indicate the field of the JSON file containing your records. "
                                    f"This JSON file contain the following fields: {str(list(dataset.keys()))}. "
                                    f"Select the correct one and provide it as `field='XXX'` to the dataset loading method. "
                                ) from None
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield (file_idx, batch_idx), self._cast_table(pa_table)
                        batch_idx += 1
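
# --- Added standalone sketch of the chunk-then-finish-the-line trick used in
# `_generate_tables` above: each read is extended to the next newline so that no
# JSON record is ever split across two batches (illustrative, not part of the loader).
def iter_line_aligned_chunks(path, chunksize=10 << 20):
    with open(path, "rb") as f:
        while True:
            batch = f.read(chunksize)
            if not batch:
                return
            batch += f.readline()  # complete the record the read may have cut in half
            yield batch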
| 347 | 0 |
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class OpenLlamaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return OpenLlamaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            use_stable_embedding=True,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = OpenLlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = OpenLlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class OpenLlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    all_generative_model_classes = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenLlamaModel,
            "text-classification": OpenLlamaForSequenceClassification,
            "text-generation": OpenLlamaForCausalLM,
            "zero-shot": OpenLlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = OpenLlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenLlamaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    @unittest.skip("Open-Llama buffers include complex numbers, which breaks this test")
    def test_save_load_fast_init_from_base(self):
        pass

    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = OpenLlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = OpenLlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
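
# --- Added sketch of what `rope_scaling={"type": "linear", "factor": f}` means
# conceptually: positions are divided by the factor before the rotary angles are
# computed, stretching the usable context window (illustrative, not the model code).
def rotary_angle(position, dim_pair, head_dim, factor=1.0, base=10000.0):
    inv_freq = 1.0 / (base ** (2 * dim_pair / head_dim))
    return (position / factor) * inv_freq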
| 351 |
import warnings
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)


class ByT5Tokenizer(PreTrainedTokenizer):
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=125,
        additional_special_tokens=None,
        **kwargs,
    ):
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to ByT5Tokenizer. In this case the additional_special_tokens must include the"
                    " extra_ids tokens"
                )

        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token

        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self._extra_ids = extra_ids
        self._utf_vocab_size = 2**8  # utf is 8 bits

        # define special tokens dict
        self.special_tokens_encoder = {
            self.pad_token: 0,
            self.eos_token: 1,
            self.unk_token: 2,
        }
        self._num_special_tokens = len(self.special_tokens_encoder)
        n = len(additional_special_tokens)
        for i, token in enumerate(additional_special_tokens):
            self.special_tokens_encoder[token] = self.vocab_size + i - n
        self.special_tokens_decoder = {v: k for k, v in self.special_tokens_encoder.items()}

    @property
    def vocab_size(self):
        return self._utf_vocab_size + self._num_special_tokens + self._extra_ids

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def _add_eos_if_not_present(self, token_ids: List[int]) -> List[int]:
        """Do not add eos again if user already added it."""
        if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
                " eos tokens being added."
            )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        eos = [self.eos_token_id]

        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        token_ids_0 = self._add_eos_if_not_present(token_ids_0)
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1)
            return token_ids_0 + token_ids_1

    def _tokenize(self, text: str) -> List[str]:
        """Take as input a string and return a list of strings (tokens) for words/sub-words"""
        tokens = [chr(i) for i in text.encode("utf-8")]
        return tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.special_tokens_encoder:
            token_id = self.special_tokens_encoder[token]
        elif token in self.added_tokens_encoder:
            token_id = self.added_tokens_encoder[token]
        elif len(token) != 1:
            token_id = self.unk_token_id
        else:
            token_id = ord(token) + self._num_special_tokens
        return token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.special_tokens_decoder:
            token = self.special_tokens_decoder[index]
        else:
            token = chr(index - self._num_special_tokens)
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) to a single string."""
        bstring = b""
        for token in tokens:
            if token in self.special_tokens_decoder:
                tok_string = self.special_tokens_decoder[token].encode("utf-8")
            elif token in self.added_tokens_decoder:
                tok_string = self.special_tokens_decoder[token].encode("utf-8")
            elif token in self.special_tokens_encoder:
                tok_string = token.encode("utf-8")
            elif token in self.added_tokens_encoder:
                tok_string = token.encode("utf-8")
            else:
                tok_string = bytes([ord(token)])
            bstring += tok_string
        string = bstring.decode("utf-8", errors="ignore")
        return string

    # ByT5Tokenizer has no vocab file
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        return ()
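
# --- Added round-trip sketch (illustrative): ByT5 ids are UTF-8 bytes shifted by the
# number of leading special tokens (pad/eos/unk -> 0/1/2), so "abc" maps to
# [97 + 3, 98 + 3, 99 + 3] and back.
def _byte_round_trip(text, num_special_tokens=3):
    ids = [b + num_special_tokens for b in text.encode("utf-8")]
    return bytes(i - num_special_tokens for i in ids).decode("utf-8")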
| 347 | 0 |
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_orig_config_file(orig_cfg_file):
    print("Loading config file...")

    def flatten_yaml_as_dict(d, parent_key="", sep="."):
        items = []
        for k, v in d.items():
            new_key = parent_key + sep + k if parent_key else k
            if isinstance(v, collections.abc.MutableMapping):
                items.extend(flatten_yaml_as_dict(v, new_key, sep=sep).items())
            else:
                items.append((new_key, v))
        return dict(items)

    config = argparse.Namespace()
    with open(orig_cfg_file, "r") as yaml_file:
        try:
            cfg = yaml.load(yaml_file, Loader=yaml.FullLoader)

            flat_cfg = flatten_yaml_as_dict(cfg)
            for k, v in flat_cfg.items():
                setattr(config, k, v)
        except yaml.YAMLError as exc:
            logger.error("Error while loading config file: {}. Error message: {}".format(orig_cfg_file, str(exc)))
    return config
def get_mobilevitva_config(task_name, orig_cfg_file):
    config = MobileViTVaConfig()

    is_segmentation_model = False

    # dataset
    if task_name.startswith("imagenet1k_"):
        config.num_labels = 1000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-1k-id2label.json"
    elif task_name.startswith("imagenet21k_to_1k_"):
        config.num_labels = 21000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-22k-id2label.json"
    elif task_name.startswith("ade20k_"):
        config.num_labels = 151
        config.image_size = 512
        filename = "ade20k-id2label.json"
        is_segmentation_model = True
    elif task_name.startswith("voc_"):
        config.num_labels = 21
        config.image_size = 512
        filename = "pascal-voc-id2label.json"
        is_segmentation_model = True

    # orig_config
    orig_config = load_orig_config_file(orig_cfg_file)
    assert getattr(orig_config, "model.classification.name", -1) == "mobilevit_v2", "Invalid model"
    config.width_multiplier = getattr(orig_config, "model.classification.mitv2.width_multiplier", 1.0)
    assert (
        getattr(orig_config, "model.classification.mitv2.attn_norm_layer", -1) == "layer_norm_2d"
    ), "Norm layers other than layer_norm_2d is not supported"
    config.hidden_act = getattr(orig_config, "model.classification.activation.name", "swish")
    # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)

    if is_segmentation_model:
        config.output_stride = getattr(orig_config, "model.segmentation.output_stride", 16)
        if "_deeplabv3" in task_name:
            config.atrous_rates = getattr(orig_config, "model.segmentation.deeplabv3.aspp_rates", [12, 24, 36])
            config.aspp_out_channels = getattr(orig_config, "model.segmentation.deeplabv3.aspp_out_channels", 512)
            config.aspp_dropout_prob = getattr(orig_config, "model.segmentation.deeplabv3.aspp_dropout", 0.1)

    # id2label
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict, base_model=False):
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevitv2."

    rename_keys = []
    for k in state_dict.keys():
        if k[:8] == "encoder.":
            k_new = k[8:]
        else:
            k_new = k

        if ".block." in k:
            k_new = k_new.replace(".block.", ".")
        if ".conv." in k:
            k_new = k_new.replace(".conv.", ".convolution.")
        if ".norm." in k:
            k_new = k_new.replace(".norm.", ".normalization.")

        if "conv_1." in k:
            k_new = k_new.replace("conv_1.", f"{model_prefix}conv_stem.")
        for i in [1, 2]:
            if f"layer_{i}." in k:
                k_new = k_new.replace(f"layer_{i}.", f"{model_prefix}encoder.layer.{i-1}.layer.")
        if ".exp_1x1." in k:
            k_new = k_new.replace(".exp_1x1.", ".expand_1x1.")
        if ".red_1x1." in k:
            k_new = k_new.replace(".red_1x1.", ".reduce_1x1.")

        for i in [3, 4, 5]:
            if f"layer_{i}.0." in k:
                k_new = k_new.replace(f"layer_{i}.0.", f"{model_prefix}encoder.layer.{i-1}.downsampling_layer.")
            if f"layer_{i}.1.local_rep.0." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.0.", f"{model_prefix}encoder.layer.{i-1}.conv_kxk.")
            if f"layer_{i}.1.local_rep.1." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.1.", f"{model_prefix}encoder.layer.{i-1}.conv_1x1.")

        for i in [3, 4, 5]:
            if i == 3:
                j_in = [0, 1]
            elif i == 4:
                j_in = [0, 1, 2, 3]
            elif i == 5:
                j_in = [0, 1, 2]

            for j in j_in:
                if f"layer_{i}.1.global_rep.{j}." in k:
                    k_new = k_new.replace(
                        f"layer_{i}.1.global_rep.{j}.", f"{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}."
                    )
                if f"layer_{i}.1.global_rep.{j+1}." in k:
                    k_new = k_new.replace(
                        f"layer_{i}.1.global_rep.{j+1}.", f"{model_prefix}encoder.layer.{i-1}.layernorm."
                    )

            if f"layer_{i}.1.conv_proj." in k:
                k_new = k_new.replace(f"layer_{i}.1.conv_proj.", f"{model_prefix}encoder.layer.{i-1}.conv_projection.")

        if "pre_norm_attn.0." in k:
            k_new = k_new.replace("pre_norm_attn.0.", "layernorm_before.")
        if "pre_norm_attn.1." in k:
            k_new = k_new.replace("pre_norm_attn.1.", "attention.")
        if "pre_norm_ffn.0." in k:
            k_new = k_new.replace("pre_norm_ffn.0.", "layernorm_after.")
        if "pre_norm_ffn.1." in k:
            k_new = k_new.replace("pre_norm_ffn.1.", "ffn.conv1.")
        if "pre_norm_ffn.3." in k:
            k_new = k_new.replace("pre_norm_ffn.3.", "ffn.conv2.")
        if "classifier.1." in k:
            k_new = k_new.replace("classifier.1.", "classifier.")

        if "seg_head." in k:
            k_new = k_new.replace("seg_head.", "segmentation_head.")
        if ".aspp_layer." in k:
            k_new = k_new.replace(".aspp_layer.", ".")
        if ".aspp_pool." in k:
            k_new = k_new.replace(".aspp_pool.", ".")

        rename_keys.append((k, k_new))
    return rename_keys
def remove_unused_keys(state_dict):
    """remove unused keys (e.g.: seg_head.aux_head)"""
    keys_to_ignore = []
    for k in state_dict.keys():
        if k.startswith("seg_head.aux_head."):
            keys_to_ignore.append(k)
    for k in keys_to_ignore:
        state_dict.pop(k, None)


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_mobilevitva_checkpoint(task_name, checkpoint_path, orig_config_path, pytorch_dump_folder_path):
    config = get_mobilevitva_config(task_name, orig_config_path)

    # load original state_dict
    checkpoint = torch.load(checkpoint_path, map_location="cpu")

    # load huggingface model
    if task_name.startswith("ade20k_") or task_name.startswith("voc_"):
        model = MobileViTVaForSemanticSegmentation(config).eval()
        base_model = False
    else:
        model = MobileViTVaForImageClassification(config).eval()
        base_model = False

    # remove and rename some keys of load the original model
    state_dict = checkpoint
    remove_unused_keys(state_dict)
    rename_keys = create_rename_keys(state_dict, base_model=base_model)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load modified state_dict
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)

    # verify classification model
    if task_name.startswith("imagenet"):
        logits = outputs.logits
        predicted_class_idx = logits.argmax(-1).item()
        print("Predicted class:", model.config.id2label[predicted_class_idx])
        if task_name.startswith("imagenet1k_256") and config.width_multiplier == 1.0:
            # expected_logits for base variant
            expected_logits = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01])
            assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {task_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--task",
default="imagenet1k_256",
type=str,
help=(
"Name of the task for which the MobileViTV2 model you'd like to convert is trained on . "
"\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n "
),
choices=[
"imagenet1k_256",
"imagenet1k_384",
"imagenet21k_to_1k_256",
"imagenet21k_to_1k_384",
"ade20k_deeplabv3",
"voc_deeplabv3",
],
)
parser.add_argument(
"--orig_checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)."
)
parser.add_argument("--orig_config_path", required=True, type=str, help="Path to the original config file.")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
)
__UpperCamelCase : Any = parser.parse_args()
convert_mobilevitva_checkpoint(
args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
)
| 352 |
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class __lowerCAmelCase :
def __init__( self :Optional[int] , __magic_name__ :str , __magic_name__ :int=2 , __magic_name__ :List[str]=3 , __magic_name__ :Optional[int]=4 , __magic_name__ :str=2 , __magic_name__ :Any=7 , __magic_name__ :Optional[Any]=True , __magic_name__ :List[Any]=True , __magic_name__ :Optional[int]=True , __magic_name__ :Dict=True , __magic_name__ :List[Any]=99 , __magic_name__ :Dict=36 , __magic_name__ :Optional[Any]=3 , __magic_name__ :str=4 , __magic_name__ :Optional[Any]=37 , __magic_name__ :Dict="gelu" , __magic_name__ :Any=0.1 , __magic_name__ :Union[str, Any]=0.1 , __magic_name__ :Dict=512 , __magic_name__ :str=16 , __magic_name__ :List[Any]=2 , __magic_name__ :Tuple=0.02 , __magic_name__ :Any=6 , __magic_name__ :Optional[int]=6 , __magic_name__ :Tuple=3 , __magic_name__ :str=4 , __magic_name__ :List[str]=None , __magic_name__ :str=1000 , ):
'''simple docstring'''
a = parent
a = batch_size
a = num_channels
a = image_size
a = patch_size
a = text_seq_length
a = is_training
a = use_input_mask
a = use_token_type_ids
a = use_labels
a = vocab_size
a = hidden_size
a = num_hidden_layers
a = num_attention_heads
a = intermediate_size
a = hidden_act
a = hidden_dropout_prob
a = attention_probs_dropout_prob
a = max_position_embeddings
a = type_vocab_size
a = type_sequence_label_size
a = initializer_range
a = coordinate_size
a = shape_size
a = num_labels
a = num_choices
a = scope
a = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
a = text_seq_length
a = (image_size // patch_size) ** 2 + 1
a = self.text_seq_length + self.image_seq_length
def lowerCamelCase__ ( self :List[str] ):
'''simple docstring'''
a = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
a = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
a = bbox[i, j, 3]
a = bbox[i, j, 1]
a = t
if bbox[i, j, 2] < bbox[i, j, 0]:
a = bbox[i, j, 2]
a = bbox[i, j, 0]
a = t
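        # the swaps above order each box as (x0, y0, x1, y1) with x0 <= x1 and y0 <= y1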
a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
a = None
if self.use_input_mask:
a = random_attention_mask([self.batch_size, self.text_seq_length] )
a = None
if self.use_token_type_ids:
a = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
a = None
a = None
if self.use_labels:
a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
a = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
a = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def lowerCamelCase__ ( self :int , __magic_name__ :Union[str, Any] , __magic_name__ :List[str] , __magic_name__ :List[str] , __magic_name__ :int , __magic_name__ :Tuple , __magic_name__ :int , __magic_name__ :Tuple , __magic_name__ :int ):
'''simple docstring'''
a = LayoutLMvaModel(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
# text + image
a = model(__magic_name__ , pixel_values=__magic_name__ )
a = model(
__magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ )
a = model(__magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , token_type_ids=__magic_name__ )
a = model(__magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
a = model(__magic_name__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
a = model(pixel_values=__magic_name__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def lowerCamelCase__ ( self :Optional[Any] , __magic_name__ :List[Any] , __magic_name__ :Optional[int] , __magic_name__ :Tuple , __magic_name__ :Union[str, Any] , __magic_name__ :int , __magic_name__ :Optional[int] , __magic_name__ :List[str] , __magic_name__ :List[str] ):
'''simple docstring'''
a = self.num_labels
a = LayoutLMvaForSequenceClassification(__magic_name__ )
model.to(__magic_name__ )
model.eval()
a = model(
__magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , labels=__magic_name__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase__ ( self :Tuple , __magic_name__ :Dict , __magic_name__ :Optional[Any] , __magic_name__ :Optional[Any] , __magic_name__ :Dict , __magic_name__ :Dict , __magic_name__ :int , __magic_name__ :List[str] , __magic_name__ :Tuple ):
'''simple docstring'''
a = self.num_labels
a = LayoutLMvaForTokenClassification(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
a = model(
__magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , labels=__magic_name__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def lowerCamelCase__ ( self :Optional[int] , __magic_name__ :Tuple , __magic_name__ :str , __magic_name__ :int , __magic_name__ :Optional[int] , __magic_name__ :str , __magic_name__ :List[str] , __magic_name__ :Optional[int] , __magic_name__ :Optional[Any] ):
'''simple docstring'''
a = LayoutLMvaForQuestionAnswering(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
a = model(
__magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , start_positions=__magic_name__ , end_positions=__magic_name__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCamelCase__ ( self :Union[str, Any] ):
'''simple docstring'''
a = self.prepare_config_and_inputs()
        a , a , a , a , a , a , a , a = config_and_inputs
a = {
"""input_ids""": input_ids,
"""bbox""": bbox,
"""pixel_values""": pixel_values,
"""token_type_ids""": token_type_ids,
"""attention_mask""": input_mask,
}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( __magic_name__ , __magic_name__ , unittest.TestCase ):
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = (
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
UpperCamelCase__ = (
{'''document-question-answering''': LayoutLMvaForQuestionAnswering, '''feature-extraction''': LayoutLMvaModel}
if is_torch_available()
else {}
)
def lowerCamelCase__ ( self :List[str] , __magic_name__ :Union[str, Any] , __magic_name__ :List[str] , __magic_name__ :Optional[Any] , __magic_name__ :List[str] , __magic_name__ :List[Any] ):
'''simple docstring'''
return True
def lowerCamelCase__ ( self :int ):
'''simple docstring'''
a = LayoutLMvaModelTester(self )
a = ConfigTester(self , config_class=__magic_name__ , hidden_size=37 )
def lowerCamelCase__ ( self :Optional[Any] , __magic_name__ :Dict , __magic_name__ :Dict , __magic_name__ :Any=False ):
'''simple docstring'''
a = copy.deepcopy(__magic_name__ )
        if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING ):
a = {
k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous()
if isinstance(__magic_name__ , torch.Tensor ) and v.ndim > 1
else v
for k, v in inputs_dict.items()
}
if return_labels:
            if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING ):
                a = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=__magic_name__ )
            elif model_class in get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING ):
                a = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=__magic_name__ )
                a = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=__magic_name__ )
            elif model_class in [
                *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING ),
            ]:
                a = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=__magic_name__ )
            elif model_class in [
                *get_values(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING ),
            ]:
                a = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=__magic_name__ , )
return inputs_dict
def lowerCamelCase__ ( self :str ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase__ ( self :Union[str, Any] ):
'''simple docstring'''
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__magic_name__ )
def lowerCamelCase__ ( self :Tuple ):
'''simple docstring'''
a = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
a = type
self.model_tester.create_and_check_model(*__magic_name__ )
def lowerCamelCase__ ( self :Tuple ):
'''simple docstring'''
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__magic_name__ )
def lowerCamelCase__ ( self :Union[str, Any] ):
'''simple docstring'''
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__magic_name__ )
def lowerCamelCase__ ( self :int ):
'''simple docstring'''
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__magic_name__ )
@slow
def lowerCamelCase__ ( self :int ):
'''simple docstring'''
for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a = LayoutLMvaModel.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
def __A ( ) -> str:
a = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
@cached_property
def lowerCamelCase__ ( self :Optional[Any] ):
'''simple docstring'''
return LayoutLMvaImageProcessor(apply_ocr=__magic_name__ ) if is_vision_available() else None
@slow
def lowerCamelCase__ ( self :Optional[Any] ):
'''simple docstring'''
a = LayoutLMvaModel.from_pretrained("""microsoft/layoutlmv3-base""" ).to(__magic_name__ )
a = self.default_image_processor
a = prepare_img()
a = image_processor(images=__magic_name__ , return_tensors="""pt""" ).pixel_values.to(__magic_name__ )
a = torch.tensor([[1, 2]] )
a = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
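        # minimal dummy batch: two token ids, each paired with one (x0, y0, x1, y1) bounding box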
# forward pass
a = model(
input_ids=input_ids.to(__magic_name__ ) , bbox=bbox.to(__magic_name__ ) , pixel_values=pixel_values.to(__magic_name__ ) , )
# verify the logits
a = torch.Size((1, 199, 768) )
self.assertEqual(outputs.last_hidden_state.shape , __magic_name__ )
a = torch.tensor(
[[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] ).to(__magic_name__ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , __magic_name__ , atol=1E-4 ) )
| 347 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__UpperCamelCase : Union[str, Any] = {
"""configuration_vision_encoder_decoder""": ["""VisionEncoderDecoderConfig""", """VisionEncoderDecoderOnnxConfig"""]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Optional[Any] = ["""VisionEncoderDecoderModel"""]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : int = ["""TFVisionEncoderDecoderModel"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Dict = ["""FlaxVisionEncoderDecoderModel"""]
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
__UpperCamelCase : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 353 |
from copy import deepcopy
class __lowerCAmelCase :
def __init__( self :Union[str, Any] , __magic_name__ :list[int] | None = None , __magic_name__ :int | None = None ):
'''simple docstring'''
if arr is None and size is not None:
a = size
a = [0] * size
elif arr is not None:
self.init(__magic_name__ )
else:
raise ValueError("""Either arr or size must be specified""" )
def lowerCamelCase__ ( self :Dict , __magic_name__ :list[int] ):
'''simple docstring'''
a = len(__magic_name__ )
a = deepcopy(__magic_name__ )
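        # O(n) in-place build: each position pushes its partial sum into the next node that covers it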
for i in range(1 , self.size ):
a = self.next_(__magic_name__ )
if j < self.size:
self.tree[j] += self.tree[i]
def lowerCamelCase__ ( self :Tuple ):
'''simple docstring'''
a = self.tree[:]
for i in range(self.size - 1 , 0 , -1 ):
a = self.next_(__magic_name__ )
if j < self.size:
arr[j] -= arr[i]
return arr
@staticmethod
def lowerCamelCase__ ( __magic_name__ :int ):
'''simple docstring'''
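        # adding the lowest set bit (index & -index) moves to the next node whose range covers this index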
return index + (index & (-index))
@staticmethod
def lowerCamelCase__ ( __magic_name__ :int ):
'''simple docstring'''
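        # clearing the lowest set bit steps back to the preceding disjoint block in a prefix query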
return index - (index & (-index))
def lowerCamelCase__ ( self :Any , __magic_name__ :int , __magic_name__ :int ):
'''simple docstring'''
if index == 0:
self.tree[0] += value
return
while index < self.size:
self.tree[index] += value
a = self.next_(__magic_name__ )
def lowerCamelCase__ ( self :Optional[Any] , __magic_name__ :int , __magic_name__ :int ):
'''simple docstring'''
self.add(__magic_name__ , value - self.get(__magic_name__ ) )
def lowerCamelCase__ ( self :int , __magic_name__ :int ):
'''simple docstring'''
if right == 0:
return 0
a = self.tree[0]
right -= 1 # make right inclusive
while right > 0:
result += self.tree[right]
a = self.prev(__magic_name__ )
return result
def lowerCamelCase__ ( self :int , __magic_name__ :int , __magic_name__ :int ):
'''simple docstring'''
return self.prefix(__magic_name__ ) - self.prefix(__magic_name__ )
def lowerCamelCase__ ( self :Tuple , __magic_name__ :int ):
'''simple docstring'''
return self.query(__magic_name__ , index + 1 )
def lowerCamelCase__ ( self :Dict , __magic_name__ :int ):
'''simple docstring'''
value -= self.tree[0]
if value < 0:
return -1
a = 1 # Largest power of 2 <= size
while j * 2 < self.size:
j *= 2
a = 0
while j > 0:
if i + j < self.size and self.tree[i + j] <= value:
value -= self.tree[i + j]
i += j
j //= 2
return i
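# Illustrative example: for arr = [1, 2, 3, 4], prefix(3) == 1 + 2 + 3 == 6,
# query(1, 3) == 2 + 3 == 5, and rank_query(5) == 1 since 1 + 2 <= 5 but 1 + 2 + 3 > 5.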
if __name__ == "__main__":
import doctest
doctest.testmod()
| 347 | 0 |
__UpperCamelCase : List[str] = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
__UpperCamelCase : Tuple = [{"type": "code", "content": INSTALL_CONTENT}]
__UpperCamelCase : Dict = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 354 |
from __future__ import annotations
from typing import Generic, TypeVar
__UpperCamelCase : Union[str, Any] = TypeVar("T")
class __lowerCAmelCase ( Generic[T] ):
def __init__( self :Tuple , __magic_name__ :T ):
'''simple docstring'''
a = data
a = self
a = 0
class __lowerCAmelCase ( Generic[T] ):
def __init__( self :Tuple ):
'''simple docstring'''
a = {}
def lowerCamelCase__ ( self :Optional[int] , __magic_name__ :T ):
'''simple docstring'''
a = DisjointSetTreeNode(__magic_name__ )
def lowerCamelCase__ ( self :List[Any] , __magic_name__ :T ):
'''simple docstring'''
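        # recursively find the set's root; the original algorithm also re-parents nodes on the way (path compression)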
a = self.map[data]
if elem_ref != elem_ref.parent:
a = self.find_set(elem_ref.parent.data )
return elem_ref.parent
def lowerCamelCase__ ( self :List[Any] , __magic_name__ :DisjointSetTreeNode[T] , __magic_name__ :DisjointSetTreeNode[T] ):
'''simple docstring'''
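        # union by rank: attach the root of the shallower tree under the deeper one; equal ranks bump the new root's rank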
if nodea.rank > nodea.rank:
a = nodea
else:
a = nodea
if nodea.rank == nodea.rank:
nodea.rank += 1
def lowerCamelCase__ ( self :Optional[int] , __magic_name__ :T , __magic_name__ :T ):
'''simple docstring'''
self.link(self.find_set(__magic_name__ ) , self.find_set(__magic_name__ ) )
class __lowerCAmelCase ( Generic[T] ):
def __init__( self :Union[str, Any] ):
'''simple docstring'''
a = {}
def lowerCamelCase__ ( self :Union[str, Any] , __magic_name__ :T ):
'''simple docstring'''
if node not in self.connections:
a = {}
def lowerCamelCase__ ( self :Any , __magic_name__ :T , __magic_name__ :T , __magic_name__ :int ):
'''simple docstring'''
self.add_node(__magic_name__ )
self.add_node(__magic_name__ )
a = weight
a = weight
def lowerCamelCase__ ( self :int ):
'''simple docstring'''
a = []
a = set()
for start in self.connections:
for end in self.connections[start]:
if (start, end) not in seen:
seen.add((end, start) )
edges.append((start, end, self.connections[start][end]) )
        edges.sort(key=lambda x : x[2] )
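        # Kruskal's algorithm: take the lightest remaining edge and keep it only if it joins two different components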
# creating the disjoint set
a = DisjointSetTree[T]()
for node in self.connections:
disjoint_set.make_set(__magic_name__ )
# MST generation
a = 0
a = 0
a = GraphUndirectedWeighted[T]()
while num_edges < len(self.connections ) - 1:
a , a , a = edges[index]
index += 1
a = disjoint_set.find_set(__magic_name__ )
a = disjoint_set.find_set(__magic_name__ )
if parent_u != parent_v:
num_edges += 1
graph.add_edge(__magic_name__ , __magic_name__ , __magic_name__ )
disjoint_set.union(__magic_name__ , __magic_name__ )
return graph
| 347 | 0 |
import requests
from bs4 import BeautifulSoup
def lowercase__ ( __lowerCamelCase = "https://www.worldometers.info/coronavirus" ) -> int:
    a = BeautifulSoup(requests.get(url ).text , """html.parser""" )
a = soup.findAll("""h1""" )
a = soup.findAll("""div""" , {"""class""": """maincounter-number"""} )
keys += soup.findAll("""span""" , {"""class""": """panel-title"""} )
values += soup.findAll("""div""" , {"""class""": """number-table-main"""} )
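    # zip below pairs each headline label with its counter; this relies on the page listing both in the same order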
    return {key.text.strip(): value.text.strip() for key, value in zip(keys , values )}
if __name__ == "__main__":
print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n")
for key, value in world_covidaa_stats().items():
print(F'{key}\n{value}\n')
| 355 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
def lowerCamelCase__ ( self :Any ):
'''simple docstring'''
a = tempfile.mkdtemp()
a = BlipImageProcessor()
a = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-BertModel""" )
a = BlipProcessor(__magic_name__ , __magic_name__ )
processor.save_pretrained(self.tmpdirname )
def lowerCamelCase__ ( self :List[Any] , **__magic_name__ :Union[str, Any] ):
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **__magic_name__ ).tokenizer
def lowerCamelCase__ ( self :str , **__magic_name__ :List[str] ):
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **__magic_name__ ).image_processor
def lowerCamelCase__ ( self :int ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def lowerCamelCase__ ( self :Any ):
'''simple docstring'''
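        # one random 30x400 uint8 image, created channels-first and converted to a channels-last PIL image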
a = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
        a = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowerCamelCase__ ( self :Optional[int] ):
'''simple docstring'''
a = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
a = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
a = self.get_image_processor(do_normalize=__magic_name__ , padding_value=1.0 )
a = BlipProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=__magic_name__ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __magic_name__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __magic_name__ )
def lowerCamelCase__ ( self :Any ):
'''simple docstring'''
a = self.get_image_processor()
a = self.get_tokenizer()
a = BlipProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
a = self.prepare_image_inputs()
a = image_processor(__magic_name__ , return_tensors="""np""" )
a = processor(images=__magic_name__ , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def lowerCamelCase__ ( self :Any ):
'''simple docstring'''
a = self.get_image_processor()
a = self.get_tokenizer()
a = BlipProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
a = """lower newer"""
a = processor(text=__magic_name__ )
a = tokenizer(__magic_name__ , return_token_type_ids=__magic_name__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowerCamelCase__ ( self :List[Any] ):
'''simple docstring'''
a = self.get_image_processor()
a = self.get_tokenizer()
a = BlipProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
a = """lower newer"""
a = self.prepare_image_inputs()
a = processor(text=__magic_name__ , images=__magic_name__ )
self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """input_ids""", """attention_mask"""] )
# test if it raises when no input is passed
with pytest.raises(__magic_name__ ):
processor()
def lowerCamelCase__ ( self :List[str] ):
'''simple docstring'''
a = self.get_image_processor()
a = self.get_tokenizer()
a = BlipProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
a = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
a = processor.batch_decode(__magic_name__ )
a = tokenizer.batch_decode(__magic_name__ )
self.assertListEqual(__magic_name__ , __magic_name__ )
def lowerCamelCase__ ( self :Optional[int] ):
'''simple docstring'''
a = self.get_image_processor()
a = self.get_tokenizer()
a = BlipProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
a = """lower newer"""
a = self.prepare_image_inputs()
a = processor(text=__magic_name__ , images=__magic_name__ )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """input_ids""", """attention_mask"""] )
| 347 | 0 |
"""simple docstring"""
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class __lowerCAmelCase :
def __init__( self :Any , __magic_name__ :List[str] , __magic_name__ :Optional[int]=2 , __magic_name__ :Optional[int]=8 , __magic_name__ :Optional[Any]=True , __magic_name__ :List[str]=True , __magic_name__ :List[Any]=True , __magic_name__ :List[str]=True , __magic_name__ :str=99 , __magic_name__ :List[str]=16 , __magic_name__ :int=5 , __magic_name__ :List[Any]=2 , __magic_name__ :Union[str, Any]=36 , __magic_name__ :Dict="gelu" , __magic_name__ :List[str]=0.0 , __magic_name__ :List[str]=0.0 , __magic_name__ :List[Any]=512 , __magic_name__ :Dict=16 , __magic_name__ :Tuple=2 , __magic_name__ :int=0.02 , __magic_name__ :str=3 , __magic_name__ :Union[str, Any]=4 , __magic_name__ :Any=None , ):
'''simple docstring'''
a = parent
a = batch_size
a = seq_length
a = is_training
a = use_input_mask
a = use_token_type_ids
a = use_labels
a = vocab_size
a = hidden_size
a = num_hidden_layers
a = num_attention_heads
a = intermediate_size
a = hidden_act
a = hidden_dropout_prob
a = attention_probs_dropout_prob
a = max_position_embeddings
a = type_vocab_size
a = type_sequence_label_size
a = initializer_range
a = num_labels
a = num_choices
a = scope
def lowerCamelCase__ ( self :List[str] ):
'''simple docstring'''
a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
a = None
if self.use_input_mask:
a = random_attention_mask([self.batch_size, self.seq_length] )
a = None
if self.use_token_type_ids:
a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
a = None
a = None
a = None
if self.use_labels:
a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
a = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
a = ids_tensor([self.batch_size] , self.num_choices )
a = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCamelCase__ ( self :str ):
'''simple docstring'''
return MraConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__magic_name__ , initializer_range=self.initializer_range , )
def lowerCamelCase__ ( self :List[Any] ):
'''simple docstring'''
a = self.get_config()
a = 300
return config
def lowerCamelCase__ ( self :Tuple ):
'''simple docstring'''
        a , a , a , a , a , a , a = self.prepare_config_and_inputs()
a = True
a = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
a = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def lowerCamelCase__ ( self :str , __magic_name__ :Optional[int] , __magic_name__ :Union[str, Any] , __magic_name__ :Any , __magic_name__ :List[str] , __magic_name__ :Optional[int] , __magic_name__ :Optional[int] , __magic_name__ :Union[str, Any] ):
'''simple docstring'''
a = MraModel(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
a = model(__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ )
a = model(__magic_name__ , token_type_ids=__magic_name__ )
a = model(__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase__ ( self :List[Any] , __magic_name__ :int , __magic_name__ :Optional[Any] , __magic_name__ :List[str] , __magic_name__ :int , __magic_name__ :Dict , __magic_name__ :Any , __magic_name__ :List[str] , __magic_name__ :str , __magic_name__ :Tuple , ):
'''simple docstring'''
a = True
a = MraModel(__magic_name__ )
model.to(__magic_name__ )
model.eval()
a = model(
__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , encoder_hidden_states=__magic_name__ , encoder_attention_mask=__magic_name__ , )
a = model(
__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , encoder_hidden_states=__magic_name__ , )
a = model(__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase__ ( self :Union[str, Any] , __magic_name__ :List[Any] , __magic_name__ :Tuple , __magic_name__ :Optional[int] , __magic_name__ :List[str] , __magic_name__ :Optional[Any] , __magic_name__ :int , __magic_name__ :Tuple ):
'''simple docstring'''
a = MraForMaskedLM(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
a = model(__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , labels=__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase__ ( self :Union[str, Any] , __magic_name__ :Any , __magic_name__ :Tuple , __magic_name__ :Dict , __magic_name__ :Union[str, Any] , __magic_name__ :List[str] , __magic_name__ :List[str] , __magic_name__ :Tuple ):
'''simple docstring'''
a = MraForQuestionAnswering(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
a = model(
__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , start_positions=__magic_name__ , end_positions=__magic_name__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCamelCase__ ( self :Tuple , __magic_name__ :List[str] , __magic_name__ :int , __magic_name__ :Optional[int] , __magic_name__ :Any , __magic_name__ :str , __magic_name__ :Any , __magic_name__ :Tuple ):
'''simple docstring'''
a = self.num_labels
a = MraForSequenceClassification(__magic_name__ )
model.to(__magic_name__ )
model.eval()
a = model(__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , labels=__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase__ ( self :Any , __magic_name__ :int , __magic_name__ :List[Any] , __magic_name__ :List[Any] , __magic_name__ :str , __magic_name__ :Optional[int] , __magic_name__ :Optional[Any] , __magic_name__ :Optional[int] ):
'''simple docstring'''
a = self.num_labels
a = MraForTokenClassification(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
a = model(__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , labels=__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCamelCase__ ( self :Union[str, Any] , __magic_name__ :Optional[int] , __magic_name__ :Optional[Any] , __magic_name__ :Dict , __magic_name__ :Tuple , __magic_name__ :str , __magic_name__ :Tuple , __magic_name__ :List[str] ):
'''simple docstring'''
a = self.num_choices
a = MraForMultipleChoice(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
a = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
a = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
a = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
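        # tile every input across the choice dimension so each of the num_choices branches sees the same features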
a = model(
__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , labels=__magic_name__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCamelCase__ ( self :str ):
'''simple docstring'''
a = self.prepare_config_and_inputs()
        a , a , a , a , a , a , a = config_and_inputs
a = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( __UpperCamelCase , unittest.TestCase ):
UpperCamelCase__ = (
(
MraModel,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
)
if is_torch_available()
else ()
)
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = ()
def lowerCamelCase__ ( self :Tuple ):
'''simple docstring'''
a = MraModelTester(self )
a = ConfigTester(self , config_class=__magic_name__ , hidden_size=37 )
def lowerCamelCase__ ( self :int ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase__ ( self :Tuple ):
'''simple docstring'''
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__magic_name__ )
def lowerCamelCase__ ( self :Optional[int] ):
'''simple docstring'''
a = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
a = type
self.model_tester.create_and_check_model(*__magic_name__ )
def lowerCamelCase__ ( self :Optional[Any] ):
'''simple docstring'''
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__magic_name__ )
def lowerCamelCase__ ( self :List[str] ):
'''simple docstring'''
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__magic_name__ )
def lowerCamelCase__ ( self :Dict ):
'''simple docstring'''
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__magic_name__ )
def lowerCamelCase__ ( self :Optional[Any] ):
'''simple docstring'''
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__magic_name__ )
def lowerCamelCase__ ( self :Tuple ):
'''simple docstring'''
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__magic_name__ )
@slow
def lowerCamelCase__ ( self :List[str] ):
'''simple docstring'''
for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a = MraModel.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
@unittest.skip(reason="""MRA does not output attentions""" )
def lowerCamelCase__ ( self :Any ):
'''simple docstring'''
return
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
@slow
def lowerCamelCase__ ( self :Any ):
'''simple docstring'''
a = MraModel.from_pretrained("""uw-madison/mra-base-512-4""" )
a = torch.arange(256 ).unsqueeze(0 )
with torch.no_grad():
a = model(__magic_name__ )[0]
a = torch.Size((1, 256, 768) )
self.assertEqual(output.shape , __magic_name__ )
a = torch.tensor(
[[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __magic_name__ , atol=1E-4 ) )
@slow
def lowerCamelCase__ ( self :Optional[Any] ):
'''simple docstring'''
a = MraForMaskedLM.from_pretrained("""uw-madison/mra-base-512-4""" )
a = torch.arange(256 ).unsqueeze(0 )
with torch.no_grad():
a = model(__magic_name__ )[0]
a = 5_0265
a = torch.Size((1, 256, vocab_size) )
self.assertEqual(output.shape , __magic_name__ )
a = torch.tensor(
[[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __magic_name__ , atol=1E-4 ) )
@slow
def lowerCamelCase__ ( self :Optional[int] ):
'''simple docstring'''
a = MraForMaskedLM.from_pretrained("""uw-madison/mra-base-4096-8-d3""" )
a = torch.arange(4096 ).unsqueeze(0 )
with torch.no_grad():
a = model(__magic_name__ )[0]
a = 5_0265
a = torch.Size((1, 4096, vocab_size) )
self.assertEqual(output.shape , __magic_name__ )
a = torch.tensor(
[[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __magic_name__ , atol=1E-4 ) )
| 356 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
__UpperCamelCase : int = {
"shi-labs/nat-mini-in1k-224": "https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json",
# See all Nat models at https://huggingface.co/models?filter=nat
}
class __lowerCAmelCase ( __magic_name__ , __magic_name__ ):
UpperCamelCase__ = '''nat'''
UpperCamelCase__ = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self :Any , __magic_name__ :int=4 , __magic_name__ :Dict=3 , __magic_name__ :List[str]=64 , __magic_name__ :Optional[int]=[3, 4, 6, 5] , __magic_name__ :int=[2, 4, 8, 16] , __magic_name__ :str=7 , __magic_name__ :Tuple=3.0 , __magic_name__ :Dict=True , __magic_name__ :List[Any]=0.0 , __magic_name__ :List[Any]=0.0 , __magic_name__ :List[Any]=0.1 , __magic_name__ :Optional[Any]="gelu" , __magic_name__ :Optional[Any]=0.02 , __magic_name__ :Tuple=1E-5 , __magic_name__ :Union[str, Any]=0.0 , __magic_name__ :int=None , __magic_name__ :Any=None , **__magic_name__ :Dict , ):
'''simple docstring'''
super().__init__(**__magic_name__ )
a = patch_size
a = num_channels
a = embed_dim
a = depths
a = len(__magic_name__ )
a = num_heads
a = kernel_size
a = mlp_ratio
a = qkv_bias
a = hidden_dropout_prob
a = attention_probs_dropout_prob
a = drop_path_rate
a = hidden_act
a = layer_norm_eps
a = initializer_range
# we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
a = int(embed_dim * 2 ** (len(__magic_name__ ) - 1) )
a = layer_scale_init_value
a = ["""stem"""] + [F'stage{idx}' for idx in range(1 , len(__magic_name__ ) + 1 )]
a , a = get_aligned_output_features_output_indices(
out_features=__magic_name__ , out_indices=__magic_name__ , stage_names=self.stage_names )
| 347 | 0 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __lowerCAmelCase ( __lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
UpperCamelCase__ = StableDiffusionSAGPipeline
UpperCamelCase__ = TEXT_TO_IMAGE_PARAMS
UpperCamelCase__ = TEXT_TO_IMAGE_BATCH_PARAMS
UpperCamelCase__ = TEXT_TO_IMAGE_IMAGE_PARAMS
UpperCamelCase__ = TEXT_TO_IMAGE_IMAGE_PARAMS
UpperCamelCase__ = False
def lowerCamelCase__ ( self :int ):
'''simple docstring'''
torch.manual_seed(0 )
a = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
a = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=UpperCamelCase_ , set_alpha_to_one=UpperCamelCase_ , )
torch.manual_seed(0 )
a = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
a = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
a = CLIPTextModel(UpperCamelCase_ )
a = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
a = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def lowerCamelCase__ ( self :Dict , __magic_name__ :Union[str, Any] , __magic_name__ :Tuple=0 ):
'''simple docstring'''
if str(UpperCamelCase_ ).startswith("""mps""" ):
a = torch.manual_seed(UpperCamelCase_ )
else:
a = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ )
a = {
"""prompt""": """.""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 1.0,
"""sag_scale""": 1.0,
"""output_type""": """numpy""",
}
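        # sag_scale controls the strength of self-attention guidance; a value > 0 enables it for this dummy run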
return inputs
def lowerCamelCase__ ( self :Optional[int] ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
def lowerCamelCase__ ( self :str ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase__ ( self :int ):
'''simple docstring'''
a = StableDiffusionSAGPipeline.from_pretrained("""CompVis/stable-diffusion-v1-4""" )
a = sag_pipe.to(UpperCamelCase_ )
sag_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
a = """."""
a = torch.manual_seed(0 )
a = sag_pipe(
[prompt] , generator=UpperCamelCase_ , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="""np""" )
a = output.images
a = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
a = np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-2
def lowerCamelCase__ ( self :str ):
'''simple docstring'''
a = StableDiffusionSAGPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""" )
a = sag_pipe.to(UpperCamelCase_ )
sag_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
a = """."""
a = torch.manual_seed(0 )
a = sag_pipe(
[prompt] , generator=UpperCamelCase_ , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="""np""" )
a = output.images
a = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
a = np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-2
def lowerCamelCase__ ( self :str ):
'''simple docstring'''
a = StableDiffusionSAGPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""" )
a = sag_pipe.to(UpperCamelCase_ )
sag_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
a = """."""
a = torch.manual_seed(0 )
a = sag_pipe(
[prompt] , width=768 , height=512 , generator=UpperCamelCase_ , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="""np""" , )
a = output.images
assert image.shape == (1, 512, 768, 3)
| 357 |
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def __A ( __lowerCamelCase , __lowerCamelCase ) -> Optional[int]:
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
# expert layer
a = flax_key_tuple[:-1] + ("""weight""",)
a = torch.permute(__lowerCamelCase , (0, 2, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(__lowerCamelCase ):
# linear layer
a = flax_key_tuple[:-1] + ("""weight""",)
a = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
a = flax_key_tuple[:-1] + ("""weight""",)
return flax_key_tuple, flax_tensor
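# Split a flattened tensorstore layer path into its real parameter name plus the metadata/kvstore entry needed to read the tensor.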
def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> List[Any]:
if "metadata" in layer:
a = layer.split("""metadata""" )
a = """""".join(split_layer[0] )[:-1]
a = [tuple(("""metadata""" + split_layer[1]).split("""/""" ) )]
elif "kvstore" in layer:
a = layer.split("""kvstore""" )
a = """""".join(split_layer[0] )[:-1]
a = [tuple(("""kvstore""" + split_layer[1]).split("""/""" ) )]
else:
a = layer.split("""/""" )
a = """/""".join(split_layer[:-1] )
a = (split_layer[-1],)
if "kvstore/path" in layer:
a = f'{switch_checkpoint_path}/{checkpoint_info[layer]}'
elif "kvstore/driver" in layer:
a = """file"""
else:
a = checkpoint_info[layer]
return curr_real_layer_name, split_layer, content
def __A ( __lowerCamelCase , __lowerCamelCase ) -> Union[str, Any]:
a = rename_keys(__lowerCamelCase )
a = {}
for k, v in current_block.items():
a = v
a = new_current_block
torch.save(__lowerCamelCase , __lowerCamelCase )
def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = WEIGHTS_NAME ) -> List[str]:
a = convert_file_size_to_int(__lowerCamelCase )
a = []
a = {}
a = 0
a = 0
os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase )
with gfile.GFile(switch_checkpoint_path + """/checkpoint""" , """rb""" ) as fp:
a = serialization.msgpack_restore(fp.read() )["""optimizer"""]["""target"""]
a = flatten_dict(__lowerCamelCase , sep="""/""" )
a = {}
for layer in checkpoint_info.keys():
a , a , a = get_key_and_tensorstore_dict(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
if curr_real_layer_name in all_layers:
a = content
else:
a = {split_layer[-1]: content}
for key in all_layers.keys():
# open tensorstore file
a = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
a = torch.tensor(__lowerCamelCase )
a = raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
# use the renaming pattern from the small conversion scripts
a , a = rename_base_flax_keys(tuple(key.split("""/""" ) ) , __lowerCamelCase )
a = """/""".join(__lowerCamelCase )
# If this weight is going to tip up over the maximal size, we split.
if current_block_size + weight_size > max_shard_size:
a = os.path.join(
__lowerCamelCase , weights_name.replace(""".bin""" , f'-{len(__lowerCamelCase )+1:05d}-of-???.bin' ) )
rename_and_save_block(__lowerCamelCase , __lowerCamelCase )
sharded_state_dicts.append(current_block.keys() )
del current_block
a = {}
a = 0
            a = raw_weights.to(getattr(torch , dtype ) )
current_block_size += weight_size
total_size += weight_size
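        # the running totals feed the weight-map index written at the end, mapping each parameter to its shard file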
# Add the last block
a = os.path.join(__lowerCamelCase , weights_name.replace(""".bin""" , f'-{len(__lowerCamelCase )+1:05d}-of-???.bin' ) )
rename_and_save_block(__lowerCamelCase , __lowerCamelCase )
sharded_state_dicts.append(current_block.keys() )
# If we only have one shard, we return it
if len(__lowerCamelCase ) == 1:
return {weights_name: sharded_state_dicts[0]}, None
# Otherwise, let's build the index
a = {}
a = {}
for idx, shard in enumerate(__lowerCamelCase ):
        a = weights_name.replace(
            """.bin""" , f'-{idx+1:05d}-of-{len(sharded_state_dicts ):05d}.bin' )
a = os.path.join(__lowerCamelCase , weights_name.replace(""".bin""" , f'-{idx+1:05d}-of-???.bin' ) )
os.rename(__lowerCamelCase , os.path.join(__lowerCamelCase , __lowerCamelCase ) )
a = shard
for key in shard:
a = shard_file
# Add the metadata
a = {"""total_size""": total_size}
a = {"""metadata""": metadata, """weight_map""": weight_map}
with open(os.path.join(__lowerCamelCase , __lowerCamelCase ) , """w""" , encoding="""utf-8""" ) as f:
a = json.dumps(__lowerCamelCase , indent=2 , sort_keys=__lowerCamelCase ) + """\n"""
f.write(__lowerCamelCase )
return metadata, index
if __name__ == "__main__":
__UpperCamelCase : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--switch_t5x_checkpoint_path",
default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600",
type=str,
required=False,
help="Path to a directory containing a folder per layer. Follows the original Google format.",
)
parser.add_argument("--max_shard_size", default="10GB", required=False, help="Max shard size")
parser.add_argument("--dtype", default="bfloat16", type=str, required=False, help="dtype of the saved model")
parser.add_argument(
"--pytorch_dump_folder_path",
default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted",
type=str,
required=False,
help="Path to the output pytorch model.",
)
__UpperCamelCase : Any = parser.parse_args()
shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def __A ( ) -> Tuple:
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer
a = SwitchTransformersConfig.from_pretrained("""google/switch-base-8""" )
config.save_pretrained("""/home/arthur_huggingface_co/transformers/switch_converted""" )
a = SwitchTransformersForConditionalGeneration.from_pretrained(
"""/home/arthur_huggingface_co/transformers/switch_converted""" , device_map="""auto""" )
a = TaTokenizer.from_pretrained("""t5-small""" )
a = """A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."""
a = tokenizer(__lowerCamelCase , return_tensors="""pt""" ).input_ids
a = model.generate(__lowerCamelCase , decoder_start_token_id=0 )
print(tokenizer.decode(out[0] ) )
| 347 | 0 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class __lowerCAmelCase ( unittest.TestCase ):
@property
def lowerCamelCase__ ( self :Any ):
'''simple docstring'''
torch.manual_seed(0 )
a = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , )
return model
@property
def lowerCamelCase__ ( self :Tuple ):
'''simple docstring'''
torch.manual_seed(0 )
a = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=3 , )
return model
@property
def lowerCamelCase__ ( self :str ):
'''simple docstring'''
torch.manual_seed(0 )
a = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModel(__lowerCAmelCase )
def lowerCamelCase__ ( self :Optional[int] ):
'''simple docstring'''
a = self.dummy_uncond_unet
a = DDIMScheduler()
a = self.dummy_vq_model
a = LDMPipeline(unet=__lowerCAmelCase , vqvae=__lowerCAmelCase , scheduler=__lowerCAmelCase )
ldm.to(__lowerCAmelCase )
ldm.set_progress_bar_config(disable=__lowerCAmelCase )
a = torch.manual_seed(0 )
a = ldm(generator=__lowerCAmelCase , num_inference_steps=2 , output_type="""numpy""" ).images
a = torch.manual_seed(0 )
a = ldm(generator=__lowerCAmelCase , num_inference_steps=2 , output_type="""numpy""" , return_dict=__lowerCAmelCase )[0]
a = image[0, -3:, -3:, -1]
a = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
a = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172] )
a = 1E-2 if torch_device != """mps""" else 3E-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
def lowerCamelCase__ ( self :List[Any] ):
'''simple docstring'''
a = LDMPipeline.from_pretrained("""CompVis/ldm-celebahq-256""" )
ldm.to(__lowerCAmelCase )
ldm.set_progress_bar_config(disable=__lowerCAmelCase )
a = torch.manual_seed(0 )
a = ldm(generator=__lowerCAmelCase , num_inference_steps=5 , output_type="""numpy""" ).images
a = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
a = np.array([0.4399, 0.44975, 0.46825, 0.474, 0.4359, 0.4581, 0.45095, 0.4341, 0.4447] )
a = 1E-2 if torch_device != """mps""" else 3E-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
| 358 |
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2 as cva
import numpy as np
# Parameters
__UpperCamelCase : Union[str, Any] = (720, 1_280) # Height, Width
__UpperCamelCase : Any = (0.4, 0.6) # if height or width is lower than this scale, drop it.
__UpperCamelCase : str = 1 / 100
__UpperCamelCase : Optional[int] = ""
__UpperCamelCase : List[Any] = ""
__UpperCamelCase : Union[str, Any] = ""
__UpperCamelCase : Tuple = 250
def __A ( ) -> None:
a , a = get_dataset(__lowerCamelCase , __lowerCamelCase )
for index in range(__lowerCamelCase ):
a = random.sample(range(len(__lowerCamelCase ) ) , 4 )
a , a , a = update_image_and_anno(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , filter_scale=__lowerCamelCase , )
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
a = random_chars(32 )
a = path.split(os.sep )[-1].rsplit(""".""" , 1 )[0]
a = f'{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}'
cva.imwrite(f'{file_root}.jpg' , __lowerCamelCase , [cva.IMWRITE_JPEG_QUALITY, 85] )
print(f'Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}' )
a = []
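        # convert the corner-format boxes back to YOLO label lines: class x_center y_center width height (all normalized)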
for anno in new_annos:
a = anno[3] - anno[1]
a = anno[4] - anno[2]
a = anno[1] + width / 2
a = anno[2] + height / 2
a = f'{anno[0]} {x_center} {y_center} {width} {height}'
annos_list.append(__lowerCamelCase )
with open(f'{file_root}.txt' , """w""" ) as outfile:
outfile.write("""\n""".join(line for line in annos_list ) )
def __A ( __lowerCamelCase , __lowerCamelCase ) -> tuple[list, list]:
a = []
a = []
for label_file in glob.glob(os.path.join(__lowerCamelCase , """*.txt""" ) ):
a = label_file.split(os.sep )[-1].rsplit(""".""" , 1 )[0]
with open(__lowerCamelCase ) as in_file:
a = in_file.readlines()
a = os.path.join(__lowerCamelCase , f'{label_name}.jpg' )
a = []
for obj_list in obj_lists:
a = obj_list.rstrip("""\n""" ).split(""" """ )
a = float(obj[1] ) - float(obj[3] ) / 2
a = float(obj[2] ) - float(obj[4] ) / 2
a = float(obj[1] ) + float(obj[3] ) / 2
a = float(obj[2] ) + float(obj[4] ) / 2
boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] )
if not boxes:
continue
img_paths.append(__lowerCamelCase )
labels.append(__lowerCamelCase )
return img_paths, labels
def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = 0.0 , ) -> tuple[list, list, str]:
a = np.zeros([output_size[0], output_size[1], 3] , dtype=np.uinta )
a = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
a = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
a = int(scale_x * output_size[1] )
a = int(scale_y * output_size[0] )
a = []
a = []
for i, index in enumerate(__lowerCamelCase ):
a = all_img_list[index]
path_list.append(__lowerCamelCase )
a = all_annos[index]
a = cva.imread(__lowerCamelCase )
if i == 0: # top-left
a = cva.resize(__lowerCamelCase , (divid_point_x, divid_point_y) )
a = img
for bbox in img_annos:
a = bbox[1] * scale_x
a = bbox[2] * scale_y
a = bbox[3] * scale_x
a = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 1: # top-right
a = cva.resize(__lowerCamelCase , (output_size[1] - divid_point_x, divid_point_y) )
a = img
for bbox in img_annos:
a = scale_x + bbox[1] * (1 - scale_x)
a = bbox[2] * scale_y
a = scale_x + bbox[3] * (1 - scale_x)
a = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 2: # bottom-left
a = cva.resize(__lowerCamelCase , (divid_point_x, output_size[0] - divid_point_y) )
a = img
for bbox in img_annos:
a = bbox[1] * scale_x
a = scale_y + bbox[2] * (1 - scale_y)
a = bbox[3] * scale_x
a = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
else: # bottom-right
a = cva.resize(
__lowerCamelCase , (output_size[1] - divid_point_x, output_size[0] - divid_point_y) )
a = img
for bbox in img_annos:
a = scale_x + bbox[1] * (1 - scale_x)
a = scale_y + bbox[2] * (1 - scale_y)
a = scale_x + bbox[3] * (1 - scale_x)
a = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
    # Remove bounding boxes smaller than the filter scale
if filter_scale > 0:
a = [
anno
for anno in new_anno
if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
]
return output_img, new_anno, path_list[0]
def __A ( __lowerCamelCase ) -> str:
    assert __lowerCamelCase > 1, "The number of characters should be greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code ) for _ in range(__lowerCamelCase ) )
if __name__ == "__main__":
main()
print("DONE ✅")
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class __lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
UpperCamelCase__ = BarthezTokenizer
UpperCamelCase__ = BarthezTokenizerFast
UpperCamelCase__ = True
UpperCamelCase__ = True
def lowerCamelCase__ ( self :Any ):
'''simple docstring'''
super().setUp()
a : Optional[int] = BarthezTokenizerFast.from_pretrained("""moussaKam/mbarthez""" )
tokenizer.save_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname , legacy_format=a_ )
a : Dict = tokenizer
def lowerCamelCase__ ( self :str ):
'''simple docstring'''
a : List[Any] = """<pad>"""
a : str = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(a_ ) , a_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(a_ ) , a_ )
def lowerCamelCase__ ( self :Optional[int] ):
'''simple docstring'''
a : List[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(vocab_keys[-1] , """<mask>""" )
self.assertEqual(len(a_ ) , 10_1122 )
def lowerCamelCase__ ( self :List[str] ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 10_1122 )
@require_torch
def lowerCamelCase__ ( self :str ):
'''simple docstring'''
a : str = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
a : int = [0, 57, 3018, 7_0307, 91, 2]
a : Optional[Any] = self.tokenizer(
a_ , max_length=len(a_ ) , padding=a_ , truncation=a_ , return_tensors="""pt""" )
self.assertIsInstance(a_ , a_ )
self.assertEqual((2, 6) , batch.input_ids.shape )
self.assertEqual((2, 6) , batch.attention_mask.shape )
a : Optional[int] = batch.input_ids.tolist()[0]
self.assertListEqual(a_ , a_ )
def lowerCamelCase__ ( self :int ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
a : Union[str, Any] = self.get_tokenizer()
a : str = self.get_rust_tokenizer()
a : Optional[Any] = """I was born in 92000, and this is falsé."""
a : Optional[Any] = tokenizer.tokenize(a_ )
a : str = rust_tokenizer.tokenize(a_ )
self.assertListEqual(a_ , a_ )
a : Dict = tokenizer.encode(a_ , add_special_tokens=a_ )
a : List[str] = rust_tokenizer.encode(a_ , add_special_tokens=a_ )
self.assertListEqual(a_ , a_ )
a : List[Any] = self.get_rust_tokenizer()
a : Dict = tokenizer.encode(a_ )
a : Union[str, Any] = rust_tokenizer.encode(a_ )
self.assertListEqual(a_ , a_ )
@slow
def lowerCamelCase__ ( self :Dict ):
'''simple docstring'''
a : List[str] = {"""input_ids""": [[0, 490, 1_4328, 4507, 354, 47, 4_3669, 95, 25, 7_8117, 2_0215, 1_9779, 190, 22, 400, 4, 3_5343, 8_0310, 603, 86, 2_4937, 105, 3_3438, 9_4762, 196, 3_9642, 7, 15, 1_5933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_0534, 87, 25, 66, 3358, 196, 5_5289, 8, 8_2961, 81, 2204, 7_5203, 7, 15, 763, 1_2956, 216, 178, 1_4328, 9595, 1377, 6_9693, 7, 448, 7_1021, 196, 1_8106, 1437, 1_3974, 108, 9083, 4, 4_9315, 7, 39, 86, 1326, 2793, 4_6333, 4, 448, 196, 7_4588, 7, 4_9315, 7, 39, 21, 822, 3_8470, 74, 21, 6_6723, 6_2480, 8, 2_2050, 5, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
        # moussaKam/mbarthez is a French model, so we also use French texts.
a : int = [
"""Le transformeur est un modèle d'apprentissage profond introduit en 2017, """
"""utilisé principalement dans le domaine du traitement automatique des langues (TAL).""",
"""À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus """
"""pour gérer des données séquentielles, telles que le langage naturel, pour des tâches """
"""telles que la traduction et la synthèse de texte.""",
]
self.tokenizer_integration_test_util(
expected_encoding=a_ , model_name="""moussaKam/mbarthez""" , revision="""c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6""" , sequences=a_ , )
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
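# Lazy import structure: heavy torch/vision submodules are only imported when the
# corresponding attribute is first accessed through the _LazyModule set up below.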
__UpperCamelCase : Optional[Any] = {
"configuration_mobilenet_v2": [
"MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP",
"MobileNetV2Config",
"MobileNetV2OnnxConfig",
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : int = ["MobileNetV2FeatureExtractor"]
__UpperCamelCase : Tuple = ["MobileNetV2ImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : int = [
"MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST",
"MobileNetV2ForImageClassification",
"MobileNetV2ForSemanticSegmentation",
"MobileNetV2Model",
"MobileNetV2PreTrainedModel",
"load_tf_weights_in_mobilenet_v2",
]
if TYPE_CHECKING:
from .configuration_mobilenet_va import (
MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileNetVaConfig,
MobileNetVaOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilenet_va import MobileNetVaFeatureExtractor
from .image_processing_mobilenet_va import MobileNetVaImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilenet_va import (
MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileNetVaForImageClassification,
MobileNetVaForSemanticSegmentation,
MobileNetVaModel,
MobileNetVaPreTrainedModel,
load_tf_weights_in_mobilenet_va,
)
else:
import sys
__UpperCamelCase : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
__UpperCamelCase : List[Any] = {
"""microsoft/markuplm-base""": """https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json""",
"""microsoft/markuplm-large""": """https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json""",
}
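# MarkupLM extends a BERT-style configuration with XPath-embedding hyperparameters
# (max_depth, tag/subscript vocabulary sizes, pad ids) for markup-language inputs.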
class __lowerCAmelCase ( __magic_name__ ):
UpperCamelCase__ = 'markuplm'
def __init__( self :Optional[int] , __magic_name__ :Any=3_0522 , __magic_name__ :Optional[int]=768 , __magic_name__ :List[str]=12 , __magic_name__ :Optional[Any]=12 , __magic_name__ :Union[str, Any]=3072 , __magic_name__ :str="gelu" , __magic_name__ :Any=0.1 , __magic_name__ :Any=0.1 , __magic_name__ :Dict=512 , __magic_name__ :int=2 , __magic_name__ :Union[str, Any]=0.02 , __magic_name__ :str=1E-1_2 , __magic_name__ :int=0 , __magic_name__ :str=0 , __magic_name__ :List[Any]=2 , __magic_name__ :List[str]=256 , __magic_name__ :str=1024 , __magic_name__ :Any=216 , __magic_name__ :List[Any]=1001 , __magic_name__ :Optional[int]=32 , __magic_name__ :Dict=50 , __magic_name__ :str="absolute" , __magic_name__ :Tuple=True , __magic_name__ :Optional[Any]=None , **__magic_name__ :Tuple , ):
'''simple docstring'''
super().__init__(
pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , **__lowerCAmelCase , )
a = vocab_size
a = hidden_size
a = num_hidden_layers
a = num_attention_heads
a = hidden_act
a = intermediate_size
a = hidden_dropout_prob
a = attention_probs_dropout_prob
a = max_position_embeddings
a = type_vocab_size
a = initializer_range
a = layer_norm_eps
a = position_embedding_type
a = use_cache
a = classifier_dropout
# additional properties
a = max_depth
a = max_xpath_tag_unit_embeddings
a = max_xpath_subs_unit_embeddings
a = tag_pad_id
a = subs_pad_id
a = xpath_unit_hidden_size
def __A ( __lowerCamelCase ) -> bool:
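    """
    Check whether a non-negative integer is a palindrome (reads the same reversed).

    Illustrative doctests:
    >>> __A(121)
    True
    >>> __A(10)
    False
    >>> __A(-101)
    False
    """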
    if __lowerCamelCase < 0:
        return False
    num = __lowerCamelCase
    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10
    return num_copy == rev_num
if __name__ == "__main__":
import doctest
doctest.testmod()
import torch
from torch import nn
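# Projected adaptive log-softmax (Transformer-XL style): the vocabulary is split into a
# frequent-token "head" plus tail clusters, so rare tokens are scored in two stages
# (cluster log-probability, then within-cluster log-probability) to save compute.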
class __lowerCAmelCase ( nn.Module ):
def __init__( self :Any , __magic_name__ :Optional[int] , __magic_name__ :Dict , __magic_name__ :Optional[Any] , __magic_name__ :int , __magic_name__ :List[Any]=1 , __magic_name__ :Any=False ):
'''simple docstring'''
super().__init__()
a = n_token
a = d_embed
a = d_proj
a = cutoffs + [n_token]
a = [0] + self.cutoffs
a = div_val
a = self.cutoffs[0]
a = len(self.cutoffs ) - 1
a = self.shortlist_size + self.n_clusters
if self.n_clusters > 0:
a = nn.Parameter(torch.zeros(self.n_clusters , self.d_embed ) )
a = nn.Parameter(torch.zeros(self.n_clusters ) )
a = nn.ModuleList()
a = nn.ParameterList()
if div_val == 1:
for i in range(len(self.cutoffs ) ):
if d_proj != d_embed:
self.out_projs.append(nn.Parameter(torch.FloatTensor(_snake_case , _snake_case ) ) )
else:
self.out_projs.append(_snake_case )
self.out_layers.append(nn.Linear(_snake_case , _snake_case ) )
else:
for i in range(len(self.cutoffs ) ):
a , a = self.cutoff_ends[i], self.cutoff_ends[i + 1]
a = d_embed // (div_val**i)
self.out_projs.append(nn.Parameter(torch.FloatTensor(_snake_case , _snake_case ) ) )
self.out_layers.append(nn.Linear(_snake_case , r_idx - l_idx ) )
a = keep_order
def lowerCamelCase__ ( self :Optional[int] , __magic_name__ :Union[str, Any] , __magic_name__ :Optional[int] , __magic_name__ :List[str] , __magic_name__ :int ):
'''simple docstring'''
if proj is None:
a = nn.functional.linear(_snake_case , _snake_case , bias=_snake_case )
else:
# if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
a = nn.functional.linear(_snake_case , proj.t().contiguous() )
a = nn.functional.linear(_snake_case , _snake_case , bias=_snake_case )
# else:
# logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
# if bias is not None:
# logit = logit + bias
return logit
def lowerCamelCase__ ( self :int , __magic_name__ :Tuple , __magic_name__ :List[str]=None , __magic_name__ :Union[str, Any]=False ):
'''simple docstring'''
if labels is not None:
# Shift so that tokens < n predict n
a = hidden[..., :-1, :].contiguous()
a = labels[..., 1:].contiguous()
a = hidden.view(-1 , hidden.size(-1 ) )
a = labels.view(-1 )
if hidden.size(0 ) != labels.size(0 ):
raise RuntimeError("""Input and labels should have the same size in the batch dimension.""" )
else:
a = hidden.view(-1 , hidden.size(-1 ) )
if self.n_clusters == 0:
a = self._compute_logit(_snake_case , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
if labels is not None:
a = labels != -100
a = torch.zeros_like(_snake_case , dtype=hidden.dtype , device=hidden.device )
a = (
-nn.functional.log_softmax(_snake_case , dim=-1 )[mask].gather(1 , labels[mask].unsqueeze(1 ) ).squeeze(1 )
)
else:
a = nn.functional.log_softmax(_snake_case , dim=-1 )
else:
# construct weights and biases
a , a = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
a , a = self.cutoff_ends[i], self.cutoff_ends[i + 1]
a = self.out_layers[0].weight[l_idx:r_idx]
a = self.out_layers[0].bias[l_idx:r_idx]
else:
a = self.out_layers[i].weight
a = self.out_layers[i].bias
if i == 0:
a = torch.cat([weight_i, self.cluster_weight] , dim=0 )
a = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(_snake_case )
biases.append(_snake_case )
a , a , a = weights[0], biases[0], self.out_projs[0]
a = self._compute_logit(_snake_case , _snake_case , _snake_case , _snake_case )
a = nn.functional.log_softmax(_snake_case , dim=1 )
if labels is None:
a = hidden.new_empty((head_logit.size(0 ), self.n_token) )
else:
a = torch.zeros_like(_snake_case , dtype=hidden.dtype , device=hidden.device )
a = 0
a = [0] + self.cutoffs
for i in range(len(_snake_case ) - 1 ):
a , a = cutoff_values[i], cutoff_values[i + 1]
if labels is not None:
a = (labels >= l_idx) & (labels < r_idx)
a = mask_i.nonzero().squeeze()
if indices_i.numel() == 0:
continue
a = labels.index_select(0 , _snake_case ) - l_idx
a = head_logprob.index_select(0 , _snake_case )
a = hidden.index_select(0 , _snake_case )
else:
a = hidden
if i == 0:
if labels is not None:
a = head_logprob_i.gather(1 , target_i[:, None] ).squeeze(1 )
else:
a = head_logprob[:, : self.cutoffs[0]]
else:
a , a , a = weights[i], biases[i], self.out_projs[i]
a = self._compute_logit(_snake_case , _snake_case , _snake_case , _snake_case )
a = nn.functional.log_softmax(_snake_case , dim=1 )
a = self.cutoffs[0] + i - 1 # No probability for the head cluster
if labels is not None:
a = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
1 , target_i[:, None] ).squeeze(1 )
else:
a = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
a = logprob_i
if labels is not None:
if (hasattr(self , """keep_order""" ) and self.keep_order) or keep_order:
out.index_copy_(0 , _snake_case , -logprob_i )
else:
out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i )
offset += logprob_i.size(0 )
return out
def lowerCamelCase__ ( self :str , __magic_name__ :Union[str, Any] ):
'''simple docstring'''
if self.n_clusters == 0:
a = self._compute_logit(_snake_case , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
return nn.functional.log_softmax(_snake_case , dim=-1 )
else:
# construct weights and biases
a , a = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
a , a = self.cutoff_ends[i], self.cutoff_ends[i + 1]
a = self.out_layers[0].weight[l_idx:r_idx]
a = self.out_layers[0].bias[l_idx:r_idx]
else:
a = self.out_layers[i].weight
a = self.out_layers[i].bias
if i == 0:
a = torch.cat([weight_i, self.cluster_weight] , dim=0 )
a = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(_snake_case )
biases.append(_snake_case )
a , a , a = weights[0], biases[0], self.out_projs[0]
a = self._compute_logit(_snake_case , _snake_case , _snake_case , _snake_case )
a = hidden.new_empty((head_logit.size(0 ), self.n_token) )
a = nn.functional.log_softmax(_snake_case , dim=1 )
a = [0] + self.cutoffs
for i in range(len(_snake_case ) - 1 ):
a , a = cutoff_values[i], cutoff_values[i + 1]
if i == 0:
a = head_logprob[:, : self.cutoffs[0]]
else:
a , a , a = weights[i], biases[i], self.out_projs[i]
a = self._compute_logit(_snake_case , _snake_case , _snake_case , _snake_case )
a = nn.functional.log_softmax(_snake_case , dim=1 )
a = head_logprob[:, -i] + tail_logprob_i
a = logprob_i
return out
import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ):
UpperCamelCase__ = CanineTokenizer
UpperCamelCase__ = False
def lowerCamelCase__ ( self :Tuple ):
'''simple docstring'''
super().setUp()
a = CanineTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def lowerCamelCase__ ( self :Dict ):
'''simple docstring'''
return CanineTokenizer.from_pretrained("""google/canine-s""" )
def lowerCamelCase__ ( self :Tuple , **__magic_name__ :Dict ):
'''simple docstring'''
a = self.tokenizer_class.from_pretrained(self.tmpdirname , **__magic_name__ )
a = 1024
return tokenizer
@require_torch
def lowerCamelCase__ ( self :int ):
'''simple docstring'''
a = self.canine_tokenizer
a = ["""Life is like a box of chocolates.""", """You never know what you're gonna get."""]
# fmt: off
a = [5_7344, 76, 105, 102, 101, 32, 105, 115, 32, 108, 105, 107, 101, 32, 97, 32, 98, 111, 120, 32, 111, 102, 32, 99, 104, 111, 99, 111, 108, 97, 116, 101, 115, 46, 5_7345, 0, 0, 0, 0]
# fmt: on
a = tokenizer(__magic_name__ , padding=__magic_name__ , return_tensors="""pt""" )
self.assertIsInstance(__magic_name__ , __magic_name__ )
a = list(batch.input_ids.numpy()[0] )
self.assertListEqual(__magic_name__ , __magic_name__ )
self.assertEqual((2, 39) , batch.input_ids.shape )
self.assertEqual((2, 39) , batch.attention_mask.shape )
@require_torch
def lowerCamelCase__ ( self :Any ):
'''simple docstring'''
a = self.canine_tokenizer
a = ["""Once there was a man.""", """He wrote a test in HuggingFace Tranformers."""]
a = tokenizer(__magic_name__ , padding=__magic_name__ , return_tensors="""pt""" )
# check if input_ids, attention_mask and token_type_ids are returned
self.assertIn("""input_ids""" , __magic_name__ )
self.assertIn("""attention_mask""" , __magic_name__ )
self.assertIn("""token_type_ids""" , __magic_name__ )
@require_torch
def lowerCamelCase__ ( self :Any ):
'''simple docstring'''
a = self.canine_tokenizer
a = [
"""What's the weater?""",
"""It's about 25 degrees.""",
]
a = tokenizer(
text_target=__magic_name__ , max_length=32 , padding="""max_length""" , truncation=__magic_name__ , return_tensors="""pt""" )
self.assertEqual(32 , targets["""input_ids"""].shape[1] )
def lowerCamelCase__ ( self :Union[str, Any] ):
'''simple docstring'''
a = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
a = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
# Isolate this from the other tests because we save additional tokens/etc
a = tempfile.mkdtemp()
a = """ He is very happy, UNwant\u00E9d,running"""
a = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
tokenizer.save_pretrained(__magic_name__ )
a = tokenizer.__class__.from_pretrained(__magic_name__ )
a = after_tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
self.assertListEqual(__magic_name__ , __magic_name__ )
shutil.rmtree(__magic_name__ )
a = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
# Isolate this from the other tests because we save additional tokens/etc
a = tempfile.mkdtemp()
a = """ He is very happy, UNwant\u00E9d,running"""
a = tokenizer.additional_special_tokens
# We can add a new special token for Canine as follows:
a = chr(0Xe_0_0_7 )
additional_special_tokens.append(__magic_name__ )
tokenizer.add_special_tokens({"""additional_special_tokens""": additional_special_tokens} )
a = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
tokenizer.save_pretrained(__magic_name__ )
a = tokenizer.__class__.from_pretrained(__magic_name__ )
a = after_tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
self.assertListEqual(__magic_name__ , __magic_name__ )
self.assertIn(__magic_name__ , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
a = tokenizer.__class__.from_pretrained(__magic_name__ , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(__magic_name__ )
def lowerCamelCase__ ( self :List[Any] ):
'''simple docstring'''
a = self.get_tokenizers(do_lower_case=__magic_name__ )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
a , a = self.get_clean_sequence(__magic_name__ )
# a special token for Canine can be defined as follows:
a = 0Xe_0_0_5
a = chr(__magic_name__ )
tokenizer.add_special_tokens({"""cls_token""": special_token} )
a = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
self.assertEqual(len(__magic_name__ ) , 1 )
a = tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=__magic_name__ )
a = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
a = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
a = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
self.assertEqual(__magic_name__ , input_encoded + special_token_id )
a = tokenizer.decode(__magic_name__ , skip_special_tokens=__magic_name__ )
self.assertTrue(special_token not in decoded )
def lowerCamelCase__ ( self :int ):
'''simple docstring'''
a = self.get_tokenizers(do_lower_case=__magic_name__ )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
a = chr(0Xe_0_0_5 )
a = chr(0Xe_0_0_6 )
# `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=__magic_name__ )
# `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
# which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
tokenizer.add_special_tokens({"""additional_special_tokens""": [SPECIAL_TOKEN_2]} )
a = tokenizer.tokenize(__magic_name__ )
a = tokenizer.tokenize(__magic_name__ )
self.assertEqual(len(__magic_name__ ) , 1 )
self.assertEqual(len(__magic_name__ ) , 1 )
self.assertEqual(token_a[0] , __magic_name__ )
self.assertEqual(token_a[0] , __magic_name__ )
@require_tokenizers
def lowerCamelCase__ ( self :Dict ):
'''simple docstring'''
a = self.get_tokenizers(do_lower_case=__magic_name__ )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
# a special token for Canine can be defined as follows:
a = 0Xe_0_0_6
a = chr(__magic_name__ )
a = AddedToken(__magic_name__ , lstrip=__magic_name__ )
tokenizer.add_special_tokens({"""additional_special_tokens""": [new_token]} )
with tempfile.TemporaryDirectory() as tmp_dir_name:
tokenizer.save_pretrained(__magic_name__ )
tokenizer.from_pretrained(__magic_name__ )
def lowerCamelCase__ ( self :int ):
'''simple docstring'''
a = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(__magic_name__ )
with open(os.path.join(__magic_name__ , """special_tokens_map.json""" ) , encoding="""utf-8""" ) as json_file:
a = json.load(__magic_name__ )
with open(os.path.join(__magic_name__ , """tokenizer_config.json""" ) , encoding="""utf-8""" ) as json_file:
a = json.load(__magic_name__ )
# a special token for Canine can be defined as follows:
a = 0Xe_0_0_6
a = chr(__magic_name__ )
a = [new_token_a]
a = [new_token_a]
with open(os.path.join(__magic_name__ , """special_tokens_map.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
json.dump(__magic_name__ , __magic_name__ )
with open(os.path.join(__magic_name__ , """tokenizer_config.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
json.dump(__magic_name__ , __magic_name__ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
a = tokenizer_class.from_pretrained(__magic_name__ , extra_ids=0 )
self.assertIn(__magic_name__ , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , )
a = 0Xe_0_0_7
a = chr(__magic_name__ )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
a = [AddedToken(__magic_name__ , lstrip=__magic_name__ )]
a = tokenizer_class.from_pretrained(
__magic_name__ , additional_special_tokens=__magic_name__ , extra_ids=0 )
self.assertIn(__magic_name__ , tokenizer.additional_special_tokens )
# self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) )
@require_tokenizers
def lowerCamelCase__ ( self :Union[str, Any] ):
'''simple docstring'''
a = self.get_tokenizers(do_lower_case=__magic_name__ )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
a = """hello world"""
if self.space_between_special_tokens:
a = """[CLS] hello world [SEP]"""
else:
a = input
a = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
a = tokenizer.decode(__magic_name__ , spaces_between_special_tokens=self.space_between_special_tokens )
self.assertIn(__magic_name__ , [output, output.lower()] )
def lowerCamelCase__ ( self :List[str] ):
'''simple docstring'''
a = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
a = [
"""bos_token""",
"""eos_token""",
"""unk_token""",
"""sep_token""",
"""pad_token""",
"""cls_token""",
"""mask_token""",
]
a = """a"""
a = ord(__magic_name__ )
for attr in attributes_list:
setattr(__magic_name__ , attr + """_id""" , __magic_name__ )
self.assertEqual(getattr(__magic_name__ , __magic_name__ ) , __magic_name__ )
self.assertEqual(getattr(__magic_name__ , attr + """_id""" ) , __magic_name__ )
setattr(__magic_name__ , attr + """_id""" , __magic_name__ )
self.assertEqual(getattr(__magic_name__ , __magic_name__ ) , __magic_name__ )
self.assertEqual(getattr(__magic_name__ , attr + """_id""" ) , __magic_name__ )
setattr(__magic_name__ , """additional_special_tokens_ids""" , [] )
self.assertListEqual(getattr(__magic_name__ , """additional_special_tokens""" ) , [] )
self.assertListEqual(getattr(__magic_name__ , """additional_special_tokens_ids""" ) , [] )
a = 0Xe_0_0_6
a = chr(__magic_name__ )
setattr(__magic_name__ , """additional_special_tokens_ids""" , [additional_special_token_id] )
self.assertListEqual(getattr(__magic_name__ , """additional_special_tokens""" ) , [additional_special_token] )
self.assertListEqual(getattr(__magic_name__ , """additional_special_tokens_ids""" ) , [additional_special_token_id] )
def lowerCamelCase__ ( self :Any ):
'''simple docstring'''
pass
def lowerCamelCase__ ( self :str ):
'''simple docstring'''
pass
def lowerCamelCase__ ( self :Optional[int] ):
'''simple docstring'''
pass
def lowerCamelCase__ ( self :List[Any] ):
'''simple docstring'''
pass
def lowerCamelCase__ ( self :List[str] ):
'''simple docstring'''
pass
def lowerCamelCase__ ( self :List[str] ):
'''simple docstring'''
pass
def lowerCamelCase__ ( self :Any ):
'''simple docstring'''
pass
def lowerCamelCase__ ( self :List[str] ):
'''simple docstring'''
pass
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
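# Community CLIP-guided style-transfer pipeline: the latents and text/CLIP embeddings of
# a content image and a style image are blended via spherical interpolation, and the
# denoising loop can additionally be steered by a CLIP guidance loss in cond_fn.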
def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Optional[int]:
if isinstance(lowercase_ , torch.Tensor ):
return image
elif isinstance(lowercase_ , PIL.Image.Image ):
a = [image]
if isinstance(image[0] , PIL.Image.Image ):
a = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION["""lanczos"""] ) )[None, :] for i in image]
a = np.concatenate(lowercase_ , axis=0 )
a = np.array(lowercase_ ).astype(np.floataa ) / 255.0
a = image.transpose(0 , 3 , 1 , 2 )
a = 2.0 * image - 1.0
a = torch.from_numpy(lowercase_ )
elif isinstance(image[0] , torch.Tensor ):
a = torch.cat(lowercase_ , dim=0 )
return image
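# Spherical linear interpolation (slerp) between two tensors; falls back to plain
# linear interpolation when the inputs are nearly parallel (|dot| > DOT_THRESHOLD).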
def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=0.9995 ) -> Any:
if not isinstance(lowercase_ , np.ndarray ):
a = True
a = va.device
a = va.cpu().numpy()
a = va.cpu().numpy()
a = np.sum(va * va / (np.linalg.norm(lowercase_ ) * np.linalg.norm(lowercase_ )) )
if np.abs(lowercase_ ) > DOT_THRESHOLD:
a = (1 - t) * va + t * va
else:
a = np.arccos(lowercase_ )
a = np.sin(lowercase_ )
a = theta_a * t
a = np.sin(lowercase_ )
a = np.sin(theta_a - theta_t ) / sin_theta_a
a = sin_theta_t / sin_theta_a
a = sa * va + sa * va
if inputs_are_torch:
a = torch.from_numpy(lowercase_ ).to(lowercase_ )
return va
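# Squared spherical distance between L2-normalized embeddings, used as the CLIP
# guidance loss: half the chord length, arcsin to an angle, then squared and doubled.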
def __A ( __lowerCamelCase , __lowerCamelCase ) -> Union[str, Any]:
a = F.normalize(lowercase_ , dim=-1 )
a = F.normalize(lowercase_ , dim=-1 )
return (x - y).norm(dim=-1 ).div(2 ).arcsin().pow(2 ).mul(2 )
def __A ( __lowerCamelCase , __lowerCamelCase ) -> Union[str, Any]:
for param in model.parameters():
a = value
class __lowerCAmelCase ( __snake_case ):
def __init__( self :List[Any] , __magic_name__ :List[Any] , __magic_name__ :Any , __magic_name__ :Dict , __magic_name__ :Tuple , __magic_name__ :Any , __magic_name__ :Any , __magic_name__ :Dict , __magic_name__ :Dict=None , __magic_name__ :int=None , __magic_name__ :Tuple=None , ):
'''simple docstring'''
super().__init__()
self.register_modules(
vae=__magic_name__ , text_encoder=__magic_name__ , clip_model=__magic_name__ , tokenizer=__magic_name__ , unet=__magic_name__ , scheduler=__magic_name__ , feature_extractor=__magic_name__ , coca_model=__magic_name__ , coca_tokenizer=__magic_name__ , coca_transform=__magic_name__ , )
a = (
feature_extractor.size
if isinstance(feature_extractor.size , __magic_name__ )
else feature_extractor.size["""shortest_edge"""]
)
a = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std )
set_requires_grad(self.text_encoder , __magic_name__ )
set_requires_grad(self.clip_model , __magic_name__ )
def lowerCamelCase__ ( self :str , __magic_name__ :str = "auto" ):
'''simple docstring'''
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
a = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(__magic_name__ )
def lowerCamelCase__ ( self :Optional[Any] ):
'''simple docstring'''
self.enable_attention_slicing(__magic_name__ )
def lowerCamelCase__ ( self :Dict ):
'''simple docstring'''
set_requires_grad(self.vae , __magic_name__ )
def lowerCamelCase__ ( self :Tuple ):
'''simple docstring'''
set_requires_grad(self.vae , __magic_name__ )
def lowerCamelCase__ ( self :Union[str, Any] ):
'''simple docstring'''
set_requires_grad(self.unet , __magic_name__ )
def lowerCamelCase__ ( self :Union[str, Any] ):
'''simple docstring'''
set_requires_grad(self.unet , __magic_name__ )
def lowerCamelCase__ ( self :Dict , __magic_name__ :Optional[Any] , __magic_name__ :Optional[int] , __magic_name__ :List[Any] ):
'''simple docstring'''
a = min(int(num_inference_steps * strength ) , __magic_name__ )
a = max(num_inference_steps - init_timestep , 0 )
a = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def lowerCamelCase__ ( self :str , __magic_name__ :Optional[Any] , __magic_name__ :Any , __magic_name__ :List[Any] , __magic_name__ :Dict , __magic_name__ :str , __magic_name__ :Optional[int]=None ):
'''simple docstring'''
if not isinstance(__magic_name__ , torch.Tensor ):
raise ValueError(F'`image` has to be of type `torch.Tensor` but is {type(__magic_name__ )}' )
a = image.to(device=__magic_name__ , dtype=__magic_name__ )
if isinstance(__magic_name__ , __magic_name__ ):
a = [
self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(__magic_name__ )
]
a = torch.cat(__magic_name__ , dim=0 )
else:
a = self.vae.encode(__magic_name__ ).latent_dist.sample(__magic_name__ )
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
a = 0.18215 * init_latents
a = init_latents.repeat_interleave(__magic_name__ , dim=0 )
a = randn_tensor(init_latents.shape , generator=__magic_name__ , device=__magic_name__ , dtype=__magic_name__ )
# get latents
a = self.scheduler.add_noise(__magic_name__ , __magic_name__ , __magic_name__ )
a = init_latents
return latents
def lowerCamelCase__ ( self :Union[str, Any] , __magic_name__ :Dict ):
'''simple docstring'''
a = self.coca_transform(__magic_name__ ).unsqueeze(0 )
with torch.no_grad(), torch.cuda.amp.autocast():
a = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) )
a = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
return generated.split("""<end_of_text>""" )[0].replace("""<start_of_text>""" , """""" ).rstrip(""" .,""" )
def lowerCamelCase__ ( self :Dict , __magic_name__ :List[str] , __magic_name__ :Dict ):
'''simple docstring'''
a = self.feature_extractor.preprocess(__magic_name__ )
a = torch.from_numpy(clip_image_input["""pixel_values"""][0] ).unsqueeze(0 ).to(self.device ).half()
a = self.clip_model.get_image_features(__magic_name__ )
a = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=__magic_name__ )
a = image_embeddings_clip.repeat_interleave(__magic_name__ , dim=0 )
return image_embeddings_clip
@torch.enable_grad()
def lowerCamelCase__ ( self :Any , __magic_name__ :List[str] , __magic_name__ :Dict , __magic_name__ :Union[str, Any] , __magic_name__ :Optional[Any] , __magic_name__ :Optional[int] , __magic_name__ :Union[str, Any] , __magic_name__ :Optional[Any] , ):
'''simple docstring'''
a = latents.detach().requires_grad_()
a = self.scheduler.scale_model_input(__magic_name__ , __magic_name__ )
# predict the noise residual
a = self.unet(__magic_name__ , __magic_name__ , encoder_hidden_states=__magic_name__ ).sample
if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
a = self.scheduler.alphas_cumprod[timestep]
a = 1 - alpha_prod_t
# compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
a = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
a = torch.sqrt(__magic_name__ )
a = pred_original_sample * (fac) + latents * (1 - fac)
elif isinstance(self.scheduler , __magic_name__ ):
a = self.scheduler.sigmas[index]
a = latents - sigma * noise_pred
else:
raise ValueError(F'scheduler type {type(self.scheduler )} not supported' )
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
a = 1 / 0.18215 * sample
a = self.vae.decode(__magic_name__ ).sample
a = (image / 2 + 0.5).clamp(0 , 1 )
a = transforms.Resize(self.feature_extractor_size )(__magic_name__ )
a = self.normalize(__magic_name__ ).to(latents.dtype )
a = self.clip_model.get_image_features(__magic_name__ )
a = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=__magic_name__ )
a = spherical_dist_loss(__magic_name__ , __magic_name__ ).mean() * clip_guidance_scale
a = -torch.autograd.grad(__magic_name__ , __magic_name__ )[0]
if isinstance(self.scheduler , __magic_name__ ):
a = latents.detach() + grads * (sigma**2)
a = noise_pred_original
else:
a = noise_pred_original - torch.sqrt(__magic_name__ ) * grads
return noise_pred, latents
@torch.no_grad()
def __call__( self :str , __magic_name__ :int , __magic_name__ :Optional[Any] , __magic_name__ :List[Any] = None , __magic_name__ :str = None , __magic_name__ :Optional[int] = 512 , __magic_name__ :Union[str, Any] = 512 , __magic_name__ :Union[str, Any] = 0.6 , __magic_name__ :Optional[Any] = 50 , __magic_name__ :int = 7.5 , __magic_name__ :int = 1 , __magic_name__ :List[Any] = 0.0 , __magic_name__ :Any = 100 , __magic_name__ :Optional[int] = None , __magic_name__ :Dict = "pil" , __magic_name__ :Optional[Any] = True , __magic_name__ :Any = 0.8 , __magic_name__ :List[Any] = 0.1 , __magic_name__ :List[Any] = 0.1 , ):
'''simple docstring'''
if isinstance(__magic_name__ , __magic_name__ ) and len(__magic_name__ ) != batch_size:
raise ValueError(F'You have passed {batch_size} batch_size, but only {len(__magic_name__ )} generators.' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F'`height` and `width` have to be divisible by 8 but are {height} and {width}.' )
if isinstance(__magic_name__ , torch.Generator ) and batch_size > 1:
a = [generator] + [None] * (batch_size - 1)
a = [
("""model""", self.coca_model is None),
("""tokenizer""", self.coca_tokenizer is None),
("""transform""", self.coca_transform is None),
]
a = [x[0] for x in coca_is_none if x[1]]
a = """, """.join(__magic_name__ )
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(__magic_name__ ):
raise ValueError(
F'Content prompt is None and CoCa [{coca_is_none_str}] is None.'
F'Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.' )
a = self.get_image_description(__magic_name__ )
if style_prompt is None:
if len(__magic_name__ ):
raise ValueError(
F'Style prompt is None and CoCa [{coca_is_none_str}] is None.'
F' Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.' )
a = self.get_image_description(__magic_name__ )
# get prompt text embeddings for content and style
a = self.tokenizer(
__magic_name__ , padding="""max_length""" , max_length=self.tokenizer.model_max_length , truncation=__magic_name__ , return_tensors="""pt""" , )
a = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
a = self.tokenizer(
__magic_name__ , padding="""max_length""" , max_length=self.tokenizer.model_max_length , truncation=__magic_name__ , return_tensors="""pt""" , )
a = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
a = slerp(__magic_name__ , __magic_name__ , __magic_name__ )
# duplicate text embeddings for each generation per prompt
a = text_embeddings.repeat_interleave(__magic_name__ , dim=0 )
# set timesteps
a = """offset""" in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
a = {}
if accepts_offset:
a = 1
self.scheduler.set_timesteps(__magic_name__ , **__magic_name__ )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
self.scheduler.timesteps.to(self.device )
a , a = self.get_timesteps(__magic_name__ , __magic_name__ , self.device )
a = timesteps[:1].repeat(__magic_name__ )
# Preprocess image
a = preprocess(__magic_name__ , __magic_name__ , __magic_name__ )
a = self.prepare_latents(
__magic_name__ , __magic_name__ , __magic_name__ , text_embeddings.dtype , self.device , __magic_name__ )
a = preprocess(__magic_name__ , __magic_name__ , __magic_name__ )
a = self.prepare_latents(
__magic_name__ , __magic_name__ , __magic_name__ , text_embeddings.dtype , self.device , __magic_name__ )
a = slerp(__magic_name__ , __magic_name__ , __magic_name__ )
if clip_guidance_scale > 0:
a = self.get_clip_image_embeddings(__magic_name__ , __magic_name__ )
a = self.get_clip_image_embeddings(__magic_name__ , __magic_name__ )
a = slerp(
__magic_name__ , __magic_name__ , __magic_name__ )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
a = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
a = content_text_input.input_ids.shape[-1]
a = self.tokenizer([""""""] , padding="""max_length""" , max_length=__magic_name__ , return_tensors="""pt""" )
a = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
a = uncond_embeddings.repeat_interleave(__magic_name__ , dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
a = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 reproducibility of results with the CompVis implementation.
        # However, this currently doesn't work on `mps`.
a = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
a = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
a = torch.randn(__magic_name__ , generator=__magic_name__ , device="""cpu""" , dtype=__magic_name__ ).to(
self.device )
else:
a = torch.randn(__magic_name__ , generator=__magic_name__ , device=self.device , dtype=__magic_name__ )
else:
if latents.shape != latents_shape:
raise ValueError(F'Unexpected latents shape, got {latents.shape}, expected {latents_shape}' )
a = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
a = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
a = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
a = {}
if accepts_eta:
a = eta
# check if the scheduler accepts generator
a = """generator""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
if accepts_generator:
a = generator
with self.progress_bar(total=__magic_name__ ):
for i, t in enumerate(__magic_name__ ):
# expand the latents if we are doing classifier free guidance
a = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
a = self.scheduler.scale_model_input(__magic_name__ , __magic_name__ )
# predict the noise residual
a = self.unet(__magic_name__ , __magic_name__ , encoder_hidden_states=__magic_name__ ).sample
# perform classifier free guidance
if do_classifier_free_guidance:
a , a = noise_pred.chunk(2 )
a = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
a = (
text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
)
a , a = self.cond_fn(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , )
# compute the previous noisy sample x_t -> x_t-1
a = self.scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
a = 1 / 0.18215 * latents
a = self.vae.decode(__magic_name__ ).sample
a = (image / 2 + 0.5).clamp(0 , 1 )
a = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
a = self.numpy_to_pil(__magic_name__ )
if not return_dict:
return (image, None)
        return StableDiffusionPipelineOutput(images=__magic_name__ , nsfw_content_detected=__magic_name__ )
def __A ( __lowerCamelCase ) -> bool:
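    """
    Return True when the integer is even, via a bitwise AND with 1. Note that in
    Python ``&`` binds tighter than ``==``, so the expression below parses as
    ``(n & 1) == 0``; the parentheses just make that explicit.

    Illustrative doctests:
    >>> __A(2)
    True
    >>> __A(3)
    False
    >>> __A(0)
    True
    """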
    return (__lowerCamelCase & 1) == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
def __A ( __lowerCamelCase ) -> str:
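    """
    Convert an integer to its binary representation as a ``0b``-prefixed string.

    Illustrative doctests:
    >>> __A(5)
    '0b101'
    >>> __A(-5)
    '-0b101'
    >>> __A(0)
    '0b0'
    """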
    if isinstance(__lowerCamelCase , float ):
        raise TypeError("""'float' object cannot be interpreted as an integer""" )
    if isinstance(__lowerCamelCase , str ):
        raise TypeError("""'str' object cannot be interpreted as an integer""" )
    if __lowerCamelCase == 0:
        return "0b0"
    negative = False
    num = __lowerCamelCase
    if num < 0:
        negative = True
        num = -num
    binary = []
    while num > 0:
        binary.insert(0 , num % 2 )
        num >>= 1
    if negative:
        return "-0b" + "".join(str(e ) for e in binary )
    return "0b" + "".join(str(e ) for e in binary )
if __name__ == "__main__":
import doctest
doctest.testmod()
def __A ( __lowerCamelCase ) -> int:
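    """
    Maximum product over all contiguous subarrays, tracking both the running
    maximum and minimum so that sign flips are handled correctly.

    Illustrative doctests:
    >>> __A([2, 3, -2, 4])
    6
    >>> __A([-2, 0, -1])
    0
    """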
    numbers = __lowerCamelCase
    if not numbers:
        return 0
    if not isinstance(numbers , (list, tuple) ) or not all(
        isinstance(number , int ) for number in numbers ):
        raise ValueError("""numbers must be an iterable of integers""" )
    max_till_now = min_till_now = max_prod = numbers[0]
    for i in range(1 , len(numbers ) ):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            max_till_now , min_till_now = min_till_now , max_till_now
        max_till_now = max(number , max_till_now * number )
        min_till_now = min(number , min_till_now * number )
        # update the maximum product found till now
        max_prod = max(max_prod , max_till_now )
    return max_prod
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
__UpperCamelCase : List[str] = logging.getLogger(__name__)
require_version("pytorch_lightning>=1.0.4")
__UpperCamelCase : List[str] = {
'''base''': AutoModel,
'''sequence-classification''': AutoModelForSequenceClassification,
'''question-answering''': AutoModelForQuestionAnswering,
'''pretraining''': AutoModelForPreTraining,
'''token-classification''': AutoModelForTokenClassification,
'''language-modeling''': AutoModelWithLMHead,
'''summarization''': AutoModelForSeqaSeqLM,
'''translation''': AutoModelForSeqaSeqLM,
}
# update this and the import above to support new schedulers from transformers.optimization
__UpperCamelCase : Any = {
'''linear''': get_linear_schedule_with_warmup,
'''cosine''': get_cosine_schedule_with_warmup,
'''cosine_w_restarts''': get_cosine_with_hard_restarts_schedule_with_warmup,
'''polynomial''': get_polynomial_decay_schedule_with_warmup,
# '': get_constant_schedule, # not supported for now
# '': get_constant_schedule_with_warmup, # not supported for now
}
__UpperCamelCase : List[str] = sorted(arg_to_scheduler.keys())
__UpperCamelCase : Dict = '''{''' + ''', '''.join(arg_to_scheduler_choices) + '''}'''
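# Generic PyTorch Lightning wrapper used by the transformers examples: it bundles an
# Auto* model/config/tokenizer plus standard optimizer and LR-scheduler setup.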
class __lowerCAmelCase ( pl.LightningModule ):
def __init__( self :List[Any] , __magic_name__ :Dict , __magic_name__ :Tuple=None , __magic_name__ :str="base" , __magic_name__ :Any=None , __magic_name__ :List[Any]=None , __magic_name__ :Optional[int]=None , **__magic_name__ :List[str] , ):
'''simple docstring'''
super().__init__()
# TODO: move to self.save_hyperparameters()
# self.save_hyperparameters()
# can also expand arguments into trainer signature for easier reading
self.save_hyperparameters(_SCREAMING_SNAKE_CASE )
a = 0
a = Path(self.hparams.output_dir )
a = self.hparams.cache_dir if self.hparams.cache_dir else None
if config is None:
a = AutoConfig.from_pretrained(
self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({"""num_labels""": num_labels} if num_labels is not None else {}) , cache_dir=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
else:
a = config
a = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
for p in extra_model_params:
if getattr(self.hparams , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
assert hasattr(self.config , _SCREAMING_SNAKE_CASE ), F'model config doesn\'t have a `{p}` attribute'
setattr(self.config , _SCREAMING_SNAKE_CASE , getattr(self.hparams , _SCREAMING_SNAKE_CASE ) )
if tokenizer is None:
a = AutoTokenizer.from_pretrained(
self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path , cache_dir=_SCREAMING_SNAKE_CASE , )
else:
a = tokenizer
a = MODEL_MODES[mode]
if model is None:
a = self.model_type.from_pretrained(
self.hparams.model_name_or_path , from_tf=bool(""".ckpt""" in self.hparams.model_name_or_path ) , config=self.config , cache_dir=_SCREAMING_SNAKE_CASE , )
else:
a = model
def lowerCamelCase__ ( self :List[Any] , *__magic_name__ :str , **__magic_name__ :List[Any] ):
'''simple docstring'''
a = self.model_type.from_pretrained(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self :Any ):
'''simple docstring'''
a = arg_to_scheduler[self.hparams.lr_scheduler]
a = get_schedule_func(
self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() )
a = {"scheduler": scheduler, "interval": "step", "frequency": 1}
return scheduler
def lowerCamelCase__ ( self :Optional[int] ):
'''simple docstring'''
a = self.model
a = ["bias", "LayerNorm.weight"]
a = [
{
"params": [
p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay )
            ], # check these named parameters
"weight_decay": self.hparams.weight_decay,
},
{
"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )],
"weight_decay": 0.0,
},
]
if self.hparams.adafactor:
a = Adafactor(
_SCREAMING_SNAKE_CASE , lr=self.hparams.learning_rate , scale_parameter=_SCREAMING_SNAKE_CASE , relative_step=_SCREAMING_SNAKE_CASE )
else:
a = AdamW(
_SCREAMING_SNAKE_CASE , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon )
a = optimizer
a = self.get_lr_scheduler()
return [optimizer], [scheduler]
def lowerCamelCase__ ( self :Tuple , __magic_name__ :Optional[Any] , __magic_name__ :List[Any] ):
'''simple docstring'''
return self.validation_step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self :str , __magic_name__ :List[Any] ):
'''simple docstring'''
return self.validation_end(_SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self :Dict ):
'''simple docstring'''
a = max(1 , self.hparams.gpus ) # TODO: consider num_tpu_cores
a = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs
def lowerCamelCase__ ( self :Union[str, Any] , __magic_name__ :Any ):
'''simple docstring'''
if stage == "test":
a = len(self.test_dataloader().dataset )
else:
a = self.get_dataloader("""train""" , self.hparams.train_batch_size , shuffle=_SCREAMING_SNAKE_CASE )
a = len(self.train_dataloader().dataset )
def lowerCamelCase__ ( self :Optional[int] , __magic_name__ :Optional[Any] , __magic_name__ :Optional[Any] , __magic_name__ :Tuple = False ):
'''simple docstring'''
raise NotImplementedError("""You must implement this for your task""" )
def lowerCamelCase__ ( self :List[str] ):
'''simple docstring'''
return self.train_loader
def lowerCamelCase__ ( self :str ):
'''simple docstring'''
return self.get_dataloader("""dev""" , self.hparams.eval_batch_size , shuffle=_SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self :Optional[int] ):
'''simple docstring'''
return self.get_dataloader("""test""" , self.hparams.eval_batch_size , shuffle=_SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self :int , __magic_name__ :Dict ):
'''simple docstring'''
return os.path.join(
self.hparams.data_dir , """cached_{}_{}_{}""".format(
_SCREAMING_SNAKE_CASE , list(filter(_SCREAMING_SNAKE_CASE , self.hparams.model_name_or_path.split("""/""" ) ) ).pop() , str(self.hparams.max_seq_length ) , ) , )
@pl.utilities.rank_zero_only
def lowerCamelCase__ ( self :str , __magic_name__ :Tuple ):
'''simple docstring'''
a = self.output_dir.joinpath("""best_tfmr""" )
a = self.step_count
self.model.save_pretrained(_SCREAMING_SNAKE_CASE )
self.tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE )
@staticmethod
def lowerCamelCase__ ( __magic_name__ :List[str] , __magic_name__ :Tuple ):
'''simple docstring'''
parser.add_argument(
"""--model_name_or_path""" , default=_SCREAMING_SNAKE_CASE , type=_SCREAMING_SNAKE_CASE , required=_SCREAMING_SNAKE_CASE , help="""Path to pretrained model or model identifier from huggingface.co/models""" , )
parser.add_argument(
"""--config_name""" , default="""""" , type=_SCREAMING_SNAKE_CASE , help="""Pretrained config name or path if not the same as model_name""" )
parser.add_argument(
"""--tokenizer_name""" , default=_SCREAMING_SNAKE_CASE , type=_SCREAMING_SNAKE_CASE , help="""Pretrained tokenizer name or path if not the same as model_name""" , )
parser.add_argument(
"""--cache_dir""" , default=str(Path(_SCREAMING_SNAKE_CASE ).parent / """test_run""" / """cache""" ) , type=_SCREAMING_SNAKE_CASE , help="""Where do you want to store the pre-trained models downloaded from huggingface.co""" , )
parser.add_argument(
"""--encoder_layerdrop""" , type=_SCREAMING_SNAKE_CASE , help="""Encoder layer dropout probability (Optional). Goes into model.config""" , )
parser.add_argument(
"""--decoder_layerdrop""" , type=_SCREAMING_SNAKE_CASE , help="""Decoder layer dropout probability (Optional). Goes into model.config""" , )
parser.add_argument(
"""--dropout""" , type=_SCREAMING_SNAKE_CASE , help="""Dropout probability (Optional). Goes into model.config""" , )
parser.add_argument(
"""--attention_dropout""" , type=_SCREAMING_SNAKE_CASE , help="""Attention dropout probability (Optional). Goes into model.config""" , )
parser.add_argument("""--learning_rate""" , default=5E-5 , type=_SCREAMING_SNAKE_CASE , help="""The initial learning rate for Adam.""" )
parser.add_argument(
"""--lr_scheduler""" , default="""linear""" , choices=_SCREAMING_SNAKE_CASE , metavar=_SCREAMING_SNAKE_CASE , type=_SCREAMING_SNAKE_CASE , help="""Learning rate scheduler""" , )
parser.add_argument("""--weight_decay""" , default=0.0 , type=_SCREAMING_SNAKE_CASE , help="""Weight decay if we apply some.""" )
parser.add_argument("""--adam_epsilon""" , default=1E-8 , type=_SCREAMING_SNAKE_CASE , help="""Epsilon for Adam optimizer.""" )
parser.add_argument("""--warmup_steps""" , default=0 , type=_SCREAMING_SNAKE_CASE , help="""Linear warmup over warmup_steps.""" )
parser.add_argument("""--num_workers""" , default=4 , type=_SCREAMING_SNAKE_CASE , help="""kwarg passed to DataLoader""" )
parser.add_argument("""--num_train_epochs""" , dest="""max_epochs""" , default=3 , type=_SCREAMING_SNAKE_CASE )
parser.add_argument("""--train_batch_size""" , default=32 , type=_SCREAMING_SNAKE_CASE )
parser.add_argument("""--eval_batch_size""" , default=32 , type=_SCREAMING_SNAKE_CASE )
parser.add_argument("""--adafactor""" , action="""store_true""" )
class __lowerCAmelCase ( pl.Callback ):
def lowerCamelCase__ ( self :Optional[int] , __magic_name__ :List[Any] , __magic_name__ :Optional[int] ):
'''simple docstring'''
if (
trainer.is_global_zero and trainer.global_rank == 0
        ): # we initialize the retriever only on the master worker with RAY; in newer pytorch-lightning versions, accelerators are removed.
pl_module.model.rag.retriever.init_retrieval() # better to use hook functions.
class __lowerCAmelCase ( pl.Callback ):
def lowerCamelCase__ ( self :List[str] , __magic_name__ :List[Any] , __magic_name__ :str ):
'''simple docstring'''
for name, param in pl_module.model.rag.named_parameters():
if param.grad is None:
print(_SCREAMING_SNAKE_CASE )
class __lowerCAmelCase ( pl.Callback ):
def lowerCamelCase__ ( self :Dict , __magic_name__ :Optional[int] , __magic_name__ :Tuple ):
'''simple docstring'''
a = trainer.lr_schedulers[0]["scheduler"]
a = {F'lr_group_{i}': lr for i, lr in enumerate(lr_scheduler.get_lr() )}
pl_module.logger.log_metrics(_SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self :Optional[Any] , __magic_name__ :List[str] , __magic_name__ :Any ):
'''simple docstring'''
rank_zero_info("""***** Validation results *****""" )
a = trainer.callback_metrics
# Log results
for key in sorted(_SCREAMING_SNAKE_CASE ):
if key not in ["log", "progress_bar"]:
rank_zero_info("""{} = {}\n""".format(_SCREAMING_SNAKE_CASE , str(metrics[key] ) ) )
def lowerCamelCase__ ( self :List[str] , __magic_name__ :Optional[int] , __magic_name__ :int ):
'''simple docstring'''
rank_zero_info("""***** Test results *****""" )
a = trainer.callback_metrics
# Log and save results to file
a = os.path.join(pl_module.hparams.output_dir , """test_results.txt""" )
with open(_SCREAMING_SNAKE_CASE , """w""" ) as writer:
for key in sorted(_SCREAMING_SNAKE_CASE ):
if key not in ["log", "progress_bar"]:
rank_zero_info("""{} = {}\n""".format(_SCREAMING_SNAKE_CASE , str(metrics[key] ) ) )
writer.write("""{} = {}\n""".format(_SCREAMING_SNAKE_CASE , str(metrics[key] ) ) )
def add_generic_args(parser, root_dir) -> None:
    #  To allow all pl args uncomment the following line
    #  parser = pl.Trainer.add_argparse_args(parser)
    parser.add_argument(
        "--output_dir",
        default=str(Path(__file__).parent / "test_run" / "model_checkpoints"),
        type=str,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O2",
        help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            "See details at https://nvidia.github.io/apex/amp.html"
        ),
    )
    parser.add_argument("--n_tpu_cores", dest="tpu_cores", type=int)
    parser.add_argument("--max_grad_norm", dest="gradient_clip_val", default=1.0, type=float, help="Max gradient norm")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_predict", action="store_true", help="Whether to run predictions on the test set.")
    parser.add_argument(
        "--gradient_accumulation_steps",
        dest="accumulate_grad_batches",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
    parser.add_argument(
        "--data_dir",
        default=str(Path(__file__).parent / "test_run" / "dummy-train-data"),
        type=str,
        help="The input data dir. Should contain the training files for the CoNLL-2003 NER task.",
    )
def generic_train(
    model,
    args,
    early_stopping_callback=None,
    logger=True,  # can pass WandbLogger() here
    extra_callbacks=[],
    checkpoint_callback=None,
    logging_callback=None,
    **extra_train_kwargs,
):
    pl.seed_everything(args.seed)

    # init model
    odir = Path(model.hparams.output_dir)
    odir.mkdir(exist_ok=True)

    # add custom checkpoints
    if checkpoint_callback is None:
        checkpoint_callback = pl.callbacks.ModelCheckpoint(
            filepath=args.output_dir, prefix="checkpoint", monitor="val_loss", mode="min", save_top_k=1
        )
    if early_stopping_callback:
        extra_callbacks.append(early_stopping_callback)
    if logging_callback is None:
        logging_callback = LoggingCallback()

    train_params = {}

    if args.fp16:
        train_params["precision"] = 16

    if args.gpus > 1:
        train_params["accelerator"] = "auto"
        train_params["strategy"] = "ddp"

    train_params["accumulate_grad_batches"] = args.accumulate_grad_batches
    train_params["profiler"] = None
    train_params["devices"] = "auto"

    trainer = pl.Trainer.from_argparse_args(
        args,
        weights_summary=None,
        callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback],
        logger=logger,
        val_check_interval=1,
        num_sanity_val_steps=2,
        **train_params,
    )

    if args.do_train:
        trainer.fit(model)

    else:
        print("RAG modeling tests with new set functions successfully executed!")
    return trainer
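# Hypothetical usage sketch (script wiring assumed, not shown in this file):
#   parser = argparse.ArgumentParser()
#   add_generic_args(parser, os.getcwd())
#   parser = module.add_model_specific_args(parser, os.getcwd())
#   args = parser.parse_args()
#   trainer = generic_train(module, args, early_stopping_callback=es_callback)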
| 364 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"configuration_wav2vec2": ["WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Wav2Vec2Config"],
"feature_extraction_wav2vec2": ["Wav2Vec2FeatureExtractor"],
"processing_wav2vec2": ["Wav2Vec2Processor"],
"tokenization_wav2vec2": ["Wav2Vec2CTCTokenizer", "Wav2Vec2Tokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_wav2vec2"] = [
"WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
"Wav2Vec2ForAudioFrameClassification",
"Wav2Vec2ForCTC",
"Wav2Vec2ForMaskedLM",
"Wav2Vec2ForPreTraining",
"Wav2Vec2ForSequenceClassification",
"Wav2Vec2ForXVector",
"Wav2Vec2Model",
"Wav2Vec2PreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_wav2vec2"] = [
"TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFWav2Vec2ForCTC",
"TFWav2Vec2Model",
"TFWav2Vec2PreTrainedModel",
"TFWav2Vec2ForSequenceClassification",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_wav2vec2"] = [
"FlaxWav2Vec2ForCTC",
"FlaxWav2Vec2ForPreTraining",
"FlaxWav2Vec2Model",
"FlaxWav2Vec2PreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .processing_wavaveca import WavaVecaProcessor
from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavaveca import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
WavaVecaForAudioFrameClassification,
WavaVecaForCTC,
WavaVecaForMaskedLM,
WavaVecaForPreTraining,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
WavaVecaModel,
WavaVecaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWavaVecaForCTC,
TFWavaVecaForSequenceClassification,
TFWavaVecaModel,
TFWavaVecaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_wavaveca import (
FlaxWavaVecaForCTC,
FlaxWavaVecaForPreTraining,
FlaxWavaVecaModel,
FlaxWavaVecaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 347 | 0 |
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def signature(word: str) -> str:
    return "".join(sorted(word))


def anagram(my_word: str) -> list[str]:
    return word_by_signature[signature(my_word)]
data = Path(__file__).parent.joinpath("words.txt").read_text(encoding="utf-8")
word_list = sorted({word.strip().lower() for word in data.splitlines()})
word_by_signature = collections.defaultdict(list)
for word in word_list:
word_by_signature[signature(word)].append(word)
if __name__ == "__main__":
    all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
with open("anagrams.txt", "w") as file:
file.write("all_anagrams = \n ")
file.write(pprint.pformat(all_anagrams))
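# Example of the lookup above (actual results depend on the contents of words.txt):
#   signature("eat") == "aet"
#   anagram("eat") -> every word sharing that signature, e.g. ["ate", "eat", "eta"]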
| 365 |
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize("""dataset_size""" , [None, 400 * 2**20, 600 * 2**20] )
@pytest.mark.parametrize("""input_in_memory_max_size""" , ["""default""", 0, 100 * 2**20, 900 * 2**20] )
def test_is_small_dataset(dataset_size, input_in_memory_max_size, monkeypatch):
    if input_in_memory_max_size != "default":
        monkeypatch.setattr(datasets.config, "IN_MEMORY_MAX_SIZE", input_in_memory_max_size)
    in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE
    if input_in_memory_max_size == "default":
        assert in_memory_max_size == 0
    else:
        assert in_memory_max_size == input_in_memory_max_size
    if dataset_size and in_memory_max_size:
        expected = dataset_size < in_memory_max_size
    else:
        expected = False
    result = is_small_dataset(dataset_size)
    assert result == expected
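# The invariant exercised above: is_small_dataset(size) is True only when
# 0 < size < IN_MEMORY_MAX_SIZE; with the default max size of 0 (or size=None)
# it is always False, i.e. in-memory caching stays disabled.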
| 347 | 0 |
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model) -> int:
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum(np.prod(p.size()) for p in model_parameters)
    return params
logger = logging.getLogger(__name__)
def get_checkpoint_callback(output_dir, metric) -> ModelCheckpoint:
    """Saves the best model by validation metric."""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    elif metric == "loss":
        exp = "{val_avg_loss:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function."
        )
    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=1,
        every_n_epochs=1,
    )
    return checkpoint_callback


def get_early_stopping_callback(metric, patience) -> EarlyStopping:
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )
class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(self, trainer, pl_module, type_path, save_generations=True) -> None:
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
            results_file.parent.mkdir(exist_ok=True)
            generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()
        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
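# Minimal wiring sketch for the helpers above (trainer construction details assumed):
#   checkpoint = get_checkpoint_callback(args.output_dir, metric="rouge2")
#   early_stop = get_early_stopping_callback(metric="rouge2", patience=3)
#   trainer = pl.Trainer(callbacks=[Seq2SeqLoggingCallback(), checkpoint, early_stop])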
| 366 |
from __future__ import annotations

from fractions import Fraction
from math import gcd, sqrt


def is_sq(number: int) -> bool:
    sq = int(number**0.5)
    return number == sq * sq


def add_three(x_num, x_den, y_num, y_den, z_num, z_den) -> tuple[int, int]:
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom


def solution(order: int = 35) -> int:
    unique_s: set = set()
    total: Fraction = Fraction(0)
    fraction_sum: tuple[int, int]

    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)

                    # n=2
                    z_num = x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)

                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)

                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)

    for num, den in unique_s:
        total += Fraction(num, den)

    return total.denominator + total.numerator


if __name__ == "__main__":
    print(f"{solution() = }")
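# Worked instance for the n=1 branch above: x = 1/3 and y = 1/6 give
# z = 1/3 + 1/6 = 1/2, all denominators within order=35, and
# add_three(1, 3, 1, 6, 1, 2) reduces 1/3 + 1/6 + 1/2 to (1, 1),
# which is the tuple that enters unique_s.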
| 347 | 0 |
from functools import lru_cache
@lru_cache
def factorial(num: int) -> int:
    if num < 0:
        raise ValueError("Number should not be negative.")
    return 1 if num in (0, 1) else num * factorial(num - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
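# Example: factorial(5) == 120. Because of @lru_cache, a later call to
# factorial(6) reuses the cached factorial(5) and costs one extra multiplication.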
| 367 |
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class FlaxRoFormerModelTester(unittest.TestCase):
def __init__( self :List[str] , __magic_name__ :List[str] , __magic_name__ :List[Any]=13 , __magic_name__ :Any=7 , __magic_name__ :Optional[int]=True , __magic_name__ :List[Any]=True , __magic_name__ :Optional[int]=True , __magic_name__ :Union[str, Any]=True , __magic_name__ :Any=99 , __magic_name__ :List[str]=32 , __magic_name__ :List[str]=5 , __magic_name__ :str=4 , __magic_name__ :str=37 , __magic_name__ :Optional[int]="gelu" , __magic_name__ :int=0.1 , __magic_name__ :Dict=0.1 , __magic_name__ :List[str]=512 , __magic_name__ :Tuple=16 , __magic_name__ :Tuple=2 , __magic_name__ :List[str]=0.02 , __magic_name__ :Any=4 , ):
'''simple docstring'''
a = parent
a = batch_size
a = seq_length
a = is_training
a = use_attention_mask
a = use_token_type_ids
a = use_labels
a = vocab_size
a = hidden_size
a = num_hidden_layers
a = num_attention_heads
a = intermediate_size
a = hidden_act
a = hidden_dropout_prob
a = attention_probs_dropout_prob
a = max_position_embeddings
a = type_vocab_size
a = type_sequence_label_size
a = initializer_range
a = num_choices
def lowerCamelCase__ ( self :Optional[int] ):
'''simple docstring'''
a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
a = None
if self.use_attention_mask:
a = random_attention_mask([self.batch_size, self.seq_length] )
a = None
if self.use_token_type_ids:
a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
a = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__magic_name__ , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def lowerCamelCase__ ( self :Optional[Any] ):
'''simple docstring'''
a = self.prepare_config_and_inputs()
a , a , a , a = config_and_inputs
a = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
@require_flax
class FlaxRoFormerModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRoFormerModel,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )
    def setUp(self):
        self.model_tester = FlaxRoFormerModelTester(self)
    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("junnyu/roformer_chinese_small", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRoFormerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        vocab_size = 50000
        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape, expected_shape)

        expected_slice = jnp.array(
            [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]]
        )
        self.assertTrue(jnp.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 347 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class GenericTester(unittest.TestCase):
    def test_flatten_dict(self):
        input_dict = {
            "task_specific_params": {
                "summarization": {"length_penalty": 1.0, "max_length": 128, "min_length": 12, "num_beams": 4},
                "summarization_cnn": {"length_penalty": 2.0, "max_length": 142, "min_length": 56, "num_beams": 4},
                "summarization_xsum": {"length_penalty": 1.0, "max_length": 62, "min_length": 11, "num_beams": 6},
            }
        }
        expected_dict = {
            "task_specific_params.summarization.length_penalty": 1.0,
            "task_specific_params.summarization.max_length": 128,
            "task_specific_params.summarization.min_length": 12,
            "task_specific_params.summarization.num_beams": 4,
            "task_specific_params.summarization_cnn.length_penalty": 2.0,
            "task_specific_params.summarization_cnn.max_length": 142,
            "task_specific_params.summarization_cnn.min_length": 56,
            "task_specific_params.summarization_cnn.num_beams": 4,
            "task_specific_params.summarization_xsum.length_penalty": 1.0,
            "task_specific_params.summarization_xsum.max_length": 62,
            "task_specific_params.summarization_xsum.min_length": 11,
            "task_specific_params.summarization_xsum.num_beams": 6,
        }
        self.assertEqual(flatten_dict(input_dict), expected_dict)

    def test_transpose_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(transpose(x), x.transpose()))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), x.transpose((1, 2, 0))))

    @require_torch
    def test_transpose_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_tf
    def test_transpose_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_flax
    def test_transpose_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x), np.asarray(transpose(t))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), np.asarray(transpose(t, axes=(1, 2, 0)))))

    def test_reshape_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.reshape(x, (4, 3))))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.reshape(x, (12, 5))))

    @require_torch
    def test_reshape_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_tf
    def test_reshape_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_flax
    def test_reshape_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.asarray(reshape(t, (4, 3)))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.asarray(reshape(t, (12, 5)))))

    def test_squeeze_numpy(self):
        x = np.random.randn(1, 3, 4)
        self.assertTrue(np.allclose(squeeze(x), np.squeeze(x)))

        x = np.random.randn(1, 4, 1, 5)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.squeeze(x, axis=2)))

    @require_torch
    def test_squeeze_torch(self):
        x = np.random.randn(1, 3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_tf
    def test_squeeze_tf(self):
        x = np.random.randn(1, 3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_flax
    def test_squeeze_flax(self):
        x = np.random.randn(1, 3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x), np.asarray(squeeze(t))))

        x = np.random.randn(1, 4, 1, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.asarray(squeeze(t, axis=2))))

    def test_expand_dims_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.expand_dims(x, axis=1)))

    @require_torch
    def test_expand_dims_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_tf
    def test_expand_dims_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_flax
    def test_expand_dims_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.asarray(expand_dims(t, axis=1))))
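# The behavior pinned down by test_flatten_dict above, in one line:
#   flatten_dict({"a": {"b": {"c": 1}}}) == {"a.b.c": 1}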
| 368 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_blenderbot": [
"BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlenderbotConfig",
"BlenderbotOnnxConfig",
],
"tokenization_blenderbot": ["BlenderbotTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_blenderbot_fast"] = ["BlenderbotTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blenderbot"] = [
"BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotForCausalLM",
"BlenderbotForConditionalGeneration",
"BlenderbotModel",
"BlenderbotPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blenderbot"] = [
"TFBlenderbotForConditionalGeneration",
"TFBlenderbotModel",
"TFBlenderbotPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_blenderbot"] = [
"FlaxBlenderbotForConditionalGeneration",
"FlaxBlenderbotModel",
"FlaxBlenderbotPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
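# A minimal sketch of the lazy-import idea used above (simplified; the real
# transformers._LazyModule also handles __dir__, pickling and import failures):
#
#   import importlib
#   from types import ModuleType
#
#   class _LazyModule(ModuleType):
#       def __init__(self, name, import_structure):
#           super().__init__(name)
#           self._class_to_module = {
#               cls: mod for mod, classes in import_structure.items() for cls in classes
#           }
#       def __getattr__(self, name):
#           # import the submodule that defines `name` only on first attribute access
#           module = importlib.import_module("." + self._class_to_module[name], self.__name__)
#           return getattr(module, name)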
| 347 | 0 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
logger = logging.get_logger(__name__)
__UpperCamelCase : Tuple = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/vocab.json",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/vocab.json",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/vocab.json",
"roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json",
"roberta-large-openai-detector": (
"https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json"
),
},
"merges_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/merges.txt",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/merges.txt",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/merges.txt",
"roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt",
"roberta-large-openai-detector": (
"https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt"
),
},
"tokenizer_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/tokenizer.json",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/tokenizer.json",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json",
"roberta-base-openai-detector": (
"https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json"
),
"roberta-large-openai-detector": (
"https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"roberta-base": 512,
"roberta-large": 512,
"roberta-large-mnli": 512,
"distilroberta-base": 512,
"roberta-base-openai-detector": 512,
"roberta-large-openai-detector": 512,
}
class RobertaTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = RobertaTokenizer
def __init__( self :Union[str, Any] , __magic_name__ :int=None , __magic_name__ :int=None , __magic_name__ :Any=None , __magic_name__ :int="replace" , __magic_name__ :str="<s>" , __magic_name__ :Dict="</s>" , __magic_name__ :Optional[int]="</s>" , __magic_name__ :List[str]="<s>" , __magic_name__ :Dict="<unk>" , __magic_name__ :str="<pad>" , __magic_name__ :List[str]="<mask>" , __magic_name__ :Any=False , __magic_name__ :Optional[int]=True , **__magic_name__ :List[str] , ):
'''simple docstring'''
super().__init__(
_a , _a , tokenizer_file=_a , errors=_a , bos_token=_a , eos_token=_a , sep_token=_a , cls_token=_a , unk_token=_a , pad_token=_a , mask_token=_a , add_prefix_space=_a , trim_offsets=_a , **_a , )
a = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" , _a ) != add_prefix_space:
a = getattr(_a , pre_tok_state.pop("""type""" ) )
a = add_prefix_space
a = pre_tok_class(**_a )
a = add_prefix_space
a = "post_processor"
a = getattr(self.backend_tokenizer , _a , _a )
if tokenizer_component_instance:
a = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
a = tuple(state["""sep"""] )
if "cls" in state:
a = tuple(state["""cls"""] )
a = False
if state.get("""add_prefix_space""" , _a ) != add_prefix_space:
a = add_prefix_space
a = True
if state.get("""trim_offsets""" , _a ) != trim_offsets:
a = trim_offsets
a = True
if changes_to_apply:
a = getattr(_a , state.pop("""type""" ) )
a = component_class(**_a )
setattr(self.backend_tokenizer , _a , _a )
    @property
    def mask_token(self):
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # The mask token behaves like a normal word, i.e. it includes the space before it
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
def lowerCamelCase__ ( self :str , *__magic_name__ :Optional[int] , **__magic_name__ :int ):
'''simple docstring'''
a = kwargs.get("""is_split_into_words""" , _a )
assert self.add_prefix_space or not is_split_into_words, (
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*_a , **_a )
def lowerCamelCase__ ( self :str , *__magic_name__ :int , **__magic_name__ :List[Any] ):
'''simple docstring'''
a = kwargs.get("""is_split_into_words""" , _a )
assert self.add_prefix_space or not is_split_into_words, (
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._encode_plus(*_a , **_a )
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
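# Special-token layout produced by build_inputs_with_special_tokens above
# (the standard RoBERTa convention):
#   single sequence:   <s> A </s>
#   pair of sequences: <s> A </s></s> B </s>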
| 369 |
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class IPNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (IPNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)
    def get_scheduler_config(self, **kwargs):
        config = {"num_train_timesteps": 1000}
        config.update(**kwargs)
        return config
    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def test_from_save_pretrained(self):
        pass
    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample
    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)
    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps, time_step=None)
    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=None)
    def test_full_loop(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 2_540_529) < 10
| 347 | 0 |
def solution(limit: int = 1_000_000) -> int:
    phi = [i - 1 for i in range(limit + 1)]

    for i in range(2, limit + 1):
        if phi[i] == i - 1:  # i is prime
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i

    return sum(phi[2 : limit + 1])
if __name__ == "__main__":
print(solution())
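# Sanity check from the Project Euler 72 statement: solution(8) == 21,
# the number of reduced proper fractions with denominator <= 8.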
| 370 |
__UpperCamelCase : Dict = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
def __A ( ) -> None:
a = input("""Enter message: """ )
a = input("""Enter key [alphanumeric]: """ )
a = input("""Encrypt/Decrypt [e/d]: """ )
if mode.lower().startswith("""e""" ):
a = """encrypt"""
a = encrypt_message(__lowerCamelCase , __lowerCamelCase )
elif mode.lower().startswith("""d""" ):
a = """decrypt"""
a = decrypt_message(__lowerCamelCase , __lowerCamelCase )
print(f'\n{mode.title()}ed message:' )
print(__lowerCamelCase )
def __A ( __lowerCamelCase , __lowerCamelCase ) -> str:
return translate_message(__lowerCamelCase , __lowerCamelCase , """encrypt""" )
def __A ( __lowerCamelCase , __lowerCamelCase ) -> str:
return translate_message(__lowerCamelCase , __lowerCamelCase , """decrypt""" )
def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> str:
a = []
a = 0
a = key.upper()
for symbol in message:
a = LETTERS.find(symbol.upper() )
if num != -1:
if mode == "encrypt":
num += LETTERS.find(key[key_index] )
elif mode == "decrypt":
num -= LETTERS.find(key[key_index] )
num %= len(__lowerCamelCase )
if symbol.isupper():
translated.append(LETTERS[num] )
elif symbol.islower():
translated.append(LETTERS[num].lower() )
key_index += 1
if key_index == len(__lowerCamelCase ):
a = 0
else:
translated.append(__lowerCamelCase )
return "".join(__lowerCamelCase )
if __name__ == "__main__":
main()
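# Round-trip example (classic textbook vector):
#   encrypt_message("LEMON", "ATTACKATDAWN") == "LXFOPVEFRNHR"
#   decrypt_message("LEMON", "LXFOPVEFRNHR") == "ATTACKATDAWN"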
| 347 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]
def __init__( self :List[str] , __magic_name__ :bool = True , __magic_name__ :Dict[str, int] = None , __magic_name__ :PILImageResampling = PILImageResampling.BICUBIC , __magic_name__ :bool = True , __magic_name__ :Dict[str, int] = None , __magic_name__ :bool = True , __magic_name__ :Union[int, float] = 1 / 255 , __magic_name__ :bool = True , __magic_name__ :Optional[Union[float, List[float]]] = None , __magic_name__ :Optional[Union[float, List[float]]] = None , __magic_name__ :bool = True , **__magic_name__ :Dict , ):
'''simple docstring'''
super().__init__(**snake_case_ )
a = size if size is not None else {"""shortest_edge""": 224}
a = get_size_dict(snake_case_ , default_to_square=snake_case_ )
a = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
a = get_size_dict(snake_case_ , default_to_square=snake_case_ , param_name="""crop_size""" )
a = do_resize
a = size
a = resample
a = do_center_crop
a = crop_size
a = do_rescale
a = rescale_factor
a = do_normalize
a = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
a = image_std if image_std is not None else OPENAI_CLIP_STD
a = do_convert_rgb
def lowerCamelCase__ ( self :Any , __magic_name__ :np.ndarray , __magic_name__ :Dict[str, int] , __magic_name__ :PILImageResampling = PILImageResampling.BICUBIC , __magic_name__ :Optional[Union[str, ChannelDimension]] = None , **__magic_name__ :List[str] , ):
'''simple docstring'''
a = get_size_dict(snake_case_ , default_to_square=snake_case_ )
if "shortest_edge" not in size:
raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
a = get_resize_output_image_size(snake_case_ , size=size["""shortest_edge"""] , default_to_square=snake_case_ )
return resize(snake_case_ , size=snake_case_ , resample=snake_case_ , data_format=snake_case_ , **snake_case_ )
def lowerCamelCase__ ( self :Tuple , __magic_name__ :np.ndarray , __magic_name__ :Dict[str, int] , __magic_name__ :Optional[Union[str, ChannelDimension]] = None , **__magic_name__ :str , ):
'''simple docstring'''
a = get_size_dict(snake_case_ )
if "height" not in size or "width" not in size:
raise ValueError(F'The `size` parameter must contain the keys (height, width). Got {size.keys()}' )
return center_crop(snake_case_ , size=(size["""height"""], size["""width"""]) , data_format=snake_case_ , **snake_case_ )
def lowerCamelCase__ ( self :str , __magic_name__ :np.ndarray , __magic_name__ :Union[int, float] , __magic_name__ :Optional[Union[str, ChannelDimension]] = None , **__magic_name__ :Optional[Any] , ):
'''simple docstring'''
return rescale(snake_case_ , scale=snake_case_ , data_format=snake_case_ , **snake_case_ )
def lowerCamelCase__ ( self :Optional[Any] , __magic_name__ :np.ndarray , __magic_name__ :Union[float, List[float]] , __magic_name__ :Union[float, List[float]] , __magic_name__ :Optional[Union[str, ChannelDimension]] = None , **__magic_name__ :Dict , ):
'''simple docstring'''
return normalize(snake_case_ , mean=snake_case_ , std=snake_case_ , data_format=snake_case_ , **snake_case_ )
def lowerCamelCase__ ( self :Optional[Any] , __magic_name__ :ImageInput , __magic_name__ :bool = None , __magic_name__ :Dict[str, int] = None , __magic_name__ :PILImageResampling = None , __magic_name__ :bool = None , __magic_name__ :int = None , __magic_name__ :bool = None , __magic_name__ :float = None , __magic_name__ :bool = None , __magic_name__ :Optional[Union[float, List[float]]] = None , __magic_name__ :Optional[Union[float, List[float]]] = None , __magic_name__ :bool = None , __magic_name__ :Optional[Union[str, TensorType]] = None , __magic_name__ :Optional[ChannelDimension] = ChannelDimension.FIRST , **__magic_name__ :Tuple , ):
'''simple docstring'''
a = do_resize if do_resize is not None else self.do_resize
a = size if size is not None else self.size
a = get_size_dict(snake_case_ , param_name="""size""" , default_to_square=snake_case_ )
a = resample if resample is not None else self.resample
a = do_center_crop if do_center_crop is not None else self.do_center_crop
a = crop_size if crop_size is not None else self.crop_size
a = get_size_dict(snake_case_ , param_name="""crop_size""" , default_to_square=snake_case_ )
a = do_rescale if do_rescale is not None else self.do_rescale
a = rescale_factor if rescale_factor is not None else self.rescale_factor
a = do_normalize if do_normalize is not None else self.do_normalize
a = image_mean if image_mean is not None else self.image_mean
a = image_std if image_std is not None else self.image_std
a = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
a = make_list_of_images(snake_case_ )
if not valid_images(snake_case_ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
a = [convert_to_rgb(snake_case_ ) for image in images]
# All transformations expect numpy arrays.
a = [to_numpy_array(snake_case_ ) for image in images]
if do_resize:
a = [self.resize(image=snake_case_ , size=snake_case_ , resample=snake_case_ ) for image in images]
if do_center_crop:
a = [self.center_crop(image=snake_case_ , size=snake_case_ ) for image in images]
if do_rescale:
a = [self.rescale(image=snake_case_ , scale=snake_case_ ) for image in images]
if do_normalize:
a = [self.normalize(image=snake_case_ , mean=snake_case_ , std=snake_case_ ) for image in images]
a = [to_channel_dimension_format(snake_case_ , snake_case_ ) for image in images]
a = {"""pixel_values""": images}
return BatchFeature(data=snake_case_ , tensor_type=snake_case_ )
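# Hypothetical usage sketch (checkpoint name is an assumption):
#   processor = CLIPImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
#   batch = processor(images=pil_image, return_tensors="pt")
#   batch["pixel_values"].shape  # (1, 3, 224, 224) with the defaults above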
| 371 |
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class FlaxRobertaModelTester(unittest.TestCase):
def __init__( self :Optional[int] , __magic_name__ :Tuple , __magic_name__ :Tuple=13 , __magic_name__ :List[Any]=7 , __magic_name__ :Optional[Any]=True , __magic_name__ :List[Any]=True , __magic_name__ :Union[str, Any]=True , __magic_name__ :List[str]=True , __magic_name__ :str=99 , __magic_name__ :Optional[Any]=32 , __magic_name__ :Union[str, Any]=5 , __magic_name__ :Any=4 , __magic_name__ :int=37 , __magic_name__ :Tuple="gelu" , __magic_name__ :List[str]=0.1 , __magic_name__ :Dict=0.1 , __magic_name__ :Tuple=512 , __magic_name__ :Dict=16 , __magic_name__ :Optional[int]=2 , __magic_name__ :Optional[int]=0.02 , __magic_name__ :Optional[Any]=4 , ):
'''simple docstring'''
a = parent
a = batch_size
a = seq_length
a = is_training
a = use_attention_mask
a = use_token_type_ids
a = use_labels
a = vocab_size
a = hidden_size
a = num_hidden_layers
a = num_attention_heads
a = intermediate_size
a = hidden_act
a = hidden_dropout_prob
a = attention_probs_dropout_prob
a = max_position_embeddings
a = type_vocab_size
a = type_sequence_label_size
a = initializer_range
a = num_choices
def lowerCamelCase__ ( self :int ):
'''simple docstring'''
a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
a = None
if self.use_attention_mask:
a = random_attention_mask([self.batch_size, self.seq_length] )
a = None
if self.use_token_type_ids:
a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
a = RobertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__magic_name__ , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def lowerCamelCase__ ( self :List[Any] ):
'''simple docstring'''
a = self.prepare_config_and_inputs()
a , a , a , a = config_and_inputs
a = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
def lowerCamelCase__ ( self :Dict ):
'''simple docstring'''
a = self.prepare_config_and_inputs()
a , a , a , a = config_and_inputs
a = True
a = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
a = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class FlaxRobertaModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRobertaModel,
            FlaxRobertaForCausalLM,
            FlaxRobertaForMaskedLM,
            FlaxRobertaForSequenceClassification,
            FlaxRobertaForTokenClassification,
            FlaxRobertaForMultipleChoice,
            FlaxRobertaForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )
    def setUp(self):
        self.model_tester = FlaxRobertaModelTester(self)
    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("roberta-base", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
| 347 | 0 |
from __future__ import annotations


class Node:
    def __init__(self, data: int) -> None:
        self.data = data
        self.left: Node | None = None
        self.right: Node | None = None


def display(tree: Node | None) -> None:  # In Order traversal of the tree
    if tree:
        display(tree.left)
        print(tree.data)
        display(tree.right)


def depth_of_tree(tree: Node | None) -> int:
    return 1 + max(depth_of_tree(tree.left), depth_of_tree(tree.right)) if tree else 0


def is_full_binary_tree(tree: Node) -> bool:
    if not tree:
        return True
    if tree.left and tree.right:
        return is_full_binary_tree(tree.left) and is_full_binary_tree(tree.right)
    else:
        return not tree.left and not tree.right


def main() -> None:  # Main function for testing.
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    tree.left.right.left = Node(6)
    tree.right.left = Node(7)
    tree.right.left.left = Node(8)
    tree.right.left.left.right = Node(9)

    print(is_full_binary_tree(tree))
    print(depth_of_tree(tree))
    print("Tree is: ")
    display(tree)


if __name__ == "__main__":
    main()
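# For the sample tree built in main(): node 3 lacks a right child and node 8
# lacks a left child, so is_full_binary_tree(tree) prints False, while the
# longest root-to-leaf path 1 -> 3 -> 7 -> 8 -> 9 makes depth_of_tree(tree) print 5.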
| 350 |
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class JsonConfig(datasets.BuilderConfig):
    """BuilderConfig for JSON."""

    features: Optional[datasets.Features] = None
    encoding: str = "utf-8"
    encoding_errors: Optional[str] = None
    field: Optional[str] = None
    use_threads: bool = True  # deprecated
    block_size: Optional[int] = None  # deprecated
    chunksize: int = 10 << 20  # 10MB
    newlines_in_values: Optional[bool] = None


class Json(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = JsonConfig
    def _info(self):
        if self.config.block_size is not None:
            logger.warning("The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead")
            self.config.chunksize = self.config.block_size
if self.config.use_threads is not True:
logger.warning(
"""The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore.""" )
if self.config.newlines_in_values is not None:
raise ValueError("""The JSON loader parameter `newlines_in_values` is no longer supported""" )
return datasets.DatasetInfo(features=self.config.features )
    def _split_generators(self, dl_manager):
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits
    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # adding missing columns
            for column_name in set(self.config.features) - set(pa_table.column_names):
                type = self.config.features.arrow_schema.field(column_name).type
                pa_table = pa_table.append_column(column_name, pa.array([None] * len(pa_table), type=type))
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table
    def _generate_tables(self, files):
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            # If the file is one json object and if we need to look at the list of items in one specific field
            if self.config.field is not None:
                with open(file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f:
                    dataset = json.load(f)

                # We keep only the field we are interested in
                dataset = dataset[self.config.field]

                # We accept two formats: a list of dicts or a dict of lists
                if isinstance(dataset, (list, tuple)):
                    keys = set().union(*[row.keys() for row in dataset])
                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                else:
                    mapping = dataset
                pa_table = pa.Table.from_pydict(mapping)
                yield file_idx, self._cast_table(pa_table)

            # If the file has one json object per line
            else:
                with open(file, "rb") as f:
                    batch_idx = 0
                    # Use block_size equal to the chunk size divided by 32 to leverage multithreading
                    # Set a default minimum value of 16kB if the chunk size is really small
                    block_size = max(self.config.chunksize // 32, 16 << 10)
                    encoding_errors = (
                        self.config.encoding_errors if self.config.encoding_errors is not None else "strict"
                    )
                    while True:
                        batch = f.read(self.config.chunksize)
                        if not batch:
                            break
                        # Finish current line
                        try:
                            batch += f.readline()
                        except (AttributeError, io.UnsupportedOperation):
                            batch += readline(f)
                        # PyArrow only accepts utf-8 encoded bytes
                        if self.config.encoding != "utf-8":
                            batch = batch.decode(self.config.encoding, errors=encoding_errors).encode("utf-8")
                        try:
                            while True:
                                try:
                                    pa_table = paj.read_json(
                                        io.BytesIO(batch), read_options=paj.ReadOptions(block_size=block_size)
                                    )
                                    break
                                except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
                                    if (
                                        isinstance(e, pa.ArrowInvalid)
                                        and "straddling" not in str(e)
                                        or block_size > len(batch)
                                    ):
                                        raise
                                    else:
                                        # Increase the block size in case it was too small.
                                        # The block size will be reset for the next file.
                                        logger.debug(
                                            f"Batch of {len(batch)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}."
                                        )
                                        block_size *= 2
                        except pa.ArrowInvalid as e:
                            try:
                                with open(
                                    file, encoding=self.config.encoding, errors=self.config.encoding_errors
                                ) as f:
                                    dataset = json.load(f)
                            except json.JSONDecodeError:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise e
                            # If possible, parse the file as a list of json objects and exit the loop
                            if isinstance(dataset, list):  # list is the only sequence type supported in JSON
                                try:
                                    keys = set().union(*[row.keys() for row in dataset])
                                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                                    pa_table = pa.Table.from_pydict(mapping)
                                except (pa.ArrowInvalid, AttributeError) as e:
                                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                    raise ValueError(f"Not able to read records in the JSON file at {file}.") from None
                                yield file_idx, self._cast_table(pa_table)
                                break
                            else:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise ValueError(
                                    f"Not able to read records in the JSON file at {file}. "
                                    f"You should probably indicate the field of the JSON file containing your records. "
                                    f"This JSON file contains the following fields: {str(list(dataset.keys()))}. "
                                    f"Select the correct one and provide it as `field='XXX'` to the dataset loading method. "
                                ) from None
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield (file_idx, batch_idx), self._cast_table(pa_table)
                        batch_idx += 1
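# How this builder is usually reached in practice (illustrative sketch; the file
# names below are placeholders, not paths from this repository):
#
#     from datasets import load_dataset
#
#     # one JSON object per line -> handled by the streaming branch above
#     ds = load_dataset("json", data_files="train.jsonl", split="train")
#
#     # records nested under one key -> handled by the `field` branch above
#     ds = load_dataset("json", data_files="dump.json", field="data", split="train")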
| 347 | 0 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPSegProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, visual_prompt=None, images=None, return_tensors=None, **kwargs):
        if text is None and visual_prompt is None and images is None:
            raise ValueError("You have to specify either text, visual prompt or images.")

        if text is not None and visual_prompt is not None:
            raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if visual_prompt is not None:
            prompt_features = self.image_processor(visual_prompt, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if visual_prompt is not None and images is not None:
            encoding = {
                "pixel_values": image_features.pixel_values,
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        elif text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        elif visual_prompt is not None:
            encoding = {
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def batch_decode(self, *args, **kwargs):
        # forwarded to the tokenizer's batch_decode
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        # forwarded to the tokenizer's decode
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
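# Usage sketch (illustrative; the checkpoint id below is an assumption —
# substitute whichever CLIPSeg checkpoint you actually use):
#
#     from PIL import Image
#
#     processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
#     image = Image.new("RGB", (352, 352))
#     inputs = processor(text=["a cat"], images=[image], return_tensors="pt")
#     # -> input_ids, attention_mask and pixel_values, ready for the model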
| 351 |
import warnings
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
class ByT5Tokenizer(PreTrainedTokenizer):
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=125,
        additional_special_tokens=None,
        **kwargs,
    ):
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to ByT5Tokenizer. In this case the additional_special_tokens must include the"
                    " extra_ids tokens"
                )

        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token

        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self._extra_ids = extra_ids

        self._utf_vocab_size = 2**8  # utf is 8 bits

        # define special tokens dict
        self.special_tokens_encoder: Dict[str, int] = {
            self.pad_token: 0,
            self.eos_token: 1,
            self.unk_token: 2,
        }
        self._num_special_tokens = len(self.special_tokens_encoder)
        n = len(additional_special_tokens)
        for i, token in enumerate(additional_special_tokens):
            self.special_tokens_encoder[token] = self.vocab_size + i - n
        self.special_tokens_decoder: Dict[int, str] = {v: k for k, v in self.special_tokens_encoder.items()}

    @property
    def vocab_size(self):
        return self._utf_vocab_size + self._num_special_tokens + self._extra_ids

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def _add_eos_if_not_present(self, token_ids: List[int]) -> List[int]:
        if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
                " eos tokens being added."
            )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        eos = [self.eos_token_id]

        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        token_ids_0 = self._add_eos_if_not_present(token_ids_0)
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1)
            return token_ids_0 + token_ids_1

    def _tokenize(self, text: str) -> List[str]:
        # a string becomes one token per utf-8 byte
        tokens = [chr(i) for i in text.encode("utf-8")]
        return tokens

    def _convert_token_to_id(self, token):
        if token in self.special_tokens_encoder:
            token_id = self.special_tokens_encoder[token]
        elif token in self.added_tokens_encoder:
            token_id = self.added_tokens_encoder[token]
        elif len(token) != 1:
            token_id = self.unk_token_id
        else:
            token_id = ord(token) + self._num_special_tokens
        return token_id

    def _convert_id_to_token(self, index):
        if index in self.special_tokens_decoder:
            token = self.special_tokens_decoder[index]
        else:
            token = chr(index - self._num_special_tokens)
        return token

    def convert_tokens_to_string(self, tokens):
        bstring = b""
        for token in tokens:
            if token in self.special_tokens_decoder:
                tok_string = self.special_tokens_decoder[token].encode("utf-8")
            elif token in self.added_tokens_decoder:
                tok_string = self.added_tokens_decoder[token].encode("utf-8")
            elif token in self.special_tokens_encoder:
                tok_string = token.encode("utf-8")
            elif token in self.added_tokens_encoder:
                tok_string = token.encode("utf-8")
            else:
                tok_string = bytes([ord(token)])
            bstring += tok_string
        string = bstring.decode("utf-8", errors="ignore")
        return string

    # ByT5Tokenizer has no vocab file
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        return ()
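# Worked example of the offset arithmetic above (illustrative; the tokenizer
# needs no checkpoint, so it can be instantiated directly):
#
#     tok = ByT5Tokenizer()
#     tok("hi").input_ids        # [107, 108, 1]: ord("h") + 3, ord("i") + 3, eos (id 1)
#     tok.decode([107, 108, 1])  # "hi</s>"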
| 347 | 0 |
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_text_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 1
    assert dataset.column_names == ["text"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> str:
a = tmp_path / """cache"""
a = {"""text""": """string"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
a = TextDatasetReader(__lowerCamelCase , cache_dir=__lowerCamelCase , keep_in_memory=__lowerCamelCase ).read()
_check_text_dataset(__lowerCamelCase , __lowerCamelCase )
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def test_dataset_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader(text_path, features=features, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> int:
a = tmp_path / """cache"""
a = {"""text""": """string"""}
a = TextDatasetReader(__lowerCamelCase , cache_dir=__lowerCamelCase , split=__lowerCamelCase ).read()
_check_text_dataset(__lowerCamelCase , __lowerCamelCase )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("""path_type""" , [str, list] )
def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Dict:
if issubclass(__lowerCamelCase , __lowerCamelCase ):
a = text_path
elif issubclass(__lowerCamelCase , __lowerCamelCase ):
a = [text_path]
a = tmp_path / """cache"""
a = {"""text""": """string"""}
a = TextDatasetReader(__lowerCamelCase , cache_dir=__lowerCamelCase ).read()
_check_text_dataset(__lowerCamelCase , __lowerCamelCase )
def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=("train",) ) -> str:
assert isinstance(__lowerCamelCase , __lowerCamelCase )
for split in splits:
a = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Optional[int]:
a = tmp_path / """cache"""
a = {"""text""": """string"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
a = TextDatasetReader({"""train""": text_path} , cache_dir=__lowerCamelCase , keep_in_memory=__lowerCamelCase ).read()
_check_text_datasetdict(__lowerCamelCase , __lowerCamelCase )
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def test_datasetdict_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    # CSV file loses col_1 string dtype information: default now is "int64" instead of "string"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader({"train": text_path}, features=features, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features)
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Optional[int]:
if split:
a = {split: text_path}
else:
a = """train"""
a = {"""train""": text_path, """test""": text_path}
a = tmp_path / """cache"""
a = {"""text""": """string"""}
a = TextDatasetReader(__lowerCamelCase , cache_dir=__lowerCamelCase ).read()
_check_text_datasetdict(__lowerCamelCase , __lowerCamelCase , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
| 352 |
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        num_channels=3,
        image_size=4,
        patch_size=2,
        text_seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=36,
        num_hidden_layers=3,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        coordinate_size=6,
        shape_size=6,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.text_seq_length = text_seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox

        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)

        config = LayoutLMvaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            coordinate_size=self.coordinate_size,
            shape_size=self.shape_size,
            input_size=self.image_size,
            patch_size=self.patch_size,
        )

        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    def create_and_check_model(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LayoutLMvaModel(config=config)
        model.to(torch_device)
        model.eval()

        # text + image
        result = model(input_ids, pixel_values=pixel_values)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids
        )
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # text only
        result = model(input_ids)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size)
        )

        # image only
        result = model(pixel_values=pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size)
        )
    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LayoutLMvaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LayoutLMvaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LayoutLMvaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class LayoutLMvaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_pruning = False
    test_torchscript = False
    test_mismatched_shapes = False

    all_model_classes = (
        (
            LayoutLMvaModel,
            LayoutLMvaForSequenceClassification,
            LayoutLMvaForTokenClassification,
            LayoutLMvaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": LayoutLMvaForQuestionAnswering, "feature-extraction": LayoutLMvaModel}
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_case_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True

    def setUp(self):
        self.model_tester = LayoutLMvaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMvaConfig, hidden_size=37)
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)
        if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: v.unsqueeze(1).expand(-1, self.model_tester.num_choices, -1).contiguous()
                if isinstance(v, torch.Tensor) and v.ndim > 1
                else v
                for k, v in inputs_dict.items()
            }

        if return_labels:
            if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = torch.ones(self.model_tester.batch_size, dtype=torch.long, device=torch_device)
            elif model_class in get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
            elif model_class in [
                *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
            elif model_class in [
                *get_values(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )

        return inputs_dict
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LayoutLMvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
class LayoutLMvaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LayoutLMvaImageProcessor(apply_ocr=False) if is_vision_available() else None

    @slow
    def test_inference_no_head(self):
        model = LayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device)

        input_ids = torch.tensor([[1, 2]])
        bbox = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).unsqueeze(0)

        # forward pass
        outputs = model(
            input_ids=input_ids.to(torch_device),
            bbox=bbox.to(torch_device),
            pixel_values=pixel_values.to(torch_device),
        )

        # verify the logits
        expected_shape = torch.Size((1, 199, 768))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
| 347 | 0 |
from statistics import mean, stdev
def normalization(data: list, ndigits: int = 3) -> list:
    x_min = min(data)
    x_max = max(data)
    # normalize data (min-max scaling to [0, 1])
    return [round((x - x_min) / (x_max - x_min), ndigits) for x in data]


def standardization(data: list, ndigits: int = 3) -> list:
    mu = mean(data)
    sigma = stdev(data)
    # standardize data (z-score)
    return [round((x - mu) / sigma, ndigits) for x in data]
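# Quick sanity check (illustrative): both transforms on a tiny sample,
# rounded to the default 3 digits.
if __name__ == "__main__":
    sample = [2.0, 4.0, 6.0, 8.0]
    print(normalization(sample))    # [0.0, 0.333, 0.667, 1.0]
    print(standardization(sample))  # [-1.162, -0.387, 0.387, 1.162]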
| 353 |
from copy import deepcopy
class FenwickTree:
    def __init__(self, arr: list[int] | None = None, size: int | None = None) -> None:
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr)
        else:
            raise ValueError("Either arr or size must be specified")

    def init(self, arr: list[int]) -> None:
        self.size = len(arr)
        self.tree = deepcopy(arr)
        for i in range(1, self.size):
            j = self.next_(i)
            if j < self.size:
                self.tree[j] += self.tree[i]

    def get_array(self) -> list[int]:
        arr = self.tree[:]
        for i in range(self.size - 1, 0, -1):
            j = self.next_(i)
            if j < self.size:
                arr[j] -= arr[i]
        return arr

    @staticmethod
    def next_(index: int) -> int:
        return index + (index & (-index))

    @staticmethod
    def prev(index: int) -> int:
        return index - (index & (-index))

    def add(self, index: int, value: int) -> None:
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index)

    def update(self, index: int, value: int) -> None:
        self.add(index, value - self.get(index))

    def prefix(self, right: int) -> int:
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right)
        return result

    def query(self, left: int, right: int) -> int:
        return self.prefix(right) - self.prefix(left)

    def get(self, index: int) -> int:
        return self.query(index, index + 1)

    def rank_query(self, value: int) -> int:
        value -= self.tree[0]
        if value < 0:
            return -1

        j = 1  # Largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2

        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i
if __name__ == "__main__":
import doctest
doctest.testmod()
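# Usage sketch (illustrative, not part of the original module): point updates
# and prefix/range sums both run in O(log n).
if __name__ == "__main__":
    bit = FenwickTree([1, 2, 3, 4, 5])
    print(bit.prefix(3))    # 1 + 2 + 3 = 6
    bit.add(1, 10)          # logical array becomes [1, 12, 3, 4, 5]
    print(bit.query(1, 4))  # 12 + 3 + 4 = 19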
| 347 | 0 |
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
},
"merges_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/bart-base": 1_024,
"facebook/bart-large": 1_024,
"facebook/bart-large-mnli": 1_024,
"facebook/bart-large-cnn": 1_024,
"facebook/bart-large-xsum": 1_024,
"yjernite/bart_eli5": 1_024,
}
@lru_cache()
def bytes_to_unicode():
    # maps every byte to a printable unicode character, avoiding whitespace and
    # control characters the BPE code cannot handle
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    # returns the set of adjacent symbol pairs in a word (tuple of symbols)
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class BartTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")
    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
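# Sketch of the byte-level machinery above (illustrative; it exercises only the
# module-level helpers, since the tokenizer class needs real vocab/merges files):
#
#     byte_encoder = bytes_to_unicode()
#     len(byte_encoder)            # 256: every byte maps to a printable character
#     byte_encoder[ord(" ")]       # "Ġ", the space marker seen in BPE vocabularies
#     get_pairs(("l", "o", "w"))   # {("l", "o"), ("o", "w")}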
| 354 |
from __future__ import annotations
from typing import Generic, TypeVar
__UpperCamelCase : Union[str, Any] = TypeVar("T")
class DisjointSetTreeNode(Generic[T]):
    # Disjoint-set node storing the data, parent pointer and rank
    def __init__(self, data: T) -> None:
        self.data = data
        self.parent = self
        self.rank = 0


class DisjointSetTree(Generic[T]):
    # Disjoint-set (union-find) data structure
    def __init__(self) -> None:
        # map from node name to the node object
        self.map: dict[T, DisjointSetTreeNode[T]] = {}

    def make_set(self, data: T) -> None:
        # create a new singleton set containing data
        self.map[data] = DisjointSetTreeNode(data)

    def find_set(self, data: T) -> DisjointSetTreeNode[T]:
        # find the representative of the set containing data (with path compression)
        elem_ref = self.map[data]
        if elem_ref != elem_ref.parent:
            elem_ref.parent = self.find_set(elem_ref.parent.data)
        return elem_ref.parent

    def link(self, node1: DisjointSetTreeNode[T], node2: DisjointSetTreeNode[T]) -> None:
        # union by rank helper
        if node1.rank > node2.rank:
            node2.parent = node1
        else:
            node1.parent = node2
            if node1.rank == node2.rank:
                node2.rank += 1

    def union(self, data1: T, data2: T) -> None:
        # merge the two disjoint sets containing data1 and data2
        self.link(self.find_set(data1), self.find_set(data2))


class GraphUndirectedWeighted(Generic[T]):
    def __init__(self) -> None:
        # connections: map from a node to its neighbours with edge weights
        self.connections: dict[T, dict[T, int]] = {}

    def add_node(self, node: T) -> None:
        # add a node only if it is not already present in the graph
        if node not in self.connections:
            self.connections[node] = {}

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        # add an undirected edge with the given weight
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight

    def kruskal(self) -> GraphUndirectedWeighted[T]:
        # collect the edges in ascending order of weight
        edges = []
        seen = set()
        for start in self.connections:
            for end in self.connections[start]:
                if (start, end) not in seen:
                    seen.add((end, start))
                    edges.append((start, end, self.connections[start][end]))
        edges.sort(key=lambda x: x[2])

        # creating the disjoint set
        disjoint_set = DisjointSetTree[T]()
        for node in self.connections:
            disjoint_set.make_set(node)

        # MST generation
        num_edges = 0
        index = 0
        graph = GraphUndirectedWeighted[T]()
        while num_edges < len(self.connections) - 1:
            u, v, w = edges[index]
            index += 1
            parent_u = disjoint_set.find_set(u)
            parent_v = disjoint_set.find_set(v)
            if parent_u != parent_v:
                num_edges += 1
                graph.add_edge(u, v, w)
                disjoint_set.union(u, v)
        return graph
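# Usage sketch (illustrative): build a small weighted graph and extract its MST.
if __name__ == "__main__":
    g = GraphUndirectedWeighted[int]()
    g.add_edge(1, 2, 1)
    g.add_edge(2, 3, 2)
    g.add_edge(1, 3, 10)
    mst = g.kruskal()
    print(mst.connections)  # the weight-10 edge is excluded from the MST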
| 347 | 0 |
fast27_timesteps = [
999,
800,
799,
600,
599,
500,
400,
399,
377,
355,
333,
311,
288,
266,
244,
222,
200,
199,
177,
155,
133,
111,
88,
66,
44,
22,
0,
]
smart27_timesteps = [
999,
976,
952,
928,
905,
882,
858,
857,
810,
762,
715,
714,
572,
429,
428,
286,
285,
238,
190,
143,
142,
118,
95,
71,
47,
24,
0,
]
smart50_timesteps = [
999,
988,
977,
966,
955,
944,
933,
922,
911,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
350,
300,
299,
266,
233,
200,
199,
179,
159,
140,
120,
100,
99,
88,
77,
66,
55,
44,
33,
22,
11,
0,
]
smart100_timesteps = [
999,
995,
992,
989,
985,
981,
978,
975,
971,
967,
964,
961,
957,
956,
951,
947,
942,
937,
933,
928,
923,
919,
914,
913,
908,
903,
897,
892,
887,
881,
876,
871,
870,
864,
858,
852,
846,
840,
834,
828,
827,
820,
813,
806,
799,
792,
785,
784,
777,
770,
763,
756,
749,
742,
741,
733,
724,
716,
707,
699,
698,
688,
677,
666,
656,
655,
645,
634,
623,
613,
612,
598,
584,
570,
569,
555,
541,
527,
526,
505,
484,
483,
462,
440,
439,
396,
395,
352,
351,
308,
307,
264,
263,
220,
219,
176,
132,
88,
44,
0,
]
smart185_timesteps = [
999,
997,
995,
992,
990,
988,
986,
984,
981,
979,
977,
975,
972,
970,
968,
966,
964,
961,
959,
957,
956,
954,
951,
949,
946,
944,
941,
939,
936,
934,
931,
929,
926,
924,
921,
919,
916,
914,
913,
910,
907,
905,
902,
899,
896,
893,
891,
888,
885,
882,
879,
877,
874,
871,
870,
867,
864,
861,
858,
855,
852,
849,
846,
843,
840,
837,
834,
831,
828,
827,
824,
821,
817,
814,
811,
808,
804,
801,
798,
795,
791,
788,
785,
784,
780,
777,
774,
770,
766,
763,
760,
756,
752,
749,
746,
742,
741,
737,
733,
730,
726,
722,
718,
714,
710,
707,
703,
699,
698,
694,
690,
685,
681,
677,
673,
669,
664,
660,
656,
655,
650,
646,
641,
636,
632,
627,
622,
618,
613,
612,
607,
602,
596,
591,
586,
580,
575,
570,
569,
563,
557,
551,
545,
539,
533,
527,
526,
519,
512,
505,
498,
491,
484,
483,
474,
466,
457,
449,
440,
439,
428,
418,
407,
396,
395,
381,
366,
352,
351,
330,
308,
307,
286,
264,
263,
242,
220,
219,
176,
175,
132,
131,
88,
44,
0,
]
super27_timesteps = [
999,
991,
982,
974,
966,
958,
950,
941,
933,
925,
916,
908,
900,
899,
874,
850,
825,
800,
799,
700,
600,
500,
400,
300,
200,
100,
0,
]
super40_timesteps = [
999,
992,
985,
978,
971,
964,
957,
949,
942,
935,
928,
921,
914,
907,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
300,
299,
200,
199,
100,
99,
0,
]
super100_timesteps = [
999,
996,
992,
989,
985,
982,
979,
975,
972,
968,
965,
961,
958,
955,
951,
948,
944,
941,
938,
934,
931,
927,
924,
920,
917,
914,
910,
907,
903,
900,
899,
891,
884,
876,
869,
861,
853,
846,
838,
830,
823,
815,
808,
800,
799,
788,
777,
766,
755,
744,
733,
722,
711,
700,
699,
688,
677,
666,
655,
644,
633,
622,
611,
600,
599,
585,
571,
557,
542,
528,
514,
500,
499,
485,
471,
457,
442,
428,
414,
400,
399,
379,
359,
340,
320,
300,
299,
279,
259,
240,
220,
200,
199,
166,
133,
100,
99,
66,
33,
0,
]
| 355 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class BlipProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel")

        processor = BlipProcessor(image_processor, tokenizer)

        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = BlipProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = BlipProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)

        encoded_tok = tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
| 347 | 0 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class __lowerCAmelCase ( snake_case_ , unittest.TestCase ):
UpperCamelCase__ = XLMTokenizer
UpperCamelCase__ = False
def lowerCamelCase__ ( self :Dict ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
a = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'w</w>',
'r</w>',
't</w>',
'lo',
'low',
'er</w>',
'low</w>',
'lowest</w>',
'newer</w>',
'wider</w>',
'<unk>',
]
a = dict(zip(__magic_name__ , range(len(__magic_name__ ) ) ) )
a = ['l o 123', 'lo w 1456', 'e r</w> 1789', '']
a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" ) as fp:
fp.write(json.dumps(__magic_name__ ) )
with open(self.merges_file , """w""" ) as fp:
fp.write("""\n""".join(__magic_name__ ) )
def lowerCamelCase__ ( self :List[Any] , __magic_name__ :Union[str, Any] ):
'''simple docstring'''
a = 'lower newer'
a = 'lower newer'
return input_text, output_text
def lowerCamelCase__ ( self :List[str] ):
'''simple docstring'''
a = XLMTokenizer(self.vocab_file , self.merges_file )
a = 'lower'
a = ['low', 'er</w>']
a = tokenizer.tokenize(__magic_name__ )
self.assertListEqual(__magic_name__ , __magic_name__ )
a = tokens + ['<unk>']
a = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__magic_name__ ) , __magic_name__ )
@slow
def lowerCamelCase__ ( self :str ):
'''simple docstring'''
a = XLMTokenizer.from_pretrained("""xlm-mlm-en-2048""" )
a = tokenizer.encode("""sequence builders""" , add_special_tokens=__magic_name__ )
a = tokenizer.encode("""multi-sequence build""" , add_special_tokens=__magic_name__ )
a = tokenizer.build_inputs_with_special_tokens(__magic_name__ )
a = tokenizer.build_inputs_with_special_tokens(__magic_name__ , __magic_name__ )
assert encoded_sentence == [0] + text + [1]
assert encoded_pair == [0] + text + [1] + text_a + [1]
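# A quick illustration of the two templates asserted above, using XLM's bos/sep ids 0 and 1
# (the token id lists below are made up for illustration, not real tokenizer output):
text = [5, 6, 7]
text_a = [8, 9]
print([0] + text + [1])                 # single sequence: <s> A </s>
print([0] + text + [1] + text_a + [1])  # pair: <s> A </s> B </s>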
| 356 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
__UpperCamelCase : int = {
"shi-labs/nat-mini-in1k-224": "https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json",
# See all Nat models at https://huggingface.co/models?filter=nat
}
class __lowerCAmelCase ( BackboneConfigMixin , PretrainedConfig ):
UpperCamelCase__ = '''nat'''
UpperCamelCase__ = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self :Any , __magic_name__ :int=4 , __magic_name__ :Dict=3 , __magic_name__ :List[str]=64 , __magic_name__ :Optional[int]=[3, 4, 6, 5] , __magic_name__ :int=[2, 4, 8, 16] , __magic_name__ :str=7 , __magic_name__ :Tuple=3.0 , __magic_name__ :Dict=True , __magic_name__ :List[Any]=0.0 , __magic_name__ :List[Any]=0.0 , __magic_name__ :List[Any]=0.1 , __magic_name__ :Optional[Any]="gelu" , __magic_name__ :Optional[Any]=0.02 , __magic_name__ :Tuple=1E-5 , __magic_name__ :Union[str, Any]=0.0 , __magic_name__ :int=None , __magic_name__ :Any=None , **__magic_name__ :Dict , ):
'''simple docstring'''
super().__init__(**__magic_name__ )
a = patch_size
a = num_channels
a = embed_dim
a = depths
a = len(__magic_name__ )
a = num_heads
a = kernel_size
a = mlp_ratio
a = qkv_bias
a = hidden_dropout_prob
a = attention_probs_dropout_prob
a = drop_path_rate
a = hidden_act
a = layer_norm_eps
a = initializer_range
# we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
a = int(embed_dim * 2 ** (len(__magic_name__ ) - 1) )
a = layer_scale_init_value
a = ["""stem"""] + [F'stage{idx}' for idx in range(1 , len(__magic_name__ ) + 1 )]
a , a = get_aligned_output_features_output_indices(
out_features=__magic_name__ , out_indices=__magic_name__ , stage_names=self.stage_names )
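# A small sanity check of the hidden-size formula above, plugging in this config's
# default embed_dim and depths (values copied from the signature):
embed_dim = 64
depths = [3, 4, 6, 5]
hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
print(hidden_size)  # 512, the channel dimension after the last of the four stages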
| 347 | 0 |
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
__UpperCamelCase : List[str] = datasets.load_iris()
__UpperCamelCase : Optional[Any] = np.array(data["data"])
__UpperCamelCase : str = np.array(data["target"])
__UpperCamelCase : List[Any] = data["target_names"]
X_train, X_test, y_train, y_test = train_test_split(X, y)
def __A ( __lowerCamelCase , __lowerCamelCase ) -> List[str]:
return np.linalg.norm(np.array(__lowerCamelCase ) - np.array(__lowerCamelCase ) )
def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=5 ) -> Dict:
a = zip(__lowerCamelCase , __lowerCamelCase )
# List of distances of all points from the point to be classified
a = []
for data_point in data:
a = euclidean_distance(data_point[0] , __lowerCamelCase )
distances.append((distance, data_point[1]) )
# Choosing 'k' points with the least distances.
a = [i[1] for i in sorted(__lowerCamelCase )[:k]]
# Most commonly occurring class among them
# is the class into which the point is classified
a = Counter(__lowerCamelCase ).most_common(1 )[0][0]
return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
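# The same distance-then-majority-vote idea on toy data, sketched without sklearn
# (the points, labels, and query below are hypothetical):
from collections import Counter

import numpy as np

points = [((0.0, 0.0), "a"), ((1.0, 1.0), "a"), ((5.0, 5.0), "b")]
query = (0.5, 0.5)
ranked = sorted((np.linalg.norm(np.array(p) - np.array(query)), label) for p, label in points)
print(Counter(label for _, label in ranked[:2]).most_common(1)[0][0])  # "a"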
| 357 |
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def __A ( __lowerCamelCase , __lowerCamelCase ) -> Optional[int]:
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
# expert layer
a = flax_key_tuple[:-1] + ("""weight""",)
a = torch.permute(__lowerCamelCase , (0, 2, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(__lowerCamelCase ):
# linear layer
a = flax_key_tuple[:-1] + ("""weight""",)
a = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
a = flax_key_tuple[:-1] + ("""weight""",)
return flax_key_tuple, flax_tensor
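# A quick shape check of the expert-layer branch above: a 3-d Flax kernel laid out as
# (num_experts, d_in, d_out) is permuted to PyTorch's (num_experts, d_out, d_in) layout
# (the shapes below are made up for illustration):
import torch

kernel = torch.zeros(8, 512, 1024)        # hypothetical (num_experts, d_in, d_out)
weight = torch.permute(kernel, (0, 2, 1))
print(weight.shape)                       # torch.Size([8, 1024, 512])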
def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> List[Any]:
if "metadata" in layer:
a = layer.split("""metadata""" )
a = """""".join(split_layer[0] )[:-1]
a = [tuple(("""metadata""" + split_layer[1]).split("""/""" ) )]
elif "kvstore" in layer:
a = layer.split("""kvstore""" )
a = """""".join(split_layer[0] )[:-1]
a = [tuple(("""kvstore""" + split_layer[1]).split("""/""" ) )]
else:
a = layer.split("""/""" )
a = """/""".join(split_layer[:-1] )
a = (split_layer[-1],)
if "kvstore/path" in layer:
a = f'{switch_checkpoint_path}/{checkpoint_info[layer]}'
elif "kvstore/driver" in layer:
a = """file"""
else:
a = checkpoint_info[layer]
return curr_real_layer_name, split_layer, content
def __A ( __lowerCamelCase , __lowerCamelCase ) -> Union[str, Any]:
a = rename_keys(__lowerCamelCase )
a = {}
for k, v in current_block.items():
a = v
a = new_current_block
torch.save(__lowerCamelCase , __lowerCamelCase )
def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = WEIGHTS_NAME ) -> List[str]:
a = convert_file_size_to_int(__lowerCamelCase )
a = []
a = {}
a = 0
a = 0
os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase )
with gfile.GFile(switch_checkpoint_path + """/checkpoint""" , """rb""" ) as fp:
a = serialization.msgpack_restore(fp.read() )["""optimizer"""]["""target"""]
a = flatten_dict(__lowerCamelCase , sep="""/""" )
a = {}
for layer in checkpoint_info.keys():
a , a , a = get_key_and_tensorstore_dict(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
if curr_real_layer_name in all_layers:
a = content
else:
a = {split_layer[-1]: content}
for key in all_layers.keys():
# open tensorstore file
a = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
a = torch.tensor(__lowerCamelCase )
a = raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
# use the renaming pattern from the small conversion scripts
a , a = rename_base_flax_keys(tuple(key.split("""/""" ) ) , __lowerCamelCase )
a = """/""".join(__lowerCamelCase )
# If this weight is going to tip up over the maximal size, we split.
if current_block_size + weight_size > max_shard_size:
a = os.path.join(
__lowerCamelCase , weights_name.replace(""".bin""" , f'-{len(__lowerCamelCase )+1:05d}-of-???.bin' ) )
rename_and_save_block(__lowerCamelCase , __lowerCamelCase )
sharded_state_dicts.append(current_block.keys() )
del current_block
a = {}
a = 0
a = raw_weights.to(getattr(__lowerCamelCase , __lowerCamelCase ) )
current_block_size += weight_size
total_size += weight_size
# Add the last block
a = os.path.join(__lowerCamelCase , weights_name.replace(""".bin""" , f'-{len(__lowerCamelCase )+1:05d}-of-???.bin' ) )
rename_and_save_block(__lowerCamelCase , __lowerCamelCase )
sharded_state_dicts.append(current_block.keys() )
# If we only have one shard, we return it
if len(__lowerCamelCase ) == 1:
return {weights_name: sharded_state_dicts[0]}, None
# Otherwise, let's build the index
a = {}
a = {}
for idx, shard in enumerate(__lowerCamelCase ):
a = weights_name.replace(
""".bin""" , f'-{idx+1:05d}-of-{len(__lowerCamelCase ):05d}.bin' ) # len(sharded_state_dicts):05d}
a = os.path.join(__lowerCamelCase , weights_name.replace(""".bin""" , f'-{idx+1:05d}-of-???.bin' ) )
os.rename(__lowerCamelCase , os.path.join(__lowerCamelCase , __lowerCamelCase ) )
a = shard
for key in shard:
a = shard_file
# Add the metadata
a = {"""total_size""": total_size}
a = {"""metadata""": metadata, """weight_map""": weight_map}
with open(os.path.join(__lowerCamelCase , __lowerCamelCase ) , """w""" , encoding="""utf-8""" ) as f:
a = json.dumps(__lowerCamelCase , indent=2 , sort_keys=__lowerCamelCase ) + """\n"""
f.write(__lowerCamelCase )
return metadata, index
if __name__ == "__main__":
__UpperCamelCase : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--switch_t5x_checkpoint_path",
default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600",
type=str,
required=False,
help="Path to a directory containing a folder per layer. Follows the original Google format.",
)
parser.add_argument("--max_shard_size", default="10GB", required=False, help="Max shard size")
parser.add_argument("--dtype", default="bfloat16", type=str, required=False, help="dtype of the saved model")
parser.add_argument(
"--pytorch_dump_folder_path",
default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted",
type=str,
required=False,
help="Path to the output pytorch model.",
)
__UpperCamelCase : Any = parser.parse_args()
shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def __A ( ) -> Tuple:
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer
a = SwitchTransformersConfig.from_pretrained("""google/switch-base-8""" )
config.save_pretrained("""/home/arthur_huggingface_co/transformers/switch_converted""" )
a = SwitchTransformersForConditionalGeneration.from_pretrained(
"""/home/arthur_huggingface_co/transformers/switch_converted""" , device_map="""auto""" )
a = TaTokenizer.from_pretrained("""t5-small""" )
a = """A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."""
a = tokenizer(__lowerCamelCase , return_tensors="""pt""" ).input_ids
a = model.generate(__lowerCamelCase , decoder_start_token_id=0 )
print(tokenizer.decode(out[0] ) )
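# A toy sketch of the size-threshold sharding loop in shard_on_the_fly above
# (the parameter names and byte sizes below are made up):
max_shard_size = 10
sizes = {"wte": 4, "ln": 5, "q": 3, "mlp": 7}
shards, current, current_size = [], {}, 0
for name, size in sizes.items():
    if current_size + size > max_shard_size:
        shards.append(current)
        current, current_size = {}, 0
    current[name] = size
    current_size += size
shards.append(current)
print(shards)  # [{'wte': 4, 'ln': 5}, {'q': 3, 'mlp': 7}]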
| 347 | 0 |
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class __lowerCAmelCase :
UpperCamelCase__ = 42
UpperCamelCase__ = None
UpperCamelCase__ = None
def __A ( __lowerCamelCase ) -> str:
def is_valid_tree(__lowerCamelCase ) -> bool:
if node is None:
return True
if not isinstance(A__ , A__ ):
return False
try:
float(node.data )
except (TypeError, ValueError):
return False
return is_valid_tree(node.left ) and is_valid_tree(node.right )
if not is_valid_tree(A__ ):
raise ValueError(
"""Each node should be type of TreeNode and data should be float.""" )
def is_binary_search_tree_recursive_check(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> bool:
if node is None:
return True
return (
left_bound < node.data < right_bound
and is_binary_search_tree_recursive_check(node.left , A__ , node.data )
and is_binary_search_tree_recursive_check(
node.right , node.data , A__ )
)
return is_binary_search_tree_recursive_check(A__ , -float("""inf""" ) , float("""inf""" ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
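# A self-contained sketch of the same recursive bound-passing check, written with
# plainly named types (all names below are illustrative, not the ones used in this file):
from __future__ import annotations

from dataclasses import dataclass


@dataclass
class Node:
    data: float
    left: Node | None = None
    right: Node | None = None


def is_bst(node: Node | None, lo: float = float("-inf"), hi: float = float("inf")) -> bool:
    # Every node must fall strictly between the bounds inherited from its ancestors.
    if node is None:
        return True
    return lo < node.data < hi and is_bst(node.left, lo, node.data) and is_bst(node.right, node.data, hi)


print(is_bst(Node(2.0, Node(1.0), Node(3.0))))  # True
print(is_bst(Node(2.0, Node(3.0))))             # False: left child exceeds its upper bound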
| 358 |
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
import numpy as np
# Parameters
__UpperCamelCase : Union[str, Any] = (720, 1_280) # Height, Width
__UpperCamelCase : Any = (0.4, 0.6) # if height or width lower than this scale, drop it.
__UpperCamelCase : str = 1 / 100
__UpperCamelCase : Optional[int] = ""
__UpperCamelCase : List[Any] = ""
__UpperCamelCase : Union[str, Any] = ""
__UpperCamelCase : Tuple = 250
def __A ( ) -> None:
a , a = get_dataset(__lowerCamelCase , __lowerCamelCase )
for index in range(__lowerCamelCase ):
a = random.sample(range(len(__lowerCamelCase ) ) , 4 )
a , a , a = update_image_and_anno(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , filter_scale=__lowerCamelCase , )
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
a = random_chars(32 )
a = path.split(os.sep )[-1].rsplit(""".""" , 1 )[0]
a = f'{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}'
cva.imwrite(f'{file_root}.jpg' , __lowerCamelCase , [cva.IMWRITE_JPEG_QUALITY, 85] )
print(f'Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}' )
a = []
for anno in new_annos:
a = anno[3] - anno[1]
a = anno[4] - anno[2]
a = anno[1] + width / 2
a = anno[2] + height / 2
a = f'{anno[0]} {x_center} {y_center} {width} {height}'
annos_list.append(__lowerCamelCase )
with open(f'{file_root}.txt' , """w""" ) as outfile:
outfile.write("""\n""".join(line for line in annos_list ) )
def __A ( __lowerCamelCase , __lowerCamelCase ) -> tuple[list, list]:
a = []
a = []
for label_file in glob.glob(os.path.join(__lowerCamelCase , """*.txt""" ) ):
a = label_file.split(os.sep )[-1].rsplit(""".""" , 1 )[0]
with open(__lowerCamelCase ) as in_file:
a = in_file.readlines()
a = os.path.join(__lowerCamelCase , f'{label_name}.jpg' )
a = []
for obj_list in obj_lists:
a = obj_list.rstrip("""\n""" ).split(""" """ )
a = float(obj[1] ) - float(obj[3] ) / 2
a = float(obj[2] ) - float(obj[4] ) / 2
a = float(obj[1] ) + float(obj[3] ) / 2
a = float(obj[2] ) + float(obj[4] ) / 2
boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] )
if not boxes:
continue
img_paths.append(__lowerCamelCase )
labels.append(__lowerCamelCase )
return img_paths, labels
def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = 0.0 , ) -> tuple[list, list, str]:
a = np.zeros([output_size[0], output_size[1], 3] , dtype=np.uinta )
a = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
a = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
a = int(scale_x * output_size[1] )
a = int(scale_y * output_size[0] )
a = []
a = []
for i, index in enumerate(__lowerCamelCase ):
a = all_img_list[index]
path_list.append(__lowerCamelCase )
a = all_annos[index]
a = cva.imread(__lowerCamelCase )
if i == 0: # top-left
a = cva.resize(__lowerCamelCase , (divid_point_x, divid_point_y) )
a = img
for bbox in img_annos:
a = bbox[1] * scale_x
a = bbox[2] * scale_y
a = bbox[3] * scale_x
a = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 1: # top-right
a = cva.resize(__lowerCamelCase , (output_size[1] - divid_point_x, divid_point_y) )
a = img
for bbox in img_annos:
a = scale_x + bbox[1] * (1 - scale_x)
a = bbox[2] * scale_y
a = scale_x + bbox[3] * (1 - scale_x)
a = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 2: # bottom-left
a = cva.resize(__lowerCamelCase , (divid_point_x, output_size[0] - divid_point_y) )
a = img
for bbox in img_annos:
a = bbox[1] * scale_x
a = scale_y + bbox[2] * (1 - scale_y)
a = bbox[3] * scale_x
a = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
else: # bottom-right
a = cva.resize(
__lowerCamelCase , (output_size[1] - divid_point_x, output_size[0] - divid_point_y) )
a = img
for bbox in img_annos:
a = scale_x + bbox[1] * (1 - scale_x)
a = scale_y + bbox[2] * (1 - scale_y)
a = scale_x + bbox[3] * (1 - scale_x)
a = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
# Remove bounding box small than scale of filter
if filter_scale > 0:
a = [
anno
for anno in new_anno
if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
]
return output_img, new_anno, path_list[0]
def __A ( __lowerCamelCase ) -> str:
    assert number_char > 1, "The number of characters should be greater than 1"
a = ascii_lowercase + digits
return "".join(random.choice(__lowerCamelCase ) for _ in range(__lowerCamelCase ) )
if __name__ == "__main__":
main()
print("DONE ✅")
| 347 | 0 |
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
__UpperCamelCase : List[str] = logging.get_logger(__name__)
class __lowerCAmelCase :
def __init__( self :str , __magic_name__ :str = None , __magic_name__ :uuid.UUID = None , __magic_name__ :Optional[int]=None , __magic_name__ :Dict=None ):
'''simple docstring'''
if not conversation_id:
a : Optional[Any] = uuid.uuida()
if past_user_inputs is None:
a : Dict = []
if generated_responses is None:
a : int = []
a : uuid.UUID = conversation_id
a : List[str] = past_user_inputs
a : List[str] = generated_responses
a : Optional[str] = text
def __eq__( self :List[Any] , __magic_name__ :Union[str, Any] ):
'''simple docstring'''
if not isinstance(__magic_name__ , __magic_name__ ):
return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
def lowerCamelCase__ ( self :Dict , __magic_name__ :str , __magic_name__ :bool = False ):
'''simple docstring'''
if self.new_user_input:
if overwrite:
logger.warning(
F'User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '
F'with: "{text}".' )
a : Dict = text
else:
logger.warning(
F'User input added while unprocessed input was existing: "{self.new_user_input}" new input '
F'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input' )
else:
a : Union[str, Any] = text
def lowerCamelCase__ ( self :Dict ):
'''simple docstring'''
if self.new_user_input:
self.past_user_inputs.append(self.new_user_input )
a : List[str] = None
def lowerCamelCase__ ( self :List[str] , __magic_name__ :str ):
'''simple docstring'''
self.generated_responses.append(__magic_name__ )
def lowerCamelCase__ ( self :Union[str, Any] ):
'''simple docstring'''
for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
def __repr__( self :Union[str, Any] ):
'''simple docstring'''
a : Tuple = F'Conversation id: {self.uuid} \n'
for is_user, text in self.iter_texts():
a : Tuple = 'user' if is_user else 'bot'
output += F'{name} >> {text} \n'
return output
@add_end_docstrings(
    PIPELINE_INIT_ARGS , r'''\n min_length_for_response (`int`, *optional*, defaults to 32):\n The minimum length (in number of tokens) for a response.\n minimum_tokens (`int`, *optional*, defaults to 10):\n The minimum length of tokens to leave for a response.\n ''' , )
class __lowerCAmelCase ( Pipeline ):
def __init__( self :Optional[Any] , *__magic_name__ :List[str] , **__magic_name__ :Dict ):
'''simple docstring'''
super().__init__(*__magic_name__ , **__magic_name__ )
if self.tokenizer.pad_token_id is None:
a : Dict = self.tokenizer.eos_token
def lowerCamelCase__ ( self :Union[str, Any] , __magic_name__ :Any=None , __magic_name__ :List[str]=None , __magic_name__ :List[Any]=None , **__magic_name__ :List[Any] ):
'''simple docstring'''
a : Tuple = {}
a : Union[str, Any] = {}
a : Dict = {}
if min_length_for_response is not None:
a : int = min_length_for_response
if minimum_tokens is not None:
a : List[str] = minimum_tokens
if "max_length" in generate_kwargs:
a : Optional[int] = generate_kwargs['max_length']
# self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
if clean_up_tokenization_spaces is not None:
a : int = clean_up_tokenization_spaces
if generate_kwargs:
forward_params.update(__magic_name__ )
return preprocess_params, forward_params, postprocess_params
def __call__( self :Union[str, Any] , __magic_name__ :Union[Conversation, List[Conversation]] , __magic_name__ :str=0 , **__magic_name__ :Tuple ):
'''simple docstring'''
a : List[str] = super().__call__(__magic_name__ , num_workers=__magic_name__ , **__magic_name__ )
if isinstance(__magic_name__ , __magic_name__ ) and len(__magic_name__ ) == 1:
return outputs[0]
return outputs
def lowerCamelCase__ ( self :Dict , __magic_name__ :Conversation , __magic_name__ :Tuple=32 ):
'''simple docstring'''
if not isinstance(__magic_name__ , __magic_name__ ):
raise ValueError("""ConversationalPipeline, expects Conversation as inputs""" )
if conversation.new_user_input is None:
raise ValueError(
F'Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. '
"""Add user inputs with the conversation\'s `add_user_input` method""" )
if hasattr(self.tokenizer , """_build_conversation_input_ids""" ):
a : str = self.tokenizer._build_conversation_input_ids(__magic_name__ )
else:
# If the tokenizer cannot handle conversations, we default to only the old version
a : Any = self._legacy_parse_and_tokenize(__magic_name__ )
if self.framework == "pt":
a : Tuple = torch.LongTensor([input_ids] )
elif self.framework == "tf":
a : str = tf.constant([input_ids] )
return {"input_ids": input_ids, "conversation": conversation}
def lowerCamelCase__ ( self :List[Any] , __magic_name__ :List[Any] , __magic_name__ :str=10 , **__magic_name__ :str ):
'''simple docstring'''
a : str = generate_kwargs.get("""max_length""" , self.model.config.max_length )
a : List[Any] = model_inputs['input_ids'].shape[1]
if max_length - minimum_tokens < n:
            logger.warning(F'Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})' )
a : List[Any] = max_length - minimum_tokens
a : Optional[Any] = model_inputs['input_ids'][:, -trim:]
if "attention_mask" in model_inputs:
a : str = model_inputs['attention_mask'][:, -trim:]
a : Tuple = model_inputs.pop("""conversation""" )
a : int = max_length
a : str = self.model.generate(**__magic_name__ , **__magic_name__ )
if self.model.config.is_encoder_decoder:
a : Union[str, Any] = 1
else:
a : Union[str, Any] = n
return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
def lowerCamelCase__ ( self :Union[str, Any] , __magic_name__ :Union[str, Any] , __magic_name__ :Union[str, Any]=True ):
'''simple docstring'''
a : Optional[Any] = model_outputs['output_ids']
a : Optional[Any] = self.tokenizer.decode(
output_ids[0] , skip_special_tokens=__magic_name__ , clean_up_tokenization_spaces=__magic_name__ , )
a : Union[str, Any] = model_outputs['conversation']
conversation.mark_processed()
conversation.append_response(__magic_name__ )
return conversation
def lowerCamelCase__ ( self :Any , __magic_name__ :Conversation ):
'''simple docstring'''
a : Tuple = self.tokenizer.eos_token_id
a : str = []
for is_user, text in conversation.iter_texts():
if eos_token_id is not None:
input_ids.extend(self.tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) + [eos_token_id] )
else:
input_ids.extend(self.tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) )
if len(__magic_name__ ) > self.tokenizer.model_max_length:
a : List[str] = input_ids[-self.tokenizer.model_max_length :]
return input_ids
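# A minimal sketch of the legacy flattening implemented just above: every turn's ids are
# terminated with the EOS id before concatenation (the ids below are toy values):
eos_token_id = 2
turns = [[11, 12], [13], [14, 15]]
input_ids = [tok for turn in turns for tok in turn + [eos_token_id]]
print(input_ids)  # [11, 12, 2, 13, 2, 14, 15, 2]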
| 359 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__UpperCamelCase : Optional[Any] = {
"configuration_mobilenet_v2": [
"MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP",
"MobileNetV2Config",
"MobileNetV2OnnxConfig",
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : int = ["MobileNetV2FeatureExtractor"]
__UpperCamelCase : Tuple = ["MobileNetV2ImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : int = [
"MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST",
"MobileNetV2ForImageClassification",
"MobileNetV2ForSemanticSegmentation",
"MobileNetV2Model",
"MobileNetV2PreTrainedModel",
"load_tf_weights_in_mobilenet_v2",
]
if TYPE_CHECKING:
from .configuration_mobilenet_va import (
MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileNetVaConfig,
MobileNetVaOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilenet_va import MobileNetVaFeatureExtractor
from .image_processing_mobilenet_va import MobileNetVaImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilenet_va import (
MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileNetVaForImageClassification,
MobileNetVaForSemanticSegmentation,
MobileNetVaModel,
MobileNetVaPreTrainedModel,
load_tf_weights_in_mobilenet_va,
)
else:
import sys
__UpperCamelCase : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
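# A stripped-down sketch of the lazy-import idea behind _LazyModule: attribute access
# triggers the real submodule import on first use (a simplified illustration, not the
# actual transformers implementation):
import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        # Find which submodule exports this name, import it, and forward the lookup.
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module("." + submodule, self.__name__)
                return getattr(module, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")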
| 347 | 0 |
"""simple docstring"""
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class __lowerCAmelCase :
def __init__( self :Tuple , __magic_name__ :Optional[int] , __magic_name__ :List[str]=2 , __magic_name__ :Dict=32 , __magic_name__ :Optional[Any]=16 , __magic_name__ :int=3 , __magic_name__ :Optional[Any]=True , __magic_name__ :List[str]=True , __magic_name__ :Any=32 , __magic_name__ :List[Any]=4 , __magic_name__ :Optional[int]=[0, 1, 2, 3] , __magic_name__ :List[Any]=4 , __magic_name__ :Optional[Any]=37 , __magic_name__ :str="gelu" , __magic_name__ :Any=0.1 , __magic_name__ :str=0.1 , __magic_name__ :Optional[int]=0.02 , __magic_name__ :Dict=3 , __magic_name__ :str=[1, 384, 24, 24] , __magic_name__ :List[Any]=True , __magic_name__ :Union[str, Any]=None , ):
'''simple docstring'''
a = parent
a = batch_size
a = image_size
a = patch_size
a = num_channels
a = is_training
a = use_labels
a = hidden_size
a = num_hidden_layers
a = backbone_out_indices
a = num_attention_heads
a = intermediate_size
a = hidden_act
a = hidden_dropout_prob
a = attention_probs_dropout_prob
a = initializer_range
a = num_labels
a = backbone_featmap_shape
a = scope
a = is_hybrid
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
a = (image_size // patch_size) ** 2
a = num_patches + 1
def lowerCamelCase__ ( self :Dict ):
'''simple docstring'''
a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
a = None
if self.use_labels:
a = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
a = self.get_config()
return config, pixel_values, labels
def lowerCamelCase__ ( self :List[Any] ):
'''simple docstring'''
a = {
"""global_padding""": """same""",
"""layer_type""": """bottleneck""",
"""depths""": [3, 4, 9],
"""out_features""": ["""stage1""", """stage2""", """stage3"""],
"""embedding_dynamic_padding""": True,
"""hidden_sizes""": [96, 192, 384, 768],
"""num_groups""": 2,
}
return DPTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__UpperCamelCase , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=__UpperCamelCase , backbone_featmap_shape=self.backbone_featmap_shape , )
def lowerCamelCase__ ( self :str , __magic_name__ :Tuple , __magic_name__ :str , __magic_name__ :Union[str, Any] ):
'''simple docstring'''
a = DPTModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
a = model(__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase__ ( self :int , __magic_name__ :Any , __magic_name__ :Optional[Any] , __magic_name__ :str ):
'''simple docstring'''
a = self.num_labels
a = DPTForDepthEstimation(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
a = model(__UpperCamelCase )
self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
def lowerCamelCase__ ( self :Union[str, Any] , __magic_name__ :List[Any] , __magic_name__ :List[Any] , __magic_name__ :Tuple ):
'''simple docstring'''
a = self.num_labels
a = DPTForSemanticSegmentation(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
a = model(__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def lowerCamelCase__ ( self :List[Any] ):
'''simple docstring'''
a = self.prepare_config_and_inputs()
a , a , a = config_and_inputs
a = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
UpperCamelCase__ = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
UpperCamelCase__ = (
{
'''depth-estimation''': DPTForDepthEstimation,
'''feature-extraction''': DPTModel,
'''image-segmentation''': DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
def lowerCamelCase__ ( self :int ):
'''simple docstring'''
a = DPTModelTester(self )
a = ConfigTester(self , config_class=__UpperCamelCase , has_text_modality=__UpperCamelCase , hidden_size=37 )
def lowerCamelCase__ ( self :Union[str, Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""DPT does not use inputs_embeds""" )
def lowerCamelCase__ ( self :List[str] ):
'''simple docstring'''
pass
def lowerCamelCase__ ( self :str ):
'''simple docstring'''
a , a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a = model_class(__UpperCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
a = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__UpperCamelCase , nn.Linear ) )
def lowerCamelCase__ ( self :Optional[int] ):
'''simple docstring'''
a , a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a = model_class(__UpperCamelCase )
a = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a = [*signature.parameters.keys()]
a = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __UpperCamelCase )
def lowerCamelCase__ ( self :str ):
'''simple docstring'''
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def lowerCamelCase__ ( self :int ):
'''simple docstring'''
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*__UpperCamelCase )
def lowerCamelCase__ ( self :Optional[int] ):
'''simple docstring'''
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__UpperCamelCase )
def lowerCamelCase__ ( self :int ):
'''simple docstring'''
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
a , a = self.model_tester.prepare_config_and_inputs_for_common()
a = True
if model_class in get_values(__UpperCamelCase ):
continue
a = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.train()
a = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase )
a = model(**__UpperCamelCase ).loss
loss.backward()
def lowerCamelCase__ ( self :Optional[int] ):
'''simple docstring'''
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
a , a = self.model_tester.prepare_config_and_inputs_for_common()
a = False
a = True
if model_class in get_values(__UpperCamelCase ) or not model_class.supports_gradient_checkpointing:
continue
a = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.gradient_checkpointing_enable()
model.train()
a = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase )
a = model(**__UpperCamelCase ).loss
loss.backward()
def lowerCamelCase__ ( self :Tuple ):
'''simple docstring'''
a , a = self.model_tester.prepare_config_and_inputs_for_common()
a = _config_zero_init(__UpperCamelCase )
for model_class in self.all_model_classes:
a = model_class(config=__UpperCamelCase )
# Skip the check for the backbone
a = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
a = [F'{name}.{key}' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def lowerCamelCase__ ( self :Dict ):
'''simple docstring'''
pass
@slow
def lowerCamelCase__ ( self :Optional[int] ):
'''simple docstring'''
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
a = DPTModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
def lowerCamelCase__ ( self :Optional[int] ):
'''simple docstring'''
a , a = self.model_tester.prepare_config_and_inputs_for_common()
a = """add"""
with self.assertRaises(__UpperCamelCase ):
a = DPTForDepthEstimation(__UpperCamelCase )
def __A ( ) -> int:
a = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
@slow
class __lowerCAmelCase ( unittest.TestCase ):
def lowerCamelCase__ ( self :Any ):
'''simple docstring'''
a = DPTImageProcessor.from_pretrained("""Intel/dpt-hybrid-midas""" )
a = DPTForDepthEstimation.from_pretrained("""Intel/dpt-hybrid-midas""" ).to(__UpperCamelCase )
a = prepare_img()
a = image_processor(images=__UpperCamelCase , return_tensors="""pt""" ).to(__UpperCamelCase )
# forward pass
with torch.no_grad():
a = model(**__UpperCamelCase )
a = outputs.predicted_depth
# verify the predicted depth
a = torch.Size((1, 384, 384) )
self.assertEqual(predicted_depth.shape , __UpperCamelCase )
a = torch.tensor(
[[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]] ).to(__UpperCamelCase )
self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100 , __UpperCamelCase , atol=1E-4 ) )
| 360 |
def __A ( __lowerCamelCase ) -> bool:
if num < 0:
return False
a = num
a = 0
while num > 0:
a = rev_num * 10 + (num % 10)
num //= 10
return num_copy == rev_num
if __name__ == "__main__":
import doctest
doctest.testmod()
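# The loop above peels off the last digit of num each iteration and pushes it onto
# rev_num; a short trace with a toy value:
num, rev_num = 121, 0
while num > 0:
    rev_num = rev_num * 10 + num % 10
    num //= 10
print(rev_num)  # 121, so the original 121 reads the same reversed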
| 347 | 0 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class __lowerCAmelCase :
def __init__( self :Optional[int] , __magic_name__ :Optional[Any] , __magic_name__ :Any=13 , __magic_name__ :List[Any]=7 , __magic_name__ :Dict=True , __magic_name__ :List[str]=True , __magic_name__ :Optional[int]=True , __magic_name__ :Optional[int]=True , __magic_name__ :Optional[int]=99 , __magic_name__ :List[str]=32 , __magic_name__ :Optional[Any]=2 , __magic_name__ :str=4 , __magic_name__ :Union[str, Any]=37 , __magic_name__ :Dict="gelu" , __magic_name__ :int=0.1 , __magic_name__ :List[Any]=0.1 , __magic_name__ :List[str]=512 , __magic_name__ :Optional[Any]=16 , __magic_name__ :Optional[Any]=2 , __magic_name__ :Dict=0.02 , __magic_name__ :int=3 , __magic_name__ :Tuple=4 , __magic_name__ :Optional[int]=None , __magic_name__ :Union[str, Any]=0 , ):
'''simple docstring'''
a = parent
a = batch_size
a = seq_length
a = is_training
a = use_input_mask
a = use_token_type_ids
a = use_labels
a = vocab_size
a = hidden_size
a = num_hidden_layers
a = num_attention_heads
a = intermediate_size
a = hidden_act
a = hidden_dropout_prob
a = attention_probs_dropout_prob
a = max_position_embeddings
a = type_vocab_size
a = type_sequence_label_size
a = initializer_range
a = num_labels
a = num_choices
a = scope
a = projection_dim
def lowerCamelCase__ ( self :int ):
'''simple docstring'''
a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
a = None
if self.use_input_mask:
# follow test_modeling_tf_ctrl.py
a = random_attention_mask([self.batch_size, self.seq_length] )
a = None
if self.use_token_type_ids:
a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
a = None
a = None
a = None
if self.use_labels:
a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
a = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
a = ids_tensor([self.batch_size] , self.num_choices )
a = BertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , )
a = DPRConfig(projection_dim=self.projection_dim , **config.to_dict() )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCamelCase__ ( self :List[Any] , __magic_name__ :Optional[Any] , __magic_name__ :int , __magic_name__ :Tuple , __magic_name__ :int , __magic_name__ :Optional[Any] , __magic_name__ :str , __magic_name__ :Optional[int] ):
'''simple docstring'''
a = TFDPRContextEncoder(config=UpperCAmelCase__ )
a = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ )
a = model(UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ )
a = model(UpperCAmelCase__ )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
def lowerCamelCase__ ( self :Dict , __magic_name__ :List[Any] , __magic_name__ :List[str] , __magic_name__ :Optional[Any] , __magic_name__ :List[str] , __magic_name__ :Dict , __magic_name__ :int , __magic_name__ :List[Any] ):
'''simple docstring'''
a = TFDPRQuestionEncoder(config=UpperCAmelCase__ )
a = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ )
a = model(UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ )
a = model(UpperCAmelCase__ )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
def lowerCamelCase__ ( self :List[Any] , __magic_name__ :Optional[int] , __magic_name__ :List[str] , __magic_name__ :Union[str, Any] , __magic_name__ :List[Any] , __magic_name__ :List[Any] , __magic_name__ :Optional[int] , __magic_name__ :int ):
'''simple docstring'''
a = TFDPRReader(config=UpperCAmelCase__ )
a = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.relevance_logits.shape , (self.batch_size,) )
def lowerCamelCase__ ( self :int ):
'''simple docstring'''
a = self.prepare_config_and_inputs()
        a , a , a , a , a , a , a = config_and_inputs
a = {"""input_ids""": input_ids}
return config, inputs_dict
@require_tf
class __lowerCAmelCase ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
UpperCamelCase__ = (
(
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
if is_tf_available()
else ()
)
UpperCamelCase__ = {'''feature-extraction''': TFDPRQuestionEncoder} if is_tf_available() else {}
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
def lowerCamelCase__ ( self :Tuple ):
'''simple docstring'''
a = TFDPRModelTester(self )
a = ConfigTester(self , config_class=UpperCAmelCase__ , hidden_size=37 )
def lowerCamelCase__ ( self :Tuple ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase__ ( self :List[str] ):
'''simple docstring'''
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_context_encoder(*UpperCAmelCase__ )
def lowerCamelCase__ ( self :Optional[Any] ):
'''simple docstring'''
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_question_encoder(*UpperCAmelCase__ )
def lowerCamelCase__ ( self :Any ):
'''simple docstring'''
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_reader(*UpperCAmelCase__ )
@slow
def lowerCamelCase__ ( self :Optional[Any] ):
'''simple docstring'''
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a = TFDPRContextEncoder.from_pretrained(UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a = TFDPRContextEncoder.from_pretrained(UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a = TFDPRQuestionEncoder.from_pretrained(UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a = TFDPRReader.from_pretrained(UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
@require_tf
class __lowerCAmelCase ( unittest.TestCase ):
@slow
def lowerCamelCase__ ( self :List[Any] ):
'''simple docstring'''
a = TFDPRQuestionEncoder.from_pretrained("""facebook/dpr-question_encoder-single-nq-base""" )
a = tf.constant(
[[101, 7592, 1010, 2003, 2026, 3899, 1_0140, 1029, 102]] ) # [CLS] hello, is my dog cute? [SEP]
a = model(UpperCAmelCase__ )[0] # embedding shape = (1, 768)
# compare the actual values for a slice.
a = tf.constant(
[
[
0.03236253,
0.12753335,
0.16818509,
0.00279786,
0.3896933,
0.24264945,
0.2178971,
-0.02335227,
-0.08481959,
-0.14324117,
]
] )
self.assertTrue(numpy.allclose(output[:, :10].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 361 |
import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class __lowerCAmelCase ( TokenizerTesterMixin , unittest.TestCase ):
UpperCamelCase__ = CanineTokenizer
UpperCamelCase__ = False
def lowerCamelCase__ ( self :Tuple ):
'''simple docstring'''
super().setUp()
a = CanineTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def lowerCamelCase__ ( self :Dict ):
'''simple docstring'''
return CanineTokenizer.from_pretrained("""google/canine-s""" )
def lowerCamelCase__ ( self :Tuple , **__magic_name__ :Dict ):
'''simple docstring'''
a = self.tokenizer_class.from_pretrained(self.tmpdirname , **__magic_name__ )
a = 1024
return tokenizer
@require_torch
def lowerCamelCase__ ( self :int ):
'''simple docstring'''
a = self.canine_tokenizer
a = ["""Life is like a box of chocolates.""", """You never know what you're gonna get."""]
# fmt: off
a = [5_7344, 76, 105, 102, 101, 32, 105, 115, 32, 108, 105, 107, 101, 32, 97, 32, 98, 111, 120, 32, 111, 102, 32, 99, 104, 111, 99, 111, 108, 97, 116, 101, 115, 46, 5_7345, 0, 0, 0, 0]
# fmt: on
a = tokenizer(__magic_name__ , padding=__magic_name__ , return_tensors="""pt""" )
self.assertIsInstance(__magic_name__ , __magic_name__ )
a = list(batch.input_ids.numpy()[0] )
self.assertListEqual(__magic_name__ , __magic_name__ )
self.assertEqual((2, 39) , batch.input_ids.shape )
self.assertEqual((2, 39) , batch.attention_mask.shape )
@require_torch
def lowerCamelCase__ ( self :Any ):
'''simple docstring'''
a = self.canine_tokenizer
a = ["""Once there was a man.""", """He wrote a test in HuggingFace Tranformers."""]
a = tokenizer(__magic_name__ , padding=__magic_name__ , return_tensors="""pt""" )
# check if input_ids, attention_mask and token_type_ids are returned
self.assertIn("""input_ids""" , __magic_name__ )
self.assertIn("""attention_mask""" , __magic_name__ )
self.assertIn("""token_type_ids""" , __magic_name__ )
@require_torch
def lowerCamelCase__ ( self :Any ):
'''simple docstring'''
a = self.canine_tokenizer
a = [
"""What's the weater?""",
"""It's about 25 degrees.""",
]
a = tokenizer(
text_target=__magic_name__ , max_length=32 , padding="""max_length""" , truncation=__magic_name__ , return_tensors="""pt""" )
self.assertEqual(32 , targets["""input_ids"""].shape[1] )
def lowerCamelCase__ ( self :Union[str, Any] ):
'''simple docstring'''
a = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
a = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
# Isolate this from the other tests because we save additional tokens/etc
a = tempfile.mkdtemp()
a = """ He is very happy, UNwant\u00E9d,running"""
a = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
tokenizer.save_pretrained(__magic_name__ )
a = tokenizer.__class__.from_pretrained(__magic_name__ )
a = after_tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
self.assertListEqual(__magic_name__ , __magic_name__ )
shutil.rmtree(__magic_name__ )
a = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
# Isolate this from the other tests because we save additional tokens/etc
a = tempfile.mkdtemp()
a = """ He is very happy, UNwant\u00E9d,running"""
a = tokenizer.additional_special_tokens
# We can add a new special token for Canine as follows:
a = chr(0Xe_0_0_7 )
additional_special_tokens.append(__magic_name__ )
tokenizer.add_special_tokens({"""additional_special_tokens""": additional_special_tokens} )
a = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
tokenizer.save_pretrained(__magic_name__ )
a = tokenizer.__class__.from_pretrained(__magic_name__ )
a = after_tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
self.assertListEqual(__magic_name__ , __magic_name__ )
self.assertIn(__magic_name__ , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
a = tokenizer.__class__.from_pretrained(__magic_name__ , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(__magic_name__ )
def lowerCamelCase__ ( self :List[Any] ):
'''simple docstring'''
a = self.get_tokenizers(do_lower_case=__magic_name__ )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
a , a = self.get_clean_sequence(__magic_name__ )
# a special token for Canine can be defined as follows:
a = 0Xe_0_0_5
a = chr(__magic_name__ )
tokenizer.add_special_tokens({"""cls_token""": special_token} )
a = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
self.assertEqual(len(__magic_name__ ) , 1 )
a = tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=__magic_name__ )
a = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
a = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
a = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
self.assertEqual(__magic_name__ , input_encoded + special_token_id )
a = tokenizer.decode(__magic_name__ , skip_special_tokens=__magic_name__ )
self.assertTrue(special_token not in decoded )
def lowerCamelCase__ ( self :int ):
'''simple docstring'''
a = self.get_tokenizers(do_lower_case=__magic_name__ )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
a = chr(0Xe_0_0_5 )
a = chr(0Xe_0_0_6 )
# `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=__magic_name__ )
# `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
# which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
tokenizer.add_special_tokens({"""additional_special_tokens""": [SPECIAL_TOKEN_2]} )
a = tokenizer.tokenize(__magic_name__ )
a = tokenizer.tokenize(__magic_name__ )
self.assertEqual(len(__magic_name__ ) , 1 )
self.assertEqual(len(__magic_name__ ) , 1 )
self.assertEqual(token_a[0] , __magic_name__ )
self.assertEqual(token_a[0] , __magic_name__ )
@require_tokenizers
def lowerCamelCase__ ( self :Dict ):
'''simple docstring'''
a = self.get_tokenizers(do_lower_case=__magic_name__ )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
# a special token for Canine can be defined as follows:
a = 0Xe_0_0_6
a = chr(__magic_name__ )
a = AddedToken(__magic_name__ , lstrip=__magic_name__ )
tokenizer.add_special_tokens({"""additional_special_tokens""": [new_token]} )
with tempfile.TemporaryDirectory() as tmp_dir_name:
tokenizer.save_pretrained(__magic_name__ )
tokenizer.from_pretrained(__magic_name__ )
def lowerCamelCase__ ( self :int ):
'''simple docstring'''
a = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(__magic_name__ )
with open(os.path.join(__magic_name__ , """special_tokens_map.json""" ) , encoding="""utf-8""" ) as json_file:
a = json.load(__magic_name__ )
with open(os.path.join(__magic_name__ , """tokenizer_config.json""" ) , encoding="""utf-8""" ) as json_file:
a = json.load(__magic_name__ )
# a special token for Canine can be defined as follows:
a = 0Xe_0_0_6
a = chr(__magic_name__ )
a = [new_token_a]
a = [new_token_a]
with open(os.path.join(__magic_name__ , """special_tokens_map.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
json.dump(__magic_name__ , __magic_name__ )
with open(os.path.join(__magic_name__ , """tokenizer_config.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
json.dump(__magic_name__ , __magic_name__ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
a = tokenizer_class.from_pretrained(__magic_name__ , extra_ids=0 )
self.assertIn(__magic_name__ , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , )
a = 0Xe_0_0_7
a = chr(__magic_name__ )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
a = [AddedToken(__magic_name__ , lstrip=__magic_name__ )]
a = tokenizer_class.from_pretrained(
__magic_name__ , additional_special_tokens=__magic_name__ , extra_ids=0 )
self.assertIn(__magic_name__ , tokenizer.additional_special_tokens )
# self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) )
@require_tokenizers
def lowerCamelCase__ ( self :Union[str, Any] ):
'''simple docstring'''
a = self.get_tokenizers(do_lower_case=__magic_name__ )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
a = """hello world"""
if self.space_between_special_tokens:
a = """[CLS] hello world [SEP]"""
else:
a = input
a = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
a = tokenizer.decode(__magic_name__ , spaces_between_special_tokens=self.space_between_special_tokens )
self.assertIn(__magic_name__ , [output, output.lower()] )
def lowerCamelCase__ ( self :List[str] ):
'''simple docstring'''
a = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
a = [
"""bos_token""",
"""eos_token""",
"""unk_token""",
"""sep_token""",
"""pad_token""",
"""cls_token""",
"""mask_token""",
]
a = """a"""
a = ord(__magic_name__ )
for attr in attributes_list:
setattr(__magic_name__ , attr + """_id""" , __magic_name__ )
self.assertEqual(getattr(__magic_name__ , __magic_name__ ) , __magic_name__ )
self.assertEqual(getattr(__magic_name__ , attr + """_id""" ) , __magic_name__ )
setattr(__magic_name__ , attr + """_id""" , __magic_name__ )
self.assertEqual(getattr(__magic_name__ , __magic_name__ ) , __magic_name__ )
self.assertEqual(getattr(__magic_name__ , attr + """_id""" ) , __magic_name__ )
setattr(__magic_name__ , """additional_special_tokens_ids""" , [] )
self.assertListEqual(getattr(__magic_name__ , """additional_special_tokens""" ) , [] )
self.assertListEqual(getattr(__magic_name__ , """additional_special_tokens_ids""" ) , [] )
a = 0Xe_0_0_6
a = chr(__magic_name__ )
setattr(__magic_name__ , """additional_special_tokens_ids""" , [additional_special_token_id] )
self.assertListEqual(getattr(__magic_name__ , """additional_special_tokens""" ) , [additional_special_token] )
self.assertListEqual(getattr(__magic_name__ , """additional_special_tokens_ids""" ) , [additional_special_token_id] )
def lowerCamelCase__ ( self :Any ):
'''simple docstring'''
pass
def lowerCamelCase__ ( self :str ):
'''simple docstring'''
pass
def lowerCamelCase__ ( self :Optional[int] ):
'''simple docstring'''
pass
def lowerCamelCase__ ( self :List[Any] ):
'''simple docstring'''
pass
def lowerCamelCase__ ( self :List[str] ):
'''simple docstring'''
pass
def lowerCamelCase__ ( self :List[str] ):
'''simple docstring'''
pass
def lowerCamelCase__ ( self :Any ):
'''simple docstring'''
pass
def lowerCamelCase__ ( self :List[str] ):
'''simple docstring'''
pass
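# The special tokens exercised throughout this test are single characters from Unicode's
# private use area; a quick look at the codepoints used above:
for codepoint in (0xE005, 0xE006, 0xE007):
    print(hex(codepoint), repr(chr(codepoint)))  # e.g. 0xe005 '\ue005'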
| 347 | 0 |
import math
import os
import sys
def __A ( __lowerCamelCase ) -> str:
a = """"""
try:
with open(__lowerCAmelCase , """rb""" ) as binary_file:
a = binary_file.read()
for dat in data:
a = f'{dat:08b}'
result += curr_byte
return result
except OSError:
print("""File not accessible""" )
sys.exit()
def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> None:
lexicon.pop(__lowerCAmelCase )
a = last_match_id
if math.loga(__lowerCAmelCase ).is_integer():
for curr_key in lexicon:
a = """0""" + lexicon[curr_key]
a = bin(__lowerCAmelCase )[2:]
def __A ( __lowerCamelCase ) -> str:
a = {"""0""": """0""", """1""": """1"""}
a = """""", """"""
a = len(__lowerCAmelCase )
for i in range(len(__lowerCAmelCase ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
a = lexicon[curr_string]
result += last_match_id
add_key_to_lexicon(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
index += 1
a = """"""
while curr_string != "" and curr_string not in lexicon:
curr_string += "0"
if curr_string != "":
a = lexicon[curr_string]
result += last_match_id
return result
def __A ( __lowerCamelCase , __lowerCamelCase ) -> str:
a = os.path.getsize(__lowerCAmelCase )
a = bin(__lowerCAmelCase )[2:]
a = len(__lowerCAmelCase )
return "0" * (length_length - 1) + file_length_binary + compressed
def __A ( __lowerCamelCase , __lowerCamelCase ) -> None:
a = 8
try:
with open(__lowerCAmelCase , """wb""" ) as opened_file:
a = [
to_write[i : i + byte_length]
for i in range(0 , len(__lowerCAmelCase ) , __lowerCAmelCase )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append("""10000000""" )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array:
opened_file.write(int(__lowerCAmelCase , 2 ).to_bytes(1 , byteorder="""big""" ) )
except OSError:
print("""File not accessible""" )
sys.exit()
def __A ( __lowerCamelCase , __lowerCamelCase ) -> None:
a = read_file_binary(__lowerCAmelCase )
a = compress_data(__lowerCAmelCase )
a = add_file_length(__lowerCAmelCase , __lowerCAmelCase )
write_file_binary(__lowerCAmelCase , __lowerCAmelCase )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2]) | 362 |
def __A ( __lowerCamelCase ) -> bool:
return number & 1 == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
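# Hedged aside (illustration only): `n & 1` isolates the least significant
# bit, which is 0 exactly for even integers -- negatives included, since
# Python integers behave as infinite two's complement: (-4 & 1) == 0 and
# (-3 & 1) == 1, matching (n % 2 == 0) for every int n.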
| 347 | 0 |
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
__UpperCamelCase : Dict = logging.getLogger(__name__)
__UpperCamelCase : Optional[int] = tf.data.AUTOTUNE
def __A ( ) -> List[Any]:
a = argparse.ArgumentParser(description="""Train a masked language model on TPU.""" )
parser.add_argument(
"""--pretrained_model_config""" , type=lowercase__ , default="""roberta-base""" , help="""The model config to use. Note that we don't copy the model's weights, only the config!""" , )
parser.add_argument(
"""--tokenizer""" , type=lowercase__ , default="""unigram-tokenizer-wikitext""" , help="""The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.""" , )
parser.add_argument(
"""--per_replica_batch_size""" , type=lowercase__ , default=8 , help="""Batch size per TPU core.""" , )
parser.add_argument(
"""--no_tpu""" , action="""store_true""" , help="""If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.""" , )
parser.add_argument(
"""--tpu_name""" , type=lowercase__ , help="""Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.""" , default="""local""" , )
parser.add_argument(
"""--tpu_zone""" , type=lowercase__ , help="""Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.""" , )
parser.add_argument(
"""--gcp_project""" , type=lowercase__ , help="""Google cloud project name. Only used for non-Colab TPU nodes.""" )
parser.add_argument(
"""--bfloat16""" , action="""store_true""" , help="""Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.""" , )
parser.add_argument(
"""--train_dataset""" , type=lowercase__ , help="""Path to training dataset to load. If the path begins with `gs://`"""
""" then the dataset will be loaded from a Google Cloud Storage bucket.""" , )
parser.add_argument(
"""--shuffle_buffer_size""" , type=lowercase__ , default=2**18 , help="""Size of the shuffle buffer (in samples)""" , )
parser.add_argument(
"""--eval_dataset""" , type=lowercase__ , help="""Path to evaluation dataset to load. If the path begins with `gs://`"""
""" then the dataset will be loaded from a Google Cloud Storage bucket.""" , )
parser.add_argument(
"""--num_epochs""" , type=lowercase__ , default=1 , help="""Number of epochs to train for.""" , )
parser.add_argument(
"""--learning_rate""" , type=lowercase__ , default=1E-4 , help="""Learning rate to use for training.""" , )
parser.add_argument(
"""--weight_decay_rate""" , type=lowercase__ , default=1E-3 , help="""Weight decay rate to use for training.""" , )
parser.add_argument(
"""--max_length""" , type=lowercase__ , default=512 , help="""Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py""" , )
parser.add_argument(
"""--mlm_probability""" , type=lowercase__ , default=0.15 , help="""Fraction of tokens to mask during training.""" , )
parser.add_argument("""--output_dir""" , type=lowercase__ , required=lowercase__ , help="""Path to save model checkpoints to.""" )
parser.add_argument("""--hub_model_id""" , type=lowercase__ , help="""Model ID to upload to on the Hugging Face Hub.""" )
a = parser.parse_args()
return args
def __A ( __lowerCamelCase ) -> Union[str, Any]:
try:
if args.tpu_name:
a = tf.distribute.cluster_resolver.TPUClusterResolver(
args.tpu_name , zone=args.tpu_zone , project=args.gcp_project )
else:
a = tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
raise RuntimeError(
"""Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or """
"""--gcp_project. When running on a TPU VM, use --tpu_name local.""" )
tf.config.experimental_connect_to_cluster(lowercase__ )
tf.tpu.experimental.initialize_tpu_system(lowercase__ )
return tpu
def __A ( __lowerCamelCase ) -> Optional[int]:
a = 0
for file in file_list:
a = file.split("""/""" )[-1]
a = re.search(R"""-\d+-(\d+)\.tfrecord""" , lowercase__ ).group(1 )
a = int(lowercase__ )
num_samples += sample_count
return num_samples
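# Hedged helper (unused by the script; the filename convention is inferred
# from the regex in the function above, not from external documentation):
# shard names are assumed to end in "-<shard_index>-<sample_count>.tfrecord".
def _demo_shard_count(fname: str = "wiki-0003-4096.tfrecord") -> int:
    return int(re.search(R"-\d+-(\d+)\.tfrecord", fname).group(1))  # -> 4096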
def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=None ) -> Optional[Any]:
a = count_samples(lowercase__ )
a = tf.data.Dataset.from_tensor_slices(lowercase__ )
if shuffle:
a = dataset.shuffle(len(lowercase__ ) )
a = tf.data.TFRecordDataset(lowercase__ , num_parallel_reads=lowercase__ )
# TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
a = dataset.apply(tf.data.experimental.assert_cardinality(lowercase__ ) )
a = dataset.map(lowercase__ , num_parallel_calls=lowercase__ )
if shuffle:
assert shuffle_buffer_size is not None
a = dataset.shuffle(args.shuffle_buffer_size )
a = dataset.batch(lowercase__ , drop_remainder=lowercase__ )
a = dataset.map(lowercase__ , num_parallel_calls=lowercase__ )
a = dataset.prefetch(lowercase__ )
return dataset
def __A ( __lowerCamelCase ) -> Tuple:
if not args.no_tpu:
a = initialize_tpu(lowercase__ )
a = tf.distribute.TPUStrategy(lowercase__ )
else:
a = tf.distribute.OneDeviceStrategy(device="""/gpu:0""" )
if args.bfloataa:
tf.keras.mixed_precision.set_global_policy("""mixed_bfloat16""" )
a = AutoTokenizer.from_pretrained(args.tokenizer )
a = AutoConfig.from_pretrained(args.pretrained_model_config )
a = tokenizer.vocab_size
a = tf.io.gfile.glob(os.path.join(args.train_dataset , """*.tfrecord""" ) )
if not training_records:
raise ValueError(f'No .tfrecord files found in {args.train_dataset}.' )
a = tf.io.gfile.glob(os.path.join(args.eval_dataset , """*.tfrecord""" ) )
if not eval_records:
raise ValueError(f'No .tfrecord files found in {args.eval_dataset}.' )
a = count_samples(lowercase__ )
a = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
a = steps_per_epoch * args.num_epochs
with strategy.scope():
a = TFAutoModelForMaskedLM.from_config(lowercase__ )
model(model.dummy_inputs ) # Pass some dummy inputs through the model to ensure all the weights are built
a , a = create_optimizer(
num_train_steps=lowercase__ , num_warmup_steps=total_train_steps // 20 , init_lr=args.learning_rate , weight_decay_rate=args.weight_decay_rate , )
# Transformers models compute the right loss for their task by default when labels are passed, and will
# use this for training unless you specify your own loss function in compile().
model.compile(optimizer=lowercase__ , metrics=["""accuracy"""] )
def decode_fn(__lowerCamelCase ):
a = {
"""input_ids""": tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ),
"""attention_mask""": tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ),
}
return tf.io.parse_single_example(lowercase__ , lowercase__ )
# Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
# use their methods in our data pipeline.
a = DataCollatorForLanguageModeling(
tokenizer=lowercase__ , mlm_probability=args.mlm_probability , mlm=lowercase__ , return_tensors="""tf""" )
def mask_with_collator(__lowerCamelCase ):
# TF really needs an isin() function
a = (
~tf.cast(batch["""attention_mask"""] , tf.bool )
| (batch["""input_ids"""] == tokenizer.cls_token_id)
| (batch["""input_ids"""] == tokenizer.sep_token_id)
)
a , a = data_collator.tf_mask_tokens(
batch["""input_ids"""] , vocab_size=len(lowercase__ ) , mask_token_id=tokenizer.mask_token_id , special_tokens_mask=lowercase__ , )
return batch
a = args.per_replica_batch_size * strategy.num_replicas_in_sync
a = prepare_dataset(
lowercase__ , decode_fn=lowercase__ , mask_fn=lowercase__ , batch_size=lowercase__ , shuffle=lowercase__ , shuffle_buffer_size=args.shuffle_buffer_size , )
a = prepare_dataset(
lowercase__ , decode_fn=lowercase__ , mask_fn=lowercase__ , batch_size=lowercase__ , shuffle=lowercase__ , )
a = []
if args.hub_model_id:
callbacks.append(
PushToHubCallback(output_dir=args.output_dir , hub_model_id=args.hub_model_id , tokenizer=lowercase__ ) )
model.fit(
lowercase__ , validation_data=lowercase__ , epochs=args.num_epochs , callbacks=lowercase__ , )
model.save_pretrained(args.output_dir )
if __name__ == "__main__":
__UpperCamelCase : Optional[int] = parse_args()
main(args)
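# Hedged standalone sketch (hypothetical token ids, plain NumPy; not called
# anywhere): mask_with_collator above builds a boolean mask over padding,
# CLS and SEP positions so the MLM collator never selects them -- the same
# idea in miniature:
def _special_tokens_mask_sketch():
    import numpy as np

    input_ids = np.array([[101, 7, 8, 102, 0]])  # 101/102 stand in for CLS/SEP
    attention_mask = np.array([[1, 1, 1, 1, 0]])
    special = (attention_mask == 0) | (input_ids == 101) | (input_ids == 102)
    return special  # [[ True False False  True  True]] -> only 7 and 8 are maskable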
| 363 |
def __A ( __lowerCamelCase ) -> int:
if not numbers:
return 0
if not isinstance(__lowerCamelCase , (list, tuple) ) or not all(
isinstance(__lowerCamelCase , __lowerCamelCase ) for number in numbers ):
raise ValueError("""numbers must be an iterable of integers""" )
a = a = a = numbers[0]
for i in range(1 , len(__lowerCamelCase ) ):
# update the maximum and minimum subarray products
a = numbers[i]
if number < 0:
a , a = min_till_now, max_till_now
a = max(__lowerCamelCase , max_till_now * number )
a = min(__lowerCamelCase , min_till_now * number )
# update the maximum product found till now
a = max(__lowerCamelCase , __lowerCamelCase )
return max_prod
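# Hedged worked example (comment only): for [2, 3, -2, 4] the running
# (max_till_now, min_till_now) pairs evolve (2, 2) -> (6, 3) -> (-2, -12)
# -> (4, -48), so max_prod settles at 6, the subarray [2, 3]. A negative
# element flips which running product is extremal, which is why the two
# running values are swapped before the update.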
| 347 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
UpperCamelCase__ = StableDiffusionInpaintPipeline
UpperCamelCase__ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
UpperCamelCase__ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
UpperCamelCase__ = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
UpperCamelCase__ = frozenset([] )
def lowerCamelCase__ ( self :Any ):
'''simple docstring'''
torch.manual_seed(0 )
a = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=_SCREAMING_SNAKE_CASE , )
a = PNDMScheduler(skip_prk_steps=_SCREAMING_SNAKE_CASE )
torch.manual_seed(0 )
a = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
a = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="""gelu""" , projection_dim=512 , )
a = CLIPTextModel(_SCREAMING_SNAKE_CASE )
a = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
a = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def lowerCamelCase__ ( self :str , __magic_name__ :List[Any] , __magic_name__ :Union[str, Any]=0 ):
'''simple docstring'''
a = floats_tensor((1, 3, 32, 32) , rng=random.Random(_SCREAMING_SNAKE_CASE ) ).to(_SCREAMING_SNAKE_CASE )
a = image.cpu().permute(0 , 2 , 3 , 1 )[0]
a = Image.fromarray(np.uinta(_SCREAMING_SNAKE_CASE ) ).convert("""RGB""" ).resize((64, 64) )
a = Image.fromarray(np.uinta(image + 4 ) ).convert("""RGB""" ).resize((64, 64) )
if str(_SCREAMING_SNAKE_CASE ).startswith("""mps""" ):
a = torch.manual_seed(_SCREAMING_SNAKE_CASE )
else:
a = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(_SCREAMING_SNAKE_CASE )
a = {
"prompt": "A painting of a squirrel eating a burger",
"image": init_image,
"mask_image": mask_image,
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def lowerCamelCase__ ( self :Dict ):
'''simple docstring'''
a = "cpu" # ensure determinism for the device-dependent torch.Generator
a = self.get_dummy_components()
a = StableDiffusionInpaintPipeline(**_SCREAMING_SNAKE_CASE )
a = sd_pipe.to(_SCREAMING_SNAKE_CASE )
sd_pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
a = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE )
a = sd_pipe(**_SCREAMING_SNAKE_CASE ).images
a = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
a = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCamelCase__ ( self :Optional[int] ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
def lowerCamelCase__ ( self :Union[str, Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase__ ( self :Optional[int] ):
'''simple docstring'''
a = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
a = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
a = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"""
"""/yellow_cat_sitting_on_a_park_bench.npy""" )
a = "stabilityai/stable-diffusion-2-inpainting"
a = StableDiffusionInpaintPipeline.from_pretrained(_SCREAMING_SNAKE_CASE , safety_checker=_SCREAMING_SNAKE_CASE )
pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
pipe.enable_attention_slicing()
a = "Face of a yellow cat, high resolution, sitting on a park bench"
a = torch.manual_seed(0 )
a = pipe(
prompt=_SCREAMING_SNAKE_CASE , image=_SCREAMING_SNAKE_CASE , mask_image=_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , output_type="""np""" , )
a = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 9E-3
def lowerCamelCase__ ( self :Tuple ):
'''simple docstring'''
a = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
a = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
a = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"""
"""/yellow_cat_sitting_on_a_park_bench_fp16.npy""" )
a = "stabilityai/stable-diffusion-2-inpainting"
a = StableDiffusionInpaintPipeline.from_pretrained(
_SCREAMING_SNAKE_CASE , torch_dtype=torch.floataa , safety_checker=_SCREAMING_SNAKE_CASE , )
pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
pipe.enable_attention_slicing()
a = "Face of a yellow cat, high resolution, sitting on a park bench"
a = torch.manual_seed(0 )
a = pipe(
prompt=_SCREAMING_SNAKE_CASE , image=_SCREAMING_SNAKE_CASE , mask_image=_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , output_type="""np""" , )
a = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def lowerCamelCase__ ( self :List[Any] ):
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
a = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
a = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
a = "stabilityai/stable-diffusion-2-inpainting"
a = PNDMScheduler.from_pretrained(_SCREAMING_SNAKE_CASE , subfolder="""scheduler""" )
a = StableDiffusionInpaintPipeline.from_pretrained(
_SCREAMING_SNAKE_CASE , safety_checker=_SCREAMING_SNAKE_CASE , scheduler=_SCREAMING_SNAKE_CASE , torch_dtype=torch.floataa , )
pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
a = "Face of a yellow cat, high resolution, sitting on a park bench"
a = torch.manual_seed(0 )
a = pipe(
prompt=_SCREAMING_SNAKE_CASE , image=_SCREAMING_SNAKE_CASE , mask_image=_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , num_inference_steps=2 , output_type="""np""" , )
a = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.65 * 10**9
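# Hedged aside on the test pattern above (illustration, not a diffusers
# API): pinning a small corner slice such as image[0, -3:, -3:, -1] keeps
# golden values tiny while still catching most numerical regressions, and
# the tolerance (1e-2 on CPU, up to 5e-1 for fp16 on GPU) absorbs
# nondeterminism across hardware.
def _slice_close(image, expected_slice, atol):
    return bool(np.abs(image[0, -3:, -3:, -1].flatten() - expected_slice).max() < atol)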
| 364 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__UpperCamelCase : Optional[Any] = {
"configuration_wav2vec2": ["WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Wav2Vec2Config"],
"feature_extraction_wav2vec2": ["Wav2Vec2FeatureExtractor"],
"processing_wav2vec2": ["Wav2Vec2Processor"],
"tokenization_wav2vec2": ["Wav2Vec2CTCTokenizer", "Wav2Vec2Tokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : str = [
"WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
"Wav2Vec2ForAudioFrameClassification",
"Wav2Vec2ForCTC",
"Wav2Vec2ForMaskedLM",
"Wav2Vec2ForPreTraining",
"Wav2Vec2ForSequenceClassification",
"Wav2Vec2ForXVector",
"Wav2Vec2Model",
"Wav2Vec2PreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Dict = [
"TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFWav2Vec2ForCTC",
"TFWav2Vec2Model",
"TFWav2Vec2PreTrainedModel",
"TFWav2Vec2ForSequenceClassification",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Union[str, Any] = [
"FlaxWav2Vec2ForCTC",
"FlaxWav2Vec2ForPreTraining",
"FlaxWav2Vec2Model",
"FlaxWav2Vec2PreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .processing_wavaveca import WavaVecaProcessor
from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavaveca import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
WavaVecaForAudioFrameClassification,
WavaVecaForCTC,
WavaVecaForMaskedLM,
WavaVecaForPreTraining,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
WavaVecaModel,
WavaVecaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWavaVecaForCTC,
TFWavaVecaForSequenceClassification,
TFWavaVecaModel,
TFWavaVecaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
FlaxWavaVecaForCTC,
FlaxWavaVecaForPreTraining,
FlaxWavaVecaModel,
FlaxWavaVecaPreTrainedModel,
)
else:
import sys
__UpperCamelCase : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
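# Hedged miniature of the pattern above (standard library only; greatly
# simplified relative to the real _LazyModule): attribute access triggers
# the underlying submodule import, so importing the package itself stays
# cheap. An unknown attribute raises KeyError here rather than
# AttributeError, which the real implementation handles more carefully.
import importlib
import types


class _TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        module = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value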
| 347 | 0 |
from collections.abc import Callable
def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> List[Any]:
a = a
a = b
if function(_a ) == 0: # one of the a or b is a root for the function
return a
elif function(_a ) == 0:
return b
elif (
function(_a ) * function(_a ) > 0
): # if none of these are root and they are both positive or negative,
# then this algorithm can't find the root
raise ValueError("""could not find root in given interval.""" )
else:
a = start + (end - start) / 2.0
while abs(start - mid ) > 10**-7: # until precisely equals to 10^-7
if function(_a ) == 0:
return mid
elif function(_a ) * function(_a ) < 0:
a = mid
else:
a = mid
a = start + (end - start) / 2.0
return mid
def __A ( __lowerCamelCase ) -> Dict:
return x**3 - 2 * x - 5
if __name__ == "__main__":
print(bisection(f, 1, 1_000))
import doctest
doctest.testmod()
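# Hedged numeric aside: the root of f(x) = x**3 - 2*x - 5 on [1, 1000] is
# about 2.0945514815. Each loop iteration halves the bracket, so reaching
# the 1e-7 stopping width from a unit-wide bracket takes roughly
# log2(1e7) ~ 24 halvings, with ~10 more for the initial width of 999.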
| 365 |
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize("""dataset_size""" , [None, 400 * 2**20, 600 * 2**20] )
@pytest.mark.parametrize("""input_in_memory_max_size""" , ["""default""", 0, 100 * 2**20, 900 * 2**20] )
def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Any:
if input_in_memory_max_size != "default":
monkeypatch.setattr(datasets.config , """IN_MEMORY_MAX_SIZE""" , __lowerCamelCase )
a = datasets.config.IN_MEMORY_MAX_SIZE
if input_in_memory_max_size == "default":
assert in_memory_max_size == 0
else:
assert in_memory_max_size == input_in_memory_max_size
if dataset_size and in_memory_max_size:
a = dataset_size < in_memory_max_size
else:
a = False
a = is_small_dataset(__lowerCamelCase )
assert result == expected
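# Hedged sketch of the semantics under test (a guess at the helper's shape,
# not datasets' actual source): a dataset counts as "small" only when a
# positive in-memory cap is configured and the size fits strictly under it.
def _is_small_dataset_sketch(dataset_size, in_memory_max_size):
    if dataset_size is not None and in_memory_max_size:
        return dataset_size < in_memory_max_size
    return False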
| 347 | 0 |
def __A ( __lowerCamelCase ) -> int:
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
raise ValueError("""Input must be an integer""" )
if input_num <= 0:
raise ValueError("""Input must be positive""" )
return sum(
divisor for divisor in range(1 , input_num // 2 + 1 ) if input_num % divisor == 0 )
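# Hedged aside: this proper-divisor sum equals the number itself exactly
# for perfect numbers -- 6 (1 + 2 + 3) and 28 (1 + 2 + 4 + 7 + 14). The
# scan above is O(n); pairing each divisor d <= sqrt(n) with n // d would
# bring it down to O(sqrt(n)).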
if __name__ == "__main__":
import doctest
doctest.testmod()
| 366 |
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def __A ( __lowerCamelCase ) -> bool:
a = int(number**0.5 )
return number == sq * sq
def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> tuple[int, int]:
a = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
a = x_den * y_den * z_den
a = gcd(__lowerCamelCase , __lowerCamelCase )
top //= hcf
bottom //= hcf
return top, bottom
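# Hedged worked check (comment only): for x = 1/2, y = 1/3, z = 1/6 the raw
# sum is 36/36 (18 + 12 + 6 over 2*3*6), and dividing by gcd(36, 36) = 36
# reduces it to (1, 1); returning fractions in lowest terms is what lets
# the search below deduplicate sums through the `unique_s` set.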
def __A ( __lowerCamelCase = 35 ) -> int:
a = set()
a = 42
a = Fraction(0 )
a = 42
for x_num in range(1 , order + 1 ):
for x_den in range(x_num + 1 , order + 1 ):
for y_num in range(1 , order + 1 ):
for y_den in range(y_num + 1 , order + 1 ):
# n=1
a = x_num * y_den + x_den * y_num
a = x_den * y_den
a = gcd(__lowerCamelCase , __lowerCamelCase )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
a = add_three(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
unique_s.add(__lowerCamelCase )
# n=2
a = (
x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
)
a = x_den * x_den * y_den * y_den
if is_sq(__lowerCamelCase ) and is_sq(__lowerCamelCase ):
a = int(sqrt(__lowerCamelCase ) )
a = int(sqrt(__lowerCamelCase ) )
a = gcd(__lowerCamelCase , __lowerCamelCase )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
a = add_three(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
unique_s.add(__lowerCamelCase )
# n=-1
a = x_num * y_num
a = x_den * y_num + x_num * y_den
a = gcd(__lowerCamelCase , __lowerCamelCase )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
a = add_three(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
unique_s.add(__lowerCamelCase )
# n=2
a = x_num * x_num * y_num * y_num
a = (
x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
)
if is_sq(__lowerCamelCase ) and is_sq(__lowerCamelCase ):
a = int(sqrt(__lowerCamelCase ) )
a = int(sqrt(__lowerCamelCase ) )
a = gcd(__lowerCamelCase , __lowerCamelCase )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
a = add_three(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
unique_s.add(__lowerCamelCase )
for num, den in unique_s:
total += Fraction(__lowerCamelCase , __lowerCamelCase )
return total.denominator + total.numerator
if __name__ == "__main__":
print(F'{solution() = }')
| 347 | 0 |
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
__UpperCamelCase : Optional[Any] = False
class __lowerCAmelCase ( unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
def lowerCamelCase__ ( self :Optional[int] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase__ ( self :Tuple ):
'''simple docstring'''
a = VersatileDiffusionPipeline.from_pretrained("""shi-labs/versatile-diffusion""" , torch_dtype=torch.floataa )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
a = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" )
a = torch.manual_seed(0 )
a = pipe.dual_guided(
prompt="""first prompt""" , image=_UpperCAmelCase , text_to_image_strength=0.75 , generator=_UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(_UpperCAmelCase )
a = VersatileDiffusionPipeline.from_pretrained(_UpperCAmelCase , torch_dtype=torch.floataa )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
a = generator.manual_seed(0 )
a = pipe.dual_guided(
prompt="""first prompt""" , image=_UpperCAmelCase , text_to_image_strength=0.75 , generator=_UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" , ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def lowerCamelCase__ ( self :Union[str, Any] ):
'''simple docstring'''
a = VersatileDiffusionPipeline.from_pretrained("""shi-labs/versatile-diffusion""" , torch_dtype=torch.floataa )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
a = """cyberpunk 2077"""
a = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" )
a = torch.manual_seed(0 )
a = pipe.dual_guided(
prompt=_UpperCAmelCase , image=_UpperCAmelCase , text_to_image_strength=0.75 , generator=_UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" , ).images
a = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
a = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
a = """A painting of a squirrel eating a burger """
a = torch.manual_seed(0 )
a = pipe.text_to_image(
prompt=_UpperCAmelCase , generator=_UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" ).images
a = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
a = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
a = pipe.image_variation(_UpperCAmelCase , generator=_UpperCAmelCase , output_type="""numpy""" ).images
a = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
a = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
| 367 |
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class __lowerCAmelCase ( unittest.TestCase ):
def __init__( self :List[str] , __magic_name__ :List[str] , __magic_name__ :List[Any]=13 , __magic_name__ :Any=7 , __magic_name__ :Optional[int]=True , __magic_name__ :List[Any]=True , __magic_name__ :Optional[int]=True , __magic_name__ :Union[str, Any]=True , __magic_name__ :Any=99 , __magic_name__ :List[str]=32 , __magic_name__ :List[str]=5 , __magic_name__ :str=4 , __magic_name__ :str=37 , __magic_name__ :Optional[int]="gelu" , __magic_name__ :int=0.1 , __magic_name__ :Dict=0.1 , __magic_name__ :List[str]=512 , __magic_name__ :Tuple=16 , __magic_name__ :Tuple=2 , __magic_name__ :List[str]=0.02 , __magic_name__ :Any=4 , ):
'''simple docstring'''
a = parent
a = batch_size
a = seq_length
a = is_training
a = use_attention_mask
a = use_token_type_ids
a = use_labels
a = vocab_size
a = hidden_size
a = num_hidden_layers
a = num_attention_heads
a = intermediate_size
a = hidden_act
a = hidden_dropout_prob
a = attention_probs_dropout_prob
a = max_position_embeddings
a = type_vocab_size
a = type_sequence_label_size
a = initializer_range
a = num_choices
def lowerCamelCase__ ( self :Optional[int] ):
'''simple docstring'''
a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
a = None
if self.use_attention_mask:
a = random_attention_mask([self.batch_size, self.seq_length] )
a = None
if self.use_token_type_ids:
a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
a = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__magic_name__ , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def lowerCamelCase__ ( self :Optional[Any] ):
'''simple docstring'''
a = self.prepare_config_and_inputs()
a , a , a , a = config_and_inputs
a = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
@require_flax
class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ):
UpperCamelCase__ = True
UpperCamelCase__ = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowerCamelCase__ ( self :List[Any] ):
'''simple docstring'''
a = FlaxRoFormerModelTester(self )
@slow
def lowerCamelCase__ ( self :List[str] ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
a = model_class_name.from_pretrained("""junnyu/roformer_chinese_small""" , from_pt=__magic_name__ )
a = model(np.ones((1, 1) ) )
self.assertIsNotNone(__magic_name__ )
@require_flax
class __lowerCAmelCase ( unittest.TestCase ):
@slow
def lowerCamelCase__ ( self :Union[str, Any] ):
'''simple docstring'''
a = FlaxRoFormerForMaskedLM.from_pretrained("""junnyu/roformer_chinese_base""" )
a = jnp.array([[0, 1, 2, 3, 4, 5]] )
a = model(__magic_name__ )[0]
a = 5_0000
a = (1, 6, vocab_size)
self.assertEqual(output.shape , __magic_name__ )
a = jnp.array(
[[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]] )
self.assertTrue(jnp.allclose(output[:, :3, :3] , __magic_name__ , atol=1E-4 ) )
| 347 | 0 |
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def __A ( __lowerCamelCase=None , __lowerCamelCase=None ) -> List[Any]:
return field(default_factory=lambda: default , metadata=__lowerCamelCase )
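# Hedged aside (demo class, unused below): mutable dataclass defaults must
# go through default_factory; the lambda above freezes `default` at
# definition time so each instance gets its own copy. The plain
# standard-library form for a list-valued field:
@dataclass
class _ListFieldDemo:
    names: List[str] = field(default_factory=list)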
@dataclass
class __lowerCAmelCase :
UpperCamelCase__ = field(
metadata={'''help''': '''The csv file to plot.'''} , )
UpperCamelCase__ = field(
default=_a , metadata={'''help''': '''Whether to plot along batch size or sequence length. Defaults to sequence length.'''} , )
UpperCamelCase__ = field(
default=_a , metadata={'''help''': '''Whether the csv file has time results or memory results. Defaults to memory results.'''} , )
UpperCamelCase__ = field(
default=_a , metadata={'''help''': '''Disable logarithmic scale when plotting'''} , )
UpperCamelCase__ = field(
default=_a , metadata={
'''help''': '''Whether the csv file has training results or inference results. Defaults to inference results.'''
} , )
UpperCamelCase__ = field(
default=_a , metadata={'''help''': '''Filename under which the plot will be saved. If unused no plot is saved.'''} , )
UpperCamelCase__ = list_field(
default=_a , metadata={'''help''': '''List of model names that are used instead of the ones in the csv file.'''} )
def __A ( __lowerCamelCase ) -> Any:
try:
int(__lowerCamelCase )
return True
except ValueError:
return False
def __A ( __lowerCamelCase ) -> Any:
try:
float(__lowerCamelCase )
return True
except ValueError:
return False
class __lowerCAmelCase :
def __init__( self :Tuple , __magic_name__ :List[Any] ):
'''simple docstring'''
a = args
a = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}} )
with open(self.args.csv_file , newline="""""" ) as csv_file:
a = csv.DictReader(__lowerCamelCase )
for row in reader:
a = row["""model"""]
self.result_dict[model_name]["bsz"].append(int(row["""batch_size"""] ) )
self.result_dict[model_name]["seq_len"].append(int(row["""sequence_length"""] ) )
if can_convert_to_int(row["""result"""] ):
# value is not None
a = int(row["""result"""] )
elif can_convert_to_float(row["""result"""] ):
# value is not None
a = float(row["""result"""] )
def lowerCamelCase__ ( self :Union[str, Any] ):
'''simple docstring'''
a = plt.subplots()
a = """Time usage""" if self.args.is_time else """Memory usage"""
a = title_str + """ for training""" if self.args.is_train else title_str + """ for inference"""
if not self.args.no_log_scale:
# set logarithm scales
ax.set_xscale("""log""" )
ax.set_yscale("""log""" )
for axis in [ax.xaxis, ax.yaxis]:
axis.set_major_formatter(ScalarFormatter() )
for model_name_idx, model_name in enumerate(self.result_dict.keys() ):
a = sorted(set(self.result_dict[model_name]["""bsz"""] ) )
a = sorted(set(self.result_dict[model_name]["""seq_len"""] ) )
a = self.result_dict[model_name]["""result"""]
            (a) , (a) = (
                (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
            )
a = (
model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
)
for inner_loop_value in inner_loop_array:
if self.args.plot_along_batch:
a = np.asarray(
[results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results] , dtype=__lowerCamelCase , )
else:
a = np.asarray(
[results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results] , dtype=np.floataa , )
                (a) , (a) = (
                    ("""batch_size""", """len""") if self.args.plot_along_batch else ("""in #tokens""", """bsz""")
                )
a = np.asarray(__lowerCamelCase , __lowerCamelCase )[: len(__lowerCamelCase )]
plt.scatter(
__lowerCamelCase , __lowerCamelCase , label=F'{label_model_name} - {inner_loop_label}: {inner_loop_value}' )
plt.plot(__lowerCamelCase , __lowerCamelCase , """--""" )
title_str += F' {label_model_name} vs.'
a = title_str[:-4]
a = """Time in s""" if self.args.is_time else """Memory in MB"""
# plot
plt.title(__lowerCamelCase )
plt.xlabel(__lowerCamelCase )
plt.ylabel(__lowerCamelCase )
plt.legend()
if self.args.figure_png_file is not None:
plt.savefig(self.args.figure_png_file )
else:
plt.show()
def __A ( ) -> List[str]:
a = HfArgumentParser(__lowerCamelCase )
a = parser.parse_args_into_dataclasses()[0]
a = Plot(args=__lowerCamelCase )
plot.plot()
if __name__ == "__main__":
main()
| 368 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__UpperCamelCase : Optional[int] = {
"configuration_blenderbot": [
"BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlenderbotConfig",
"BlenderbotOnnxConfig",
],
"tokenization_blenderbot": ["BlenderbotTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : List[Any] = ["BlenderbotTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Union[str, Any] = [
"BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotForCausalLM",
"BlenderbotForConditionalGeneration",
"BlenderbotModel",
"BlenderbotPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : int = [
"TFBlenderbotForConditionalGeneration",
"TFBlenderbotModel",
"TFBlenderbotPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Tuple = [
"FlaxBlenderbotForConditionalGeneration",
"FlaxBlenderbotModel",
"FlaxBlenderbotPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
__UpperCamelCase : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 347 | 0 |
"""simple docstring"""
def __A ( __lowerCamelCase ) -> str:
if number > 0:
raise ValueError("""input must be a negative integer""" )
a = len(bin(a__ )[3:] )
a = bin(abs(a__ ) - (1 << binary_number_length) )[3:]
a = (
(
"""1"""
+ """0""" * (binary_number_length - len(a__ ))
+ twos_complement_number
)
if number < 0
else """0"""
)
return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
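# Hedged worked example (comment only): for -5, bin(-5)[3:] == "101"
# (3 bits) and abs(-5) - (1 << 3) == -3 gives bin(-3)[3:] == "11";
# padding to "1" + "0" * (3 - 2) + "11" yields "0b1011", which is -5 in
# 4-bit two's complement (0b1011 = 11 = 16 - 5).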
| 369 |
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class __lowerCAmelCase ( __magic_name__ ):
UpperCamelCase__ = (IPNDMScheduler,)
UpperCamelCase__ = (('''num_inference_steps''', 50),)
def lowerCamelCase__ ( self :Any , **__magic_name__ :Optional[Any] ):
'''simple docstring'''
a = {"""num_train_timesteps""": 1000}
config.update(**__magic_name__ )
return config
def lowerCamelCase__ ( self :Optional[int] , __magic_name__ :Tuple=0 , **__magic_name__ :Optional[int] ):
'''simple docstring'''
a = dict(self.forward_default_kwargs )
a = kwargs.pop("""num_inference_steps""" , __magic_name__ )
a = self.dummy_sample
a = 0.1 * sample
a = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
a = self.get_scheduler_config(**__magic_name__ )
a = scheduler_class(**__magic_name__ )
scheduler.set_timesteps(__magic_name__ )
# copy over dummy past residuals
a = dummy_past_residuals[:]
if time_step is None:
a = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__magic_name__ )
a = scheduler_class.from_pretrained(__magic_name__ )
new_scheduler.set_timesteps(__magic_name__ )
# copy over dummy past residuals
a = dummy_past_residuals[:]
a = scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample
a = new_scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
a = scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample
a = new_scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def lowerCamelCase__ ( self :Union[str, Any] ):
'''simple docstring'''
pass
def lowerCamelCase__ ( self :List[Any] , __magic_name__ :List[Any]=0 , **__magic_name__ :Any ):
'''simple docstring'''
a = dict(self.forward_default_kwargs )
a = kwargs.pop("""num_inference_steps""" , __magic_name__ )
a = self.dummy_sample
a = 0.1 * sample
a = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
a = self.get_scheduler_config()
a = scheduler_class(**__magic_name__ )
scheduler.set_timesteps(__magic_name__ )
# copy over dummy past residuals (must be after setting timesteps)
a = dummy_past_residuals[:]
if time_step is None:
a = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__magic_name__ )
a = scheduler_class.from_pretrained(__magic_name__ )
# copy over dummy past residuals
new_scheduler.set_timesteps(__magic_name__ )
# copy over dummy past residual (must be after setting timesteps)
a = dummy_past_residuals[:]
a = scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample
a = new_scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
a = scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample
a = new_scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def lowerCamelCase__ ( self :Optional[Any] , **__magic_name__ :Optional[int] ):
'''simple docstring'''
a = self.scheduler_classes[0]
a = self.get_scheduler_config(**__magic_name__ )
a = scheduler_class(**__magic_name__ )
a = 10
a = self.dummy_model()
a = self.dummy_sample_deter
scheduler.set_timesteps(__magic_name__ )
for i, t in enumerate(scheduler.timesteps ):
a = model(__magic_name__ , __magic_name__ )
a = scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ ).prev_sample
for i, t in enumerate(scheduler.timesteps ):
a = model(__magic_name__ , __magic_name__ )
a = scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ ).prev_sample
return sample
def lowerCamelCase__ ( self :str ):
'''simple docstring'''
a = dict(self.forward_default_kwargs )
a = kwargs.pop("""num_inference_steps""" , __magic_name__ )
for scheduler_class in self.scheduler_classes:
a = self.get_scheduler_config()
a = scheduler_class(**__magic_name__ )
a = self.dummy_sample
a = 0.1 * sample
if num_inference_steps is not None and hasattr(__magic_name__ , """set_timesteps""" ):
scheduler.set_timesteps(__magic_name__ )
elif num_inference_steps is not None and not hasattr(__magic_name__ , """set_timesteps""" ):
a = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
a = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
a = dummy_past_residuals[:]
a = scheduler.timesteps[5]
a = scheduler.timesteps[6]
a = scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample
a = scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
a = scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample
a = scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def lowerCamelCase__ ( self :List[Any] ):
'''simple docstring'''
for timesteps in [100, 1000]:
self.check_over_configs(num_train_timesteps=__magic_name__ , time_step=__magic_name__ )
def lowerCamelCase__ ( self :Dict ):
'''simple docstring'''
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
self.check_over_forward(num_inference_steps=__magic_name__ , time_step=__magic_name__ )
def lowerCamelCase__ ( self :Tuple ):
'''simple docstring'''
a = self.full_loop()
a = torch.mean(torch.abs(__magic_name__ ) )
assert abs(result_mean.item() - 254_0529 ) < 10
| 347 | 0 |
from graphs.minimum_spanning_tree_kruskal import kruskal
def __A ( ) -> Union[str, Any]:
a = 9
a = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
a = kruskal(_lowerCamelCase , _lowerCamelCase )
a = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
assert sorted(_lowerCamelCase ) == sorted(_lowerCamelCase )
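# Hedged sketch of the algorithm under test (union-find with path halving;
# an assumed implementation, not graphs.minimum_spanning_tree_kruskal
# itself): sort edges by weight and keep each edge that joins two
# previously separate components.
def _kruskal_sketch(num_nodes, edges):
    parent = list(range(num_nodes))

    def find(x):
        while parent[x] != x:
            parent[x] = parent[parent[x]]  # path halving
            x = parent[x]
        return x

    mst = []
    for u, v, w in sorted(edges, key=lambda e: e[2]):
        root_u, root_v = find(u), find(v)
        if root_u != root_v:
            parent[root_u] = root_v
            mst.append([u, v, w])
    return mst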
| 370 |
__UpperCamelCase : Dict = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
def __A ( ) -> None:
a = input("""Enter message: """ )
a = input("""Enter key [alphanumeric]: """ )
a = input("""Encrypt/Decrypt [e/d]: """ )
if mode.lower().startswith("""e""" ):
a = """encrypt"""
a = encrypt_message(__lowerCamelCase , __lowerCamelCase )
elif mode.lower().startswith("""d""" ):
a = """decrypt"""
a = decrypt_message(__lowerCamelCase , __lowerCamelCase )
print(f'\n{mode.title()}ed message:' )
print(__lowerCamelCase )
def __A ( __lowerCamelCase , __lowerCamelCase ) -> str:
return translate_message(__lowerCamelCase , __lowerCamelCase , """encrypt""" )
def __A ( __lowerCamelCase , __lowerCamelCase ) -> str:
return translate_message(__lowerCamelCase , __lowerCamelCase , """decrypt""" )
def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> str:
a = []
a = 0
a = key.upper()
for symbol in message:
a = LETTERS.find(symbol.upper() )
if num != -1:
if mode == "encrypt":
num += LETTERS.find(key[key_index] )
elif mode == "decrypt":
num -= LETTERS.find(key[key_index] )
num %= len(__lowerCamelCase )
if symbol.isupper():
translated.append(LETTERS[num] )
elif symbol.islower():
translated.append(LETTERS[num].lower() )
key_index += 1
if key_index == len(__lowerCamelCase ):
a = 0
else:
translated.append(__lowerCamelCase )
return "".join(__lowerCamelCase )
if __name__ == "__main__":
main()
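# Hedged round-trip note (comment only, since the module is interactive):
# with key "LEMON", plaintext "ATTACKATDAWN" encrypts to "LXFOPVEFRNHR";
# decrypting with the same key restores the plaintext, because each letter
# is shifted by the alphabet index of the matching key letter, modulo 26,
# and the key index only advances on alphabetic symbols.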
| 347 | 0 |
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import SeqaSeqDataset, pickle_save
def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=1024 , __lowerCamelCase=1024 , __lowerCamelCase=False , **__lowerCamelCase ) -> str:
a = AutoTokenizer.from_pretrained(__lowerCamelCase )
a = SeqaSeqDataset(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , type_path="""train""" , **__lowerCamelCase )
a = tok.pad_token_id
def get_lens(__lowerCamelCase ):
a = tqdm(
DataLoader(__lowerCamelCase , batch_size=512 , num_workers=8 , shuffle=__lowerCamelCase , collate_fn=ds.collate_fn ) , desc=str(ds.len_file ) , )
a = []
for batch in dl:
            a = batch["""input_ids"""].ne(__lowerCamelCase ).sum(1 ).tolist()
            a = batch["""labels"""].ne(__lowerCamelCase ).sum(1 ).tolist()
if consider_target:
for src, tgt in zip(__lowerCamelCase , __lowerCamelCase ):
max_lens.append(max(__lowerCamelCase , __lowerCamelCase ) )
else:
max_lens.extend(__lowerCamelCase )
return max_lens
a = get_lens(__lowerCamelCase )
a = SeqaSeqDataset(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , type_path="""val""" , **__lowerCamelCase )
a = get_lens(__lowerCamelCase )
pickle_save(__lowerCamelCase , train_ds.len_file )
pickle_save(__lowerCamelCase , val_ds.len_file )
if __name__ == "__main__":
fire.Fire(save_len_file)
| 371 |
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class __lowerCAmelCase ( unittest.TestCase ):
def __init__( self :Optional[int] , __magic_name__ :Tuple , __magic_name__ :Tuple=13 , __magic_name__ :List[Any]=7 , __magic_name__ :Optional[Any]=True , __magic_name__ :List[Any]=True , __magic_name__ :Union[str, Any]=True , __magic_name__ :List[str]=True , __magic_name__ :str=99 , __magic_name__ :Optional[Any]=32 , __magic_name__ :Union[str, Any]=5 , __magic_name__ :Any=4 , __magic_name__ :int=37 , __magic_name__ :Tuple="gelu" , __magic_name__ :List[str]=0.1 , __magic_name__ :Dict=0.1 , __magic_name__ :Tuple=512 , __magic_name__ :Dict=16 , __magic_name__ :Optional[int]=2 , __magic_name__ :Optional[int]=0.02 , __magic_name__ :Optional[Any]=4 , ):
'''simple docstring'''
a = parent
a = batch_size
a = seq_length
a = is_training
a = use_attention_mask
a = use_token_type_ids
a = use_labels
a = vocab_size
a = hidden_size
a = num_hidden_layers
a = num_attention_heads
a = intermediate_size
a = hidden_act
a = hidden_dropout_prob
a = attention_probs_dropout_prob
a = max_position_embeddings
a = type_vocab_size
a = type_sequence_label_size
a = initializer_range
a = num_choices
def lowerCamelCase__ ( self :int ):
'''simple docstring'''
a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
a = None
if self.use_attention_mask:
a = random_attention_mask([self.batch_size, self.seq_length] )
a = None
if self.use_token_type_ids:
a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
a = RobertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__magic_name__ , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def lowerCamelCase__ ( self :List[Any] ):
'''simple docstring'''
a = self.prepare_config_and_inputs()
a , a , a , a = config_and_inputs
a = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
def lowerCamelCase__ ( self :Dict ):
'''simple docstring'''
a = self.prepare_config_and_inputs()
a , a , a , a = config_and_inputs
a = True
a = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
a = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ):
UpperCamelCase__ = True
UpperCamelCase__ = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowerCamelCase__ ( self :Dict ):
'''simple docstring'''
a = FlaxRobertaModelTester(self )
@slow
def lowerCamelCase__ ( self :Optional[Any] ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
a = model_class_name.from_pretrained("""roberta-base""" , from_pt=__magic_name__ )
a = model(np.ones((1, 1) ) )
self.assertIsNotNone(__magic_name__ )
| 347 | 0 |
from __future__ import annotations
def __A ( __lowerCamelCase ) -> str:
if len(lowerCamelCase__ ) == 0:
return []
a , a = min(lowerCamelCase__ ), max(lowerCamelCase__ )
a = int(max_value - min_value ) + 1
a = [[] for _ in range(lowerCamelCase__ )]
for i in my_list:
buckets[int(i - min_value )].append(lowerCamelCase__ )
return [v for bucket in buckets for v in sorted(lowerCamelCase__ )]
if __name__ == "__main__":
from doctest import testmod
testmod()
assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
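# Hedged walk-through (comment only): for [4, 5, 3, 2, 1], min = 1 and
# max = 5 give int(5 - 1) + 1 = 5 buckets indexed by int(v - min); every
# value lands in its own bucket here, and concatenating the per-bucket
# sorted runs yields [1, 2, 3, 4, 5]. Total cost is O(n + k) bucket work
# plus the per-bucket sorts.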
| 350 |
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
__UpperCamelCase : Any = datasets.utils.logging.get_logger(__name__)
@dataclass
class __lowerCAmelCase ( datasets.BuilderConfig ):
UpperCamelCase__ = None
UpperCamelCase__ = "utf-8"
UpperCamelCase__ = None
UpperCamelCase__ = None
UpperCamelCase__ = True # deprecated
UpperCamelCase__ = None # deprecated
UpperCamelCase__ = 10 << 20 # 10MB
UpperCamelCase__ = None
class __lowerCAmelCase ( datasets.ArrowBasedBuilder ):
UpperCamelCase__ = JsonConfig
def lowerCamelCase__ ( self :str ):
'''simple docstring'''
if self.config.block_size is not None:
logger.warning("""The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead""" )
a = self.config.block_size
if self.config.use_threads is not True:
logger.warning(
"""The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore.""" )
if self.config.newlines_in_values is not None:
raise ValueError("""The JSON loader parameter `newlines_in_values` is no longer supported""" )
return datasets.DatasetInfo(features=self.config.features )
def lowerCamelCase__ ( self :Tuple , __magic_name__ :str ):
'''simple docstring'''
if not self.config.data_files:
raise ValueError(F'At least one data file must be specified, but got data_files={self.config.data_files}' )
a = dl_manager.download_and_extract(self.config.data_files )
if isinstance(__magic_name__ , (str, list, tuple) ):
a = data_files
if isinstance(__magic_name__ , __magic_name__ ):
a = [files]
a = [dl_manager.iter_files(__magic_name__ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files} )]
a = []
for split_name, files in data_files.items():
if isinstance(__magic_name__ , __magic_name__ ):
a = [files]
a = [dl_manager.iter_files(__magic_name__ ) for file in files]
splits.append(datasets.SplitGenerator(name=__magic_name__ , gen_kwargs={"""files""": files} ) )
return splits
def lowerCamelCase__ ( self :List[str] , __magic_name__ :pa.Table ):
'''simple docstring'''
if self.config.features is not None:
# adding missing columns
for column_name in set(self.config.features ) - set(pa_table.column_names ):
a = self.config.features.arrow_schema.field(__magic_name__ ).type
a = pa_table.append_column(__magic_name__ , pa.array([None] * len(__magic_name__ ) , type=__magic_name__ ) )
# more expensive cast to support nested structures with keys in a different order
# allows str <-> int/float or str to Audio for example
a = table_cast(__magic_name__ , self.config.features.arrow_schema )
return pa_table
def lowerCamelCase__ ( self :Optional[int] , __magic_name__ :Union[str, Any] ):
'''simple docstring'''
for file_idx, file in enumerate(itertools.chain.from_iterable(__magic_name__ ) ):
# If the file is one json object and if we need to look at the list of items in one specific field
if self.config.field is not None:
with open(__magic_name__ , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
a = json.load(__magic_name__ )
# We keep only the field we are interested in
a = dataset[self.config.field]
# We accept two format: a list of dicts or a dict of lists
if isinstance(__magic_name__ , (list, tuple) ):
a = set().union(*[row.keys() for row in dataset] )
a = {col: [row.get(__magic_name__ ) for row in dataset] for col in keys}
else:
a = dataset
a = pa.Table.from_pydict(__magic_name__ )
yield file_idx, self._cast_table(__magic_name__ )
# If the file has one json object per line
else:
with open(__magic_name__ , """rb""" ) as f:
a = 0
# Use block_size equal to the chunk size divided by 32 to leverage multithreading
# Set a default minimum value of 16kB if the chunk size is really small
a = max(self.config.chunksize // 32 , 16 << 10 )
a = (
self.config.encoding_errors if self.config.encoding_errors is not None else """strict"""
)
while True:
a = f.read(self.config.chunksize )
if not batch:
break
# Finish current line
try:
batch += f.readline()
except (AttributeError, io.UnsupportedOperation):
batch += readline(__magic_name__ )
# PyArrow only accepts utf-8 encoded bytes
if self.config.encoding != "utf-8":
a = batch.decode(self.config.encoding , errors=__magic_name__ ).encode("""utf-8""" )
try:
while True:
try:
a = paj.read_json(
io.BytesIO(__magic_name__ ) , read_options=paj.ReadOptions(block_size=__magic_name__ ) )
break
except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
if (
isinstance(__magic_name__ , pa.ArrowInvalid )
and "straddling" not in str(__magic_name__ )
or block_size > len(__magic_name__ )
):
raise
else:
# Increase the block size in case it was too small.
# The block size will be reset for the next file.
logger.debug(
F'Batch of {len(__magic_name__ )} bytes couldn\'t be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.' )
block_size *= 2
except pa.ArrowInvalid as e:
try:
with open(
__magic_name__ , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
a = json.load(__magic_name__ )
except json.JSONDecodeError:
logger.error(F'Failed to read file \'{file}\' with error {type(__magic_name__ )}: {e}' )
raise e
# If possible, parse the file as a list of json objects and exit the loop
if isinstance(__magic_name__ , __magic_name__ ): # list is the only sequence type supported in JSON
try:
a = set().union(*[row.keys() for row in dataset] )
a = {col: [row.get(__magic_name__ ) for row in dataset] for col in keys}
a = pa.Table.from_pydict(__magic_name__ )
except (pa.ArrowInvalid, AttributeError) as e:
logger.error(F'Failed to read file \'{file}\' with error {type(__magic_name__ )}: {e}' )
raise ValueError(F'Not able to read records in the JSON file at {file}.' ) from None
yield file_idx, self._cast_table(__magic_name__ )
break
else:
logger.error(F'Failed to read file \'{file}\' with error {type(__magic_name__ )}: {e}' )
raise ValueError(
F'Not able to read records in the JSON file at {file}. '
F'You should probably indicate the field of the JSON file containing your records. '
F'This JSON file contain the following fields: {str(list(dataset.keys() ) )}. '
F'Select the correct one and provide it as `field=\'XXX\'` to the dataset loading method. ' ) from None
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(__magic_name__ )
batch_idx += 1
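# Hedged usage sketch: this builder is what backs `load_dataset("json", ...)`.
# The file names below are hypothetical placeholders, not shipped fixtures.
if __name__ == "__main__":
    from datasets import load_dataset

    ds = load_dataset("json", data_files={"train": "train.jsonl"})  # one JSON object per line
    nested = load_dataset("json", data_files="records.json", field="data")  # records under a "data" key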
| 347 | 0 |
from __future__ import annotations
from decimal import Decimal
from numpy import array
def __A ( __lowerCamelCase ) -> list[list[float]]:
a = Decimal
# Check if the provided matrix has 2 rows and 2 columns
# since this implementation only works for 2x2 matrices
    if len(matrix ) == 2 and len(matrix[0] ) == 2 and len(matrix[1] ) == 2:
# Calculate the determinant of the matrix
a = float(
d(matrix[0][0] ) * d(matrix[1][1] ) - d(matrix[1][0] ) * d(matrix[0][1] ) )
if determinant == 0:
raise ValueError("""This matrix has no inverse.""" )
# Creates a copy of the matrix with swapped positions of the elements
a = [[0.0, 0.0], [0.0, 0.0]]
a = matrix[1][1], matrix[0][0]
a = -matrix[1][0], -matrix[0][1]
# Calculate the inverse of the matrix
return [
            [(float(d(n ) ) / determinant) or 0.0 for n in row] for row in swapped_matrix
]
elif (
        len(matrix ) == 3
and len(matrix[0] ) == 3
and len(matrix[1] ) == 3
and len(matrix[2] ) == 3
):
# Calculate the determinant of the matrix using Sarrus rule
a = float(
(
(d(matrix[0][0] ) * d(matrix[1][1] ) * d(matrix[2][2] ))
+ (d(matrix[0][1] ) * d(matrix[1][2] ) * d(matrix[2][0] ))
+ (d(matrix[0][2] ) * d(matrix[1][0] ) * d(matrix[2][1] ))
)
- (
(d(matrix[0][2] ) * d(matrix[1][1] ) * d(matrix[2][0] ))
+ (d(matrix[0][1] ) * d(matrix[1][0] ) * d(matrix[2][2] ))
+ (d(matrix[0][0] ) * d(matrix[1][2] ) * d(matrix[2][1] ))
) )
if determinant == 0:
raise ValueError("""This matrix has no inverse.""" )
# Creating cofactor matrix
a = [
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
]
a = (d(matrix[1][1] ) * d(matrix[2][2] )) - (
d(matrix[1][2] ) * d(matrix[2][1] )
)
a = -(
(d(matrix[1][0] ) * d(matrix[2][2] )) - (d(matrix[1][2] ) * d(matrix[2][0] ))
)
a = (d(matrix[1][0] ) * d(matrix[2][1] )) - (
d(matrix[1][1] ) * d(matrix[2][0] )
)
a = -(
(d(matrix[0][1] ) * d(matrix[2][2] )) - (d(matrix[0][2] ) * d(matrix[2][1] ))
)
a = (d(matrix[0][0] ) * d(matrix[2][2] )) - (
d(matrix[0][2] ) * d(matrix[2][0] )
)
a = -(
(d(matrix[0][0] ) * d(matrix[2][1] )) - (d(matrix[0][1] ) * d(matrix[2][0] ))
)
a = (d(matrix[0][1] ) * d(matrix[1][2] )) - (
d(matrix[0][2] ) * d(matrix[1][1] )
)
a = -(
(d(matrix[0][0] ) * d(matrix[1][2] )) - (d(matrix[0][2] ) * d(matrix[1][0] ))
)
a = (d(matrix[0][0] ) * d(matrix[1][1] )) - (
d(matrix[0][1] ) * d(matrix[1][0] )
)
# Transpose the cofactor matrix (Adjoint matrix)
        a = array(cofactor_matrix )
for i in range(3 ):
for j in range(3 ):
a = cofactor_matrix[j][i]
# Inverse of the matrix using the formula (1/determinant) * adjoint matrix
        a = array(adjoint_matrix )
for i in range(3 ):
for j in range(3 ):
                inverse_matrix[i][j] /= d(determinant )
# Calculate the inverse of the matrix
        return [[float(d(n ) ) or 0.0 for n in row] for row in inverse_matrix]
raise ValueError("""Please provide a matrix of size 2x2 or 3x3.""" )
| 351 |
import warnings
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__UpperCamelCase : Dict = logging.get_logger(__name__)
class __lowerCAmelCase ( __magic_name__ ):
UpperCamelCase__ = ['''input_ids''', '''attention_mask''']
def __init__( self :List[str] , __magic_name__ :int="</s>" , __magic_name__ :List[Any]="<unk>" , __magic_name__ :Optional[Any]="<pad>" , __magic_name__ :Optional[int]=125 , __magic_name__ :List[str]=None , **__magic_name__ :List[str] , ):
'''simple docstring'''
if extra_ids > 0 and additional_special_tokens is None:
a = [F'<extra_id_{i}>' for i in range(__magic_name__ )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
a = len(set(filter(lambda __magic_name__ : bool("""extra_id""" in str(__magic_name__ ) ) , __magic_name__ ) ) )
if extra_tokens != extra_ids:
raise ValueError(
F'Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'
""" provided to ByT5Tokenizer. In this case the additional_special_tokens must include the"""
""" extra_ids tokens""" )
a = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else pad_token
a = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else eos_token
a = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else unk_token
super().__init__(
eos_token=__magic_name__ , unk_token=__magic_name__ , pad_token=__magic_name__ , extra_ids=__magic_name__ , additional_special_tokens=__magic_name__ , **__magic_name__ , )
a = extra_ids
a = 2**8 # utf is 8 bits
# define special tokens dict
a = {
self.pad_token: 0,
self.eos_token: 1,
self.unk_token: 2,
}
a = len(self.special_tokens_encoder )
a = len(__magic_name__ )
for i, token in enumerate(__magic_name__ ):
a = self.vocab_size + i - n
a = {v: k for k, v in self.special_tokens_encoder.items()}
@property
def lowerCamelCase__ ( self :List[Any] ):
'''simple docstring'''
return self._utf_vocab_size + self._num_special_tokens + self._extra_ids
def lowerCamelCase__ ( self :Any , __magic_name__ :List[int] , __magic_name__ :Optional[List[int]] = None , __magic_name__ :bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__magic_name__ , token_ids_a=__magic_name__ , already_has_special_tokens=__magic_name__ )
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(__magic_name__ )) + [1]
return ([0] * len(__magic_name__ )) + [1] + ([0] * len(__magic_name__ )) + [1]
def lowerCamelCase__ ( self :str , __magic_name__ :List[int] ):
'''simple docstring'''
if len(__magic_name__ ) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
F'This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated'
""" eos tokens being added.""" )
return token_ids
else:
return token_ids + [self.eos_token_id]
def lowerCamelCase__ ( self :Union[str, Any] , __magic_name__ :List[int] , __magic_name__ :Optional[List[int]] = None ):
'''simple docstring'''
a = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def lowerCamelCase__ ( self :Union[str, Any] , __magic_name__ :List[int] , __magic_name__ :Optional[List[int]] = None ):
'''simple docstring'''
a = self._add_eos_if_not_present(__magic_name__ )
if token_ids_a is None:
return token_ids_a
else:
a = self._add_eos_if_not_present(__magic_name__ )
return token_ids_a + token_ids_a
def lowerCamelCase__ ( self :List[str] , __magic_name__ :str ):
'''simple docstring'''
        a = [chr(i ) for i in text.encode("""utf-8""" )]
return tokens
def lowerCamelCase__ ( self :Tuple , __magic_name__ :str ):
'''simple docstring'''
if token in self.special_tokens_encoder:
a = self.special_tokens_encoder[token]
elif token in self.added_tokens_encoder:
a = self.added_tokens_encoder[token]
elif len(__magic_name__ ) != 1:
a = self.unk_token_id
else:
a = ord(__magic_name__ ) + self._num_special_tokens
return token_id
def lowerCamelCase__ ( self :List[str] , __magic_name__ :Dict ):
'''simple docstring'''
if index in self.special_tokens_decoder:
a = self.special_tokens_decoder[index]
else:
a = chr(index - self._num_special_tokens )
return token
def lowerCamelCase__ ( self :Tuple , __magic_name__ :Optional[int] ):
'''simple docstring'''
a = b""""""
for token in tokens:
if token in self.special_tokens_decoder:
a = self.special_tokens_decoder[token].encode("""utf-8""" )
elif token in self.added_tokens_decoder:
                a = self.added_tokens_decoder[token].encode("""utf-8""" )
elif token in self.special_tokens_encoder:
a = token.encode("""utf-8""" )
elif token in self.added_tokens_encoder:
a = token.encode("""utf-8""" )
else:
a = bytes([ord(__magic_name__ )] )
bstring += tok_string
a = bstring.decode("""utf-8""" , errors="""ignore""" )
return string
def lowerCamelCase__ ( self :Optional[Any] , __magic_name__ :str , __magic_name__ :Optional[str] = None ):
'''simple docstring'''
return ()
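# Hedged usage sketch with the released checkpoint (requires `transformers` and
# network access). Each UTF-8 byte becomes one token; ids are offset by the
# three special tokens (<pad>=0, </s>=1, <unk>=2), so "h" (byte 104) maps to 107.
if __name__ == "__main__":
    from transformers import ByT5Tokenizer

    tok = ByT5Tokenizer.from_pretrained("google/byt5-small")
    print(tok("hi").input_ids)  # [107, 108, 1] -- "h", "i", then </s>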
| 347 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : Tuple = logging.get_logger(__name__)
__UpperCamelCase : List[Any] = {
's-JoL/Open-Llama-V1': 'https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json',
}
class __lowerCAmelCase ( __magic_name__ ):
UpperCamelCase__ = '''open-llama'''
def __init__( self :int , __magic_name__ :Any=10_0000 , __magic_name__ :str=4096 , __magic_name__ :Dict=1_1008 , __magic_name__ :Tuple=32 , __magic_name__ :Optional[int]=32 , __magic_name__ :int="silu" , __magic_name__ :str=2048 , __magic_name__ :Union[str, Any]=0.02 , __magic_name__ :List[str]=1E-6 , __magic_name__ :Any=True , __magic_name__ :Optional[int]=0 , __magic_name__ :Dict=1 , __magic_name__ :Optional[Any]=2 , __magic_name__ :str=False , __magic_name__ :List[Any]=True , __magic_name__ :Optional[int]=0.1 , __magic_name__ :int=0.1 , __magic_name__ :str=True , __magic_name__ :Any=True , __magic_name__ :Any=None , **__magic_name__ :int , ):
'''simple docstring'''
a = vocab_size
a = max_position_embeddings
a = hidden_size
a = intermediate_size
a = num_hidden_layers
a = num_attention_heads
a = hidden_act
a = initializer_range
a = rms_norm_eps
a = use_cache
a = kwargs.pop(
"""use_memorry_efficient_attention""" , __magic_name__ )
a = hidden_dropout_prob
a = attention_dropout_prob
a = use_stable_embedding
a = shared_input_output_embedding
a = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=__magic_name__ , bos_token_id=__magic_name__ , eos_token_id=__magic_name__ , tie_word_embeddings=__magic_name__ , **__magic_name__ , )
def lowerCamelCase__ ( self :List[str] ):
'''simple docstring'''
if self.rope_scaling is None:
return
        if not isinstance(self.rope_scaling , dict ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                """`rope_scaling` must be a dictionary with two fields, `type` and `factor`, """
                F'got {self.rope_scaling}' )
a = self.rope_scaling.get("""type""" , __magic_name__ )
a = self.rope_scaling.get("""factor""" , __magic_name__ )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
F'`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}' )
if rope_scaling_factor is None or not isinstance(__magic_name__ , __magic_name__ ) or rope_scaling_factor <= 1.0:
raise ValueError(F'`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}' )
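# Hedged configuration sketch; the sizes below are illustration values, not a
# released checkpoint, and `OpenLlamaConfig` must be importable from the
# installed `transformers` version.
if __name__ == "__main__":
    from transformers import OpenLlamaConfig

    config = OpenLlamaConfig(
        hidden_size=512,
        num_hidden_layers=4,
        rope_scaling={"type": "linear", "factor": 2.0},  # passes the validation above
    )
    print(config.rope_scaling)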
| 352 |
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class __lowerCAmelCase :
def __init__( self :Optional[int] , __magic_name__ :str , __magic_name__ :int=2 , __magic_name__ :List[str]=3 , __magic_name__ :Optional[int]=4 , __magic_name__ :str=2 , __magic_name__ :Any=7 , __magic_name__ :Optional[Any]=True , __magic_name__ :List[Any]=True , __magic_name__ :Optional[int]=True , __magic_name__ :Dict=True , __magic_name__ :List[Any]=99 , __magic_name__ :Dict=36 , __magic_name__ :Optional[Any]=3 , __magic_name__ :str=4 , __magic_name__ :Optional[Any]=37 , __magic_name__ :Dict="gelu" , __magic_name__ :Any=0.1 , __magic_name__ :Union[str, Any]=0.1 , __magic_name__ :Dict=512 , __magic_name__ :str=16 , __magic_name__ :List[Any]=2 , __magic_name__ :Tuple=0.02 , __magic_name__ :Any=6 , __magic_name__ :Optional[int]=6 , __magic_name__ :Tuple=3 , __magic_name__ :str=4 , __magic_name__ :List[str]=None , __magic_name__ :str=1000 , ):
'''simple docstring'''
a = parent
a = batch_size
a = num_channels
a = image_size
a = patch_size
a = text_seq_length
a = is_training
a = use_input_mask
a = use_token_type_ids
a = use_labels
a = vocab_size
a = hidden_size
a = num_hidden_layers
a = num_attention_heads
a = intermediate_size
a = hidden_act
a = hidden_dropout_prob
a = attention_probs_dropout_prob
a = max_position_embeddings
a = type_vocab_size
a = type_sequence_label_size
a = initializer_range
a = coordinate_size
a = shape_size
a = num_labels
a = num_choices
a = scope
a = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
a = text_seq_length
a = (image_size // patch_size) ** 2 + 1
a = self.text_seq_length + self.image_seq_length
def lowerCamelCase__ ( self :List[str] ):
'''simple docstring'''
a = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
a = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
a = bbox[i, j, 3]
a = bbox[i, j, 1]
a = t
if bbox[i, j, 2] < bbox[i, j, 0]:
a = bbox[i, j, 2]
a = bbox[i, j, 0]
a = t
a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
a = None
if self.use_input_mask:
a = random_attention_mask([self.batch_size, self.text_seq_length] )
a = None
if self.use_token_type_ids:
a = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
a = None
a = None
if self.use_labels:
a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
a = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
a = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def lowerCamelCase__ ( self :int , __magic_name__ :Union[str, Any] , __magic_name__ :List[str] , __magic_name__ :List[str] , __magic_name__ :int , __magic_name__ :Tuple , __magic_name__ :int , __magic_name__ :Tuple , __magic_name__ :int ):
'''simple docstring'''
a = LayoutLMvaModel(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
# text + image
a = model(__magic_name__ , pixel_values=__magic_name__ )
a = model(
__magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ )
a = model(__magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , token_type_ids=__magic_name__ )
a = model(__magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
a = model(__magic_name__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
a = model(pixel_values=__magic_name__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def lowerCamelCase__ ( self :Optional[Any] , __magic_name__ :List[Any] , __magic_name__ :Optional[int] , __magic_name__ :Tuple , __magic_name__ :Union[str, Any] , __magic_name__ :int , __magic_name__ :Optional[int] , __magic_name__ :List[str] , __magic_name__ :List[str] ):
'''simple docstring'''
a = self.num_labels
a = LayoutLMvaForSequenceClassification(__magic_name__ )
model.to(__magic_name__ )
model.eval()
a = model(
__magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , labels=__magic_name__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase__ ( self :Tuple , __magic_name__ :Dict , __magic_name__ :Optional[Any] , __magic_name__ :Optional[Any] , __magic_name__ :Dict , __magic_name__ :Dict , __magic_name__ :int , __magic_name__ :List[str] , __magic_name__ :Tuple ):
'''simple docstring'''
a = self.num_labels
a = LayoutLMvaForTokenClassification(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
a = model(
__magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , labels=__magic_name__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def lowerCamelCase__ ( self :Optional[int] , __magic_name__ :Tuple , __magic_name__ :str , __magic_name__ :int , __magic_name__ :Optional[int] , __magic_name__ :str , __magic_name__ :List[str] , __magic_name__ :Optional[int] , __magic_name__ :Optional[Any] ):
'''simple docstring'''
a = LayoutLMvaForQuestionAnswering(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
a = model(
__magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , start_positions=__magic_name__ , end_positions=__magic_name__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCamelCase__ ( self :Union[str, Any] ):
'''simple docstring'''
a = self.prepare_config_and_inputs()
        a , a , a , a , a , a , a , a = config_and_inputs
a = {
"""input_ids""": input_ids,
"""bbox""": bbox,
"""pixel_values""": pixel_values,
"""token_type_ids""": token_type_ids,
"""attention_mask""": input_mask,
}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( __magic_name__ , __magic_name__ , unittest.TestCase ):
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = (
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
UpperCamelCase__ = (
{'''document-question-answering''': LayoutLMvaForQuestionAnswering, '''feature-extraction''': LayoutLMvaModel}
if is_torch_available()
else {}
)
def lowerCamelCase__ ( self :List[str] , __magic_name__ :Union[str, Any] , __magic_name__ :List[str] , __magic_name__ :Optional[Any] , __magic_name__ :List[str] , __magic_name__ :List[Any] ):
'''simple docstring'''
return True
def lowerCamelCase__ ( self :int ):
'''simple docstring'''
a = LayoutLMvaModelTester(self )
a = ConfigTester(self , config_class=__magic_name__ , hidden_size=37 )
def lowerCamelCase__ ( self :Optional[Any] , __magic_name__ :Dict , __magic_name__ :Dict , __magic_name__ :Any=False ):
'''simple docstring'''
a = copy.deepcopy(__magic_name__ )
        if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING ):
a = {
k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous()
if isinstance(__magic_name__ , torch.Tensor ) and v.ndim > 1
else v
for k, v in inputs_dict.items()
}
if return_labels:
            if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING ):
a = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=__magic_name__ )
            elif model_class in get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING ):
a = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__magic_name__ )
a = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__magic_name__ )
elif model_class in [
                *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING ),
]:
a = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__magic_name__ )
elif model_class in [
                *get_values(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING ),
]:
a = torch.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=__magic_name__ , )
return inputs_dict
def lowerCamelCase__ ( self :str ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase__ ( self :Union[str, Any] ):
'''simple docstring'''
a = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
def lowerCamelCase__ ( self :Tuple ):
'''simple docstring'''
a = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
a = type
            self.model_tester.create_and_check_model(*config_and_inputs )
def lowerCamelCase__ ( self :Tuple ):
'''simple docstring'''
a = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )
def lowerCamelCase__ ( self :Union[str, Any] ):
'''simple docstring'''
a = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
def lowerCamelCase__ ( self :int ):
'''simple docstring'''
a = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
@slow
def lowerCamelCase__ ( self :int ):
'''simple docstring'''
for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            a = LayoutLMvaModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def __A ( ) -> "Image.Image":
a = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
@cached_property
def lowerCamelCase__ ( self :Optional[Any] ):
'''simple docstring'''
        return LayoutLMvaImageProcessor(apply_ocr=False ) if is_vision_available() else None
@slow
def lowerCamelCase__ ( self :Optional[Any] ):
'''simple docstring'''
a = LayoutLMvaModel.from_pretrained("""microsoft/layoutlmv3-base""" ).to(__magic_name__ )
a = self.default_image_processor
a = prepare_img()
        a = image_processor(images=image , return_tensors="""pt""" ).pixel_values.to(torch_device )
a = torch.tensor([[1, 2]] )
a = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
# forward pass
        a = model(
            input_ids=input_ids.to(torch_device ) , bbox=bbox.to(torch_device ) , pixel_values=pixel_values.to(torch_device ) , )
# verify the logits
a = torch.Size((1, 199, 768) )
        self.assertEqual(outputs.last_hidden_state.shape , expected_shape )
a = torch.tensor(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , expected_slice , atol=1E-4 ) )
| 347 | 0 |
from __future__ import annotations
from math import ceil, floor, sqrt
def __A ( __lowerCamelCase = 200_0000 ) -> int:
a = [0]
a = 42
for idx in range(1 , ceil(sqrt(target * 2 ) * 1.1 ) ):
triangle_numbers.append(triangle_numbers[-1] + idx )
# we want this to be as close as possible to target
a = 0
# the area corresponding to the grid that gives the product closest to target
a = 0
# an estimate of b, using the quadratic formula
a = 42
# the largest integer less than b_estimate
a = 42
    # the smallest integer greater than b_estimate
a = 42
# the triangle number corresponding to b_floor
a = 42
# the triangle number corresponding to b_ceil
a = 42
for idx_a, triangle_a in enumerate(triangle_numbers[1:] , 1 ):
a = (-1 + sqrt(1 + 8 * target / triangle_a )) / 2
        a = floor(b_estimate )
        a = ceil(b_estimate )
a = triangle_numbers[b_floor]
a = triangle_numbers[b_ceil]
if abs(target - triangle_b_first_guess * triangle_a ) < abs(
target - best_product ):
a = triangle_b_first_guess * triangle_a
a = idx_a * b_floor
if abs(target - triangle_b_second_guess * triangle_a ) < abs(
target - best_product ):
a = triangle_b_second_guess * triangle_a
a = idx_a * b_ceil
return area
if __name__ == "__main__":
print(F'{solution() = }')
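# Derivation of `b_estimate` above: an a x b grid contains T(a) * T(b)
# sub-rectangles, where T(n) = n * (n + 1) / 2. Fixing T(a) and solving
# T(a) * b * (b + 1) / 2 = target gives the positive quadratic root
# b = (-1 + sqrt(1 + 8 * target / T(a))) / 2; the loop then tests floor(b)
# and ceil(b) as the two nearest integer candidates.
if __name__ == "__main__":
    from math import sqrt

    target, triangle_a = 200_0000, 3  # T(2) = 3; illustration values
    b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a)) / 2
    print(b_estimate)  # floor/ceil of this are the candidate grid heights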
| 353 |
from copy import deepcopy
class __lowerCAmelCase :
def __init__( self :Union[str, Any] , __magic_name__ :list[int] | None = None , __magic_name__ :int | None = None ):
'''simple docstring'''
if arr is None and size is not None:
a = size
a = [0] * size
elif arr is not None:
            self.init(arr )
else:
raise ValueError("""Either arr or size must be specified""" )
def lowerCamelCase__ ( self :Dict , __magic_name__ :list[int] ):
'''simple docstring'''
a = len(__magic_name__ )
a = deepcopy(__magic_name__ )
for i in range(1 , self.size ):
            a = self.next_(i )
if j < self.size:
self.tree[j] += self.tree[i]
def lowerCamelCase__ ( self :Tuple ):
'''simple docstring'''
a = self.tree[:]
for i in range(self.size - 1 , 0 , -1 ):
            a = self.next_(i )
if j < self.size:
arr[j] -= arr[i]
return arr
@staticmethod
def lowerCamelCase__ ( __magic_name__ :int ):
'''simple docstring'''
return index + (index & (-index))
@staticmethod
def lowerCamelCase__ ( __magic_name__ :int ):
'''simple docstring'''
return index - (index & (-index))
def lowerCamelCase__ ( self :Any , __magic_name__ :int , __magic_name__ :int ):
'''simple docstring'''
if index == 0:
self.tree[0] += value
return
while index < self.size:
self.tree[index] += value
            a = self.next_(index )
def lowerCamelCase__ ( self :Optional[Any] , __magic_name__ :int , __magic_name__ :int ):
'''simple docstring'''
        self.add(index , value - self.get(index ) )
def lowerCamelCase__ ( self :int , __magic_name__ :int ):
'''simple docstring'''
if right == 0:
return 0
a = self.tree[0]
right -= 1 # make right inclusive
while right > 0:
result += self.tree[right]
            a = self.prev(right )
return result
def lowerCamelCase__ ( self :int , __magic_name__ :int , __magic_name__ :int ):
'''simple docstring'''
        return self.prefix(right ) - self.prefix(left )
def lowerCamelCase__ ( self :Tuple , __magic_name__ :int ):
'''simple docstring'''
        return self.query(index , index + 1 )
def lowerCamelCase__ ( self :Dict , __magic_name__ :int ):
'''simple docstring'''
value -= self.tree[0]
if value < 0:
return -1
a = 1 # Largest power of 2 <= size
while j * 2 < self.size:
j *= 2
a = 0
while j > 0:
if i + j < self.size and self.tree[i + j] <= value:
value -= self.tree[i + j]
i += j
j //= 2
return i
if __name__ == "__main__":
import doctest
doctest.testmod()
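# Self-contained usage sketch with the same update/prefix-sum mechanics as the
# class above (index 0 stored separately; step size is index & -index), kept
# independent so it runs on its own.
if __name__ == "__main__":
    size = 8
    tree = [0] * size

    def add(index: int, value: int) -> None:
        if index == 0:
            tree[0] += value
            return
        while index < size:
            tree[index] += value
            index += index & (-index)

    def prefix(right: int) -> int:  # sum of elements [0, right)
        if right == 0:
            return 0
        result, right = tree[0], right - 1
        while right > 0:
            result += tree[right]
            right -= right & (-right)
        return result

    for i, v in enumerate([1, 2, 3, 4, 5, 6, 7, 8]):
        add(i, v)
    print(prefix(4))  # 1 + 2 + 3 + 4 = 10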
| 347 | 0 |
def __A ( __lowerCamelCase ) -> None:
    a = generate_pascal_triangle(num_rows )
    for row_idx in range(num_rows ):
# Print left spaces
for _ in range(num_rows - row_idx - 1 ):
print(end=""" """ )
# Print row values
for col_idx in range(row_idx + 1 ):
if col_idx != row_idx:
print(triangle[row_idx][col_idx] , end=""" """ )
else:
print(triangle[row_idx][col_idx] , end="""""" )
print()
def __A ( __lowerCamelCase ) -> list[list[int]]:
    if not isinstance(num_rows , int ):
raise TypeError("""The input value of 'num_rows' should be 'int'""" )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
"""The input value of 'num_rows' should be greater than or equal to 0""" )
a = []
    for current_row_idx in range(num_rows ):
        a = populate_current_row(triangle , current_row_idx )
        triangle.append(current_row )
return triangle
def __A ( __lowerCamelCase , __lowerCamelCase ) -> list[int]:
a = [-1] * (current_row_idx + 1)
# first and last elements of current row are equal to 1
a = 1, 1
    for current_col_idx in range(1 , current_row_idx ):
        calculate_current_element(
            triangle , current_row , current_row_idx , current_col_idx )
return current_row
def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , ) -> None:
a = triangle[current_row_idx - 1][current_col_idx - 1]
a = triangle[current_row_idx - 1][current_col_idx]
a = above_to_left_elt + above_to_right_elt
def __A ( __lowerCamelCase ) -> list[list[int]]:
    if not isinstance(num_rows , int ):
raise TypeError("""The input value of 'num_rows' should be 'int'""" )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
"""The input value of 'num_rows' should be greater than or equal to 0""" )
a = [[1]]
    for row_index in range(1 , num_rows ):
a = [0] + result[-1] + [0]
a = row_index + 1
# Calculate the number of distinct elements in a row
        a = sum(divmod(row_length , 2 ) )
a = [
temp_row[i - 1] + temp_row[i] for i in range(1 , distinct_elements + 1 )
]
a = row_first_half[: (row_index + 1) // 2]
row_second_half.reverse()
a = row_first_half + row_second_half
        result.append(row_to_append )
return result
def __A ( ) -> None:
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(__lowerCamelCase , __lowerCamelCase ) -> None:
a = f'{func.__name__}({value})'
a = timeit(f'__main__.{call}' , setup="""import __main__""" )
# print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
print(f'{call:38} -- {timing:.4f} seconds' )
for value in range(15 ): # (1, 7, 14):
for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func , value )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
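# Quick self-contained check of the recurrence used above: each interior entry
# is the sum of the two entries above it.
if __name__ == "__main__":
    rows = [[1]]
    for _ in range(4):
        prev = [0] + rows[-1] + [0]
        rows.append([prev[i] + prev[i + 1] for i in range(len(prev) - 1)])
    print(rows)  # [[1], [1, 1], [1, 2, 1], [1, 3, 3, 1], [1, 4, 6, 4, 1]]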
| 354 |
from __future__ import annotations
from typing import Generic, TypeVar
__UpperCamelCase : Union[str, Any] = TypeVar("T")
class __lowerCAmelCase ( Generic[T] ):
def __init__( self :Tuple , __magic_name__ :T ):
'''simple docstring'''
a = data
a = self
a = 0
class __lowerCAmelCase ( Generic[T] ):
def __init__( self :Tuple ):
'''simple docstring'''
a = {}
def lowerCamelCase__ ( self :Optional[int] , __magic_name__ :T ):
'''simple docstring'''
a = DisjointSetTreeNode(__magic_name__ )
def lowerCamelCase__ ( self :List[Any] , __magic_name__ :T ):
'''simple docstring'''
a = self.map[data]
if elem_ref != elem_ref.parent:
a = self.find_set(elem_ref.parent.data )
return elem_ref.parent
def lowerCamelCase__ ( self :List[Any] , __magic_name__ :DisjointSetTreeNode[T] , __magic_name__ :DisjointSetTreeNode[T] ):
'''simple docstring'''
if nodea.rank > nodea.rank:
a = nodea
else:
a = nodea
if nodea.rank == nodea.rank:
nodea.rank += 1
def lowerCamelCase__ ( self :Optional[int] , __magic_name__ :T , __magic_name__ :T ):
'''simple docstring'''
self.link(self.find_set(__magic_name__ ) , self.find_set(__magic_name__ ) )
class __lowerCAmelCase ( Generic[T] ):
def __init__( self :Union[str, Any] ):
'''simple docstring'''
a = {}
def lowerCamelCase__ ( self :Union[str, Any] , __magic_name__ :T ):
'''simple docstring'''
if node not in self.connections:
a = {}
def lowerCamelCase__ ( self :Any , __magic_name__ :T , __magic_name__ :T , __magic_name__ :int ):
'''simple docstring'''
self.add_node(__magic_name__ )
self.add_node(__magic_name__ )
a = weight
a = weight
def lowerCamelCase__ ( self :int ):
'''simple docstring'''
a = []
a = set()
for start in self.connections:
for end in self.connections[start]:
if (start, end) not in seen:
seen.add((end, start) )
edges.append((start, end, self.connections[start][end]) )
        edges.sort(key=lambda x : x[2] )
# creating the disjoint set
a = DisjointSetTree[T]()
for node in self.connections:
            disjoint_set.make_set(node )
# MST generation
a = 0
a = 0
a = GraphUndirectedWeighted[T]()
while num_edges < len(self.connections ) - 1:
a , a , a = edges[index]
index += 1
            a = disjoint_set.find_set(u )
            a = disjoint_set.find_set(v )
            if parent_u != parent_v:
                num_edges += 1
                graph.add_edge(u , v , w )
                disjoint_set.union(u , v )
return graph
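# Self-contained sketch of the Kruskal procedure implemented above: sort edges
# by weight and keep an edge only when it joins two different components.
if __name__ == "__main__":
    edges = [("a", "b", 1), ("b", "c", 2), ("a", "c", 3)]
    parent = {node: node for node in "abc"}

    def find(node: str) -> str:
        while parent[node] != node:
            node = parent[node]
        return node

    mst = []
    for u, v, w in sorted(edges, key=lambda e: e[2]):
        ru, rv = find(u), find(v)
        if ru != rv:
            parent[ru] = rv
            mst.append((u, v, w))
    print(mst)  # [('a', 'b', 1), ('b', 'c', 2)] -- total weight 3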
| 347 | 0 |
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__UpperCamelCase : int = logging.get_logger(__name__)
class __lowerCAmelCase ( __magic_name__ ):
UpperCamelCase__ = ['''pixel_values''']
def __init__( self :List[str] , __magic_name__ :Optional[Any] = True , __magic_name__ :int = 32 , __magic_name__ :Union[str, Any]=PILImageResampling.BILINEAR , __magic_name__ :List[str] = True , **__magic_name__ :List[str] , ):
'''simple docstring'''
a = do_resize
a = do_rescale
a = size_divisor
a = resample
        super().__init__(**kwargs )
def lowerCamelCase__ ( self :Any , __magic_name__ :int , __magic_name__ :Dict , __magic_name__ :List[str] , __magic_name__ :Union[str, Any] = None , **__magic_name__ :Optional[int] ):
'''simple docstring'''
        a , a = get_image_size(image )
        # Rounds the height and width down to the closest multiple of size_divisor
        a = height // size_divisor * size_divisor
        a = width // size_divisor * size_divisor
        a = resize(image , (new_h, new_w) , resample=resample , data_format=data_format , **kwargs )
return image
def lowerCamelCase__ ( self :Tuple , __magic_name__ :str , __magic_name__ :Optional[Any] , __magic_name__ :int = None , **__magic_name__ :Dict ):
'''simple docstring'''
        return rescale(image=image , scale=scale , data_format=data_format , **kwargs )
def lowerCamelCase__ ( self :List[Any] , __magic_name__ :Union[str, Any] , __magic_name__ :Optional[int] = None , __magic_name__ :Any = None , __magic_name__ :Tuple=None , __magic_name__ :Tuple = None , __magic_name__ :List[Any] = None , __magic_name__ :List[str] = ChannelDimension.FIRST , **__magic_name__ :Optional[int] , ):
'''simple docstring'''
a = do_resize if do_resize is not None else self.do_resize
a = do_rescale if do_rescale is not None else self.do_rescale
a = size_divisor if size_divisor is not None else self.size_divisor
a = resample if resample is not None else self.resample
if do_resize and size_divisor is None:
raise ValueError("""size_divisor is required for resizing""" )
        a = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError("""Invalid image(s)""" )
        # All transformations expect numpy arrays.
        a = [to_numpy_array(img ) for img in images]
        if do_resize:
            a = [self.resize(image , size_divisor=size_divisor , resample=resample ) for image in images]
        if do_rescale:
            a = [self.rescale(image , scale=1 / 255 ) for image in images]
        a = [to_channel_dimension_format(image , data_format ) for image in images]
        a = {"""pixel_values""": images}
        return BatchFeature(data=data , tensor_type=return_tensors )
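# Hedged usage sketch: this processor's resizing matches the public
# GLPNImageProcessor (height and width are floored to a multiple of
# `size_divisor`); using that class keeps the example runnable.
if __name__ == "__main__":
    import numpy as np
    from transformers import GLPNImageProcessor

    processor = GLPNImageProcessor(size_divisor=32)
    image = np.random.randint(0, 256, (480, 500, 3), dtype=np.uint8)
    pixel_values = processor(image, return_tensors="np").pixel_values
    print(pixel_values.shape)  # (1, 3, 480, 480) -- 500 floored to 480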
| 355 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
def lowerCamelCase__ ( self :Any ):
'''simple docstring'''
a = tempfile.mkdtemp()
a = BlipImageProcessor()
a = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-BertModel""" )
        a = BlipProcessor(image_processor , tokenizer )
processor.save_pretrained(self.tmpdirname )
def lowerCamelCase__ ( self :List[Any] , **__magic_name__ :Union[str, Any] ):
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **__magic_name__ ).tokenizer
def lowerCamelCase__ ( self :str , **__magic_name__ :List[str] ):
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **__magic_name__ ).image_processor
def lowerCamelCase__ ( self :int ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def lowerCamelCase__ ( self :Any ):
'''simple docstring'''
        a = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
        a = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowerCamelCase__ ( self :Optional[int] ):
'''simple docstring'''
a = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
        a = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
        a = self.get_image_processor(do_normalize=False , padding_value=1.0 )
        a = BlipProcessor.from_pretrained(
            self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=False , padding_value=1.0 )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , PreTrainedTokenizerFast )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , BlipImageProcessor )
def lowerCamelCase__ ( self :Any ):
'''simple docstring'''
a = self.get_image_processor()
a = self.get_tokenizer()
        a = BlipProcessor(tokenizer=tokenizer , image_processor=image_processor )
        a = self.prepare_image_inputs()
        a = image_processor(image_input , return_tensors="""np""" )
        a = processor(images=image_input , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def lowerCamelCase__ ( self :Any ):
'''simple docstring'''
a = self.get_image_processor()
a = self.get_tokenizer()
        a = BlipProcessor(tokenizer=tokenizer , image_processor=image_processor )
        a = """lower newer"""
        a = processor(text=input_str )
        a = tokenizer(input_str , return_token_type_ids=False )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowerCamelCase__ ( self :List[Any] ):
'''simple docstring'''
a = self.get_image_processor()
a = self.get_tokenizer()
        a = BlipProcessor(tokenizer=tokenizer , image_processor=image_processor )
        a = """lower newer"""
        a = self.prepare_image_inputs()
        a = processor(text=input_str , images=image_input )
        self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """input_ids""", """attention_mask"""] )
        # test if it raises when no input is passed
        with pytest.raises(ValueError ):
processor()
def lowerCamelCase__ ( self :List[str] ):
'''simple docstring'''
a = self.get_image_processor()
a = self.get_tokenizer()
        a = BlipProcessor(tokenizer=tokenizer , image_processor=image_processor )
        a = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        a = processor.batch_decode(predicted_ids )
        a = tokenizer.batch_decode(predicted_ids )
        self.assertListEqual(decoded_tok , decoded_processor )
def lowerCamelCase__ ( self :Optional[int] ):
'''simple docstring'''
a = self.get_image_processor()
a = self.get_tokenizer()
        a = BlipProcessor(tokenizer=tokenizer , image_processor=image_processor )
        a = """lower newer"""
        a = self.prepare_image_inputs()
        a = processor(text=input_str , images=image_input )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """input_ids""", """attention_mask"""] )
| 347 | 0 |
"""simple docstring"""
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def __A ( __lowerCamelCase , __lowerCamelCase="shi-labs/oneformer_demo" ) -> dict:
    with open(hf_hub_download(repo_path , class_info_file , repo_type="""dataset""" ) , """r""" ) as f:
        a = json.load(f )
a = {}
a = []
a = []
for key, info in class_info.items():
a = info["""name"""]
class_names.append(info["""name"""] )
if info["isthing"]:
            thing_ids.append(int(key ) )
a = thing_ids
a = class_names
return metadata
class __lowerCAmelCase ( unittest.TestCase ):
def __init__( self :int , __magic_name__ :Optional[int] , __magic_name__ :Optional[Any]=7 , __magic_name__ :List[str]=3 , __magic_name__ :Tuple=30 , __magic_name__ :str=400 , __magic_name__ :Optional[int]=None , __magic_name__ :Optional[Any]=True , __magic_name__ :Any=True , __magic_name__ :Optional[int]=[0.5, 0.5, 0.5] , __magic_name__ :Optional[int]=[0.5, 0.5, 0.5] , __magic_name__ :List[Any]=10 , __magic_name__ :Tuple=False , __magic_name__ :Tuple=255 , __magic_name__ :List[Any]="shi-labs/oneformer_demo" , __magic_name__ :Any="ade20k_panoptic.json" , __magic_name__ :List[str]=10 , ):
'''simple docstring'''
a = parent
a = batch_size
a = num_channels
a = min_resolution
a = max_resolution
a = do_resize
a = {"""shortest_edge""": 32, """longest_edge""": 1333} if size is None else size
a = do_normalize
a = image_mean
a = image_std
a = class_info_file
        a = prepare_metadata(class_info_file , repo_path )
a = num_text
a = repo_path
# for the post_process_functions
a = 2
a = 10
a = 10
a = 3
a = 4
a = num_labels
a = do_reduce_labels
a = ignore_index
def lowerCamelCase__ ( self :Dict ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
def lowerCamelCase__ ( self :Union[str, Any] , __magic_name__ :Dict , __magic_name__ :Optional[Any]=False ):
'''simple docstring'''
if not batched:
a = image_inputs[0]
            if isinstance(image , Image.Image ):
a , a = image.size
else:
a , a = image.shape[1], image.shape[2]
if w < h:
a = int(self.size["""shortest_edge"""] * h / w )
a = self.size["""shortest_edge"""]
elif w > h:
a = self.size["""shortest_edge"""]
a = int(self.size["""shortest_edge"""] * w / h )
else:
a = self.size["""shortest_edge"""]
a = self.size["""shortest_edge"""]
else:
a = []
for image in image_inputs:
a , a = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
            a = max(expected_values , key=lambda item : item[0] )[0]
            a = max(expected_values , key=lambda item : item[1] )[1]
return expected_height, expected_width
def lowerCamelCase__ ( self :Optional[Any] ):
'''simple docstring'''
return OneFormerForUniversalSegmentationOutput(
# +1 for null class
class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , )
@require_torch
@require_vision
class __lowerCAmelCase ( _lowerCamelCase , unittest.TestCase ):
UpperCamelCase__ = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
# only for test_image_processing_common.test_image_proc_to_json_string
UpperCamelCase__ = image_processing_class
def lowerCamelCase__ ( self :int ):
'''simple docstring'''
a = OneFormerImageProcessorTester(self )
@property
def lowerCamelCase__ ( self :Optional[int] ):
'''simple docstring'''
return self.image_processing_tester.prepare_image_processor_dict()
def lowerCamelCase__ ( self :List[Any] ):
'''simple docstring'''
a = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowercase_ , """image_mean""" ) )
self.assertTrue(hasattr(lowercase_ , """image_std""" ) )
self.assertTrue(hasattr(lowercase_ , """do_normalize""" ) )
self.assertTrue(hasattr(lowercase_ , """do_resize""" ) )
self.assertTrue(hasattr(lowercase_ , """size""" ) )
self.assertTrue(hasattr(lowercase_ , """ignore_index""" ) )
self.assertTrue(hasattr(lowercase_ , """class_info_file""" ) )
self.assertTrue(hasattr(lowercase_ , """num_text""" ) )
self.assertTrue(hasattr(lowercase_ , """repo_path""" ) )
self.assertTrue(hasattr(lowercase_ , """metadata""" ) )
self.assertTrue(hasattr(lowercase_ , """do_reduce_labels""" ) )
def lowerCamelCase__ ( self :Dict ):
'''simple docstring'''
pass
def lowerCamelCase__ ( self :str ):
'''simple docstring'''
a = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
        a = prepare_image_inputs(self.image_processing_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
# Test not batched input
a = image_processor(image_inputs[0] , ["""semantic"""] , return_tensors="""pt""" ).pixel_values
        a , a = self.image_processing_tester.get_expected_values(image_inputs )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
        a , a = self.image_processing_tester.get_expected_values(image_inputs , batched=True )
        a = image_processor(
            image_inputs , ["""semantic"""] * len(image_inputs ) , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCamelCase__ ( self :List[str] ):
'''simple docstring'''
a = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
        a = prepare_image_inputs(self.image_processing_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
# Test not batched input
a = image_processor(image_inputs[0] , ["""semantic"""] , return_tensors="""pt""" ).pixel_values
        a , a = self.image_processing_tester.get_expected_values(image_inputs )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
        a , a = self.image_processing_tester.get_expected_values(image_inputs , batched=True )
        a = image_processor(
            image_inputs , ["""semantic"""] * len(image_inputs ) , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCamelCase__ ( self :List[str] ):
'''simple docstring'''
a = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
        a = prepare_image_inputs(self.image_processing_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
# Test not batched input
a = image_processor(image_inputs[0] , ["""semantic"""] , return_tensors="""pt""" ).pixel_values
        a , a = self.image_processing_tester.get_expected_values(image_inputs )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
        a , a = self.image_processing_tester.get_expected_values(image_inputs , batched=True )
        a = image_processor(
            image_inputs , ["""semantic"""] * len(image_inputs ) , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCamelCase__ ( self :str , __magic_name__ :Optional[Any]=False , __magic_name__ :Dict=False , __magic_name__ :int="np" ):
'''simple docstring'''
a = self.image_processing_class(**self.image_processor_dict )
# prepare image and target
a = self.image_processing_tester.num_labels
a = None
a = None
        a = prepare_image_inputs(self.image_processing_tester , equal_resolution=False )
if with_segmentation_maps:
a = num_labels
if is_instance_map:
                a = list(range(num_labels ) ) * 2
                a = dict(enumerate(labels_expanded ) )
            a = [
                np.random.randint(0 , high * 2 , (img.size[1], img.size[0]) ).astype(np.uint8 ) for img in image_inputs
            ]
            if segmentation_type == "pil":
                a = [Image.fromarray(annotation ) for annotation in annotations]
            a = image_processor(
                image_inputs , ["""semantic"""] * len(image_inputs ) , annotations , return_tensors="""pt""" , instance_id_to_semantic_id=instance_id_to_semantic_id , pad_and_return_pixel_mask=True , )
return inputs
def lowerCamelCase__ ( self :Optional[int] ):
'''simple docstring'''
pass
def lowerCamelCase__ ( self :Dict ):
'''simple docstring'''
def common(__magic_name__ :int=False , __magic_name__ :Tuple=None ):
a = self.comm_get_image_processor_inputs(
                with_segmentation_maps=True , is_instance_map=is_instance_map , segmentation_type=segmentation_type )
a = inputs["""mask_labels"""]
a = inputs["""class_labels"""]
a = inputs["""pixel_values"""]
a = inputs["""text_inputs"""]
# check the batch_size
        for mask_label, class_label, text_input in zip(mask_labels , class_labels , text_inputs ):
self.assertEqual(mask_label.shape[0] , class_label.shape[0] )
# this ensure padding has happened
self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:] )
            self.assertEqual(len(text_input ) , self.image_processing_tester.num_text )
common()
        common(is_instance_map=True )
        common(is_instance_map=False , segmentation_type="""pil""" )
        common(is_instance_map=True , segmentation_type="""pil""" )
def lowerCamelCase__ ( self :Any ):
'''simple docstring'''
a = np.zeros((20, 50) )
a = 1
a = 1
a = 1
        a = binary_mask_to_rle(fake_binary_mask )
        self.assertEqual(len(rle ) , 4 )
self.assertEqual(rle[0] , 21 )
self.assertEqual(rle[1] , 45 )
def lowerCamelCase__ ( self :Union[str, Any] ):
'''simple docstring'''
a = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="""ade20k_panoptic.json""" , num_text=self.image_processing_tester.num_text , repo_path="""shi-labs/oneformer_demo""" , )
a = self.image_processing_tester.get_fake_oneformer_outputs()
        a = feature_extractor.post_process_semantic_segmentation(outputs )
        self.assertEqual(len(segmentation ) , self.image_processing_tester.batch_size )
self.assertEqual(
segmentation[0].shape , (
self.image_processing_tester.height,
self.image_processing_tester.width,
) , )
a = [(1, 4) for i in range(self.image_processing_tester.batch_size )]
        a = feature_extractor.post_process_semantic_segmentation(outputs , target_sizes=target_sizes )
self.assertEqual(segmentation[0].shape , target_sizes[0] )
def lowerCamelCase__ ( self :Union[str, Any] ):
'''simple docstring'''
a = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="""ade20k_panoptic.json""" , num_text=self.image_processing_tester.num_text , repo_path="""shi-labs/oneformer_demo""" , )
a = self.image_processing_tester.get_fake_oneformer_outputs()
        a = image_processor.post_process_instance_segmentation(outputs , threshold=0 )
        self.assertTrue(len(segmentation ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue("""segmentation""" in el )
self.assertTrue("""segments_info""" in el )
self.assertEqual(type(el["""segments_info"""] ) , lowercase_ )
self.assertEqual(
el["""segmentation"""].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
def lowerCamelCase__ ( self :Dict ):
'''simple docstring'''
a = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="""ade20k_panoptic.json""" , num_text=self.image_processing_tester.num_text , repo_path="""shi-labs/oneformer_demo""" , )
a = self.image_processing_tester.get_fake_oneformer_outputs()
a = image_processor.post_process_panoptic_segmentation(lowercase_ , threshold=0 )
self.assertTrue(len(lowercase_ ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue("""segmentation""" in el )
self.assertTrue("""segments_info""" in el )
self.assertEqual(type(el["""segments_info"""] ) , lowercase_ )
self.assertEqual(
el["""segmentation"""].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
| 356 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
__UpperCamelCase : int = {
"shi-labs/nat-mini-in1k-224": "https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json",
# See all Nat models at https://huggingface.co/models?filter=nat
}
class __lowerCAmelCase ( __magic_name__ , __magic_name__ ):
UpperCamelCase__ = '''nat'''
UpperCamelCase__ = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self :Any , __magic_name__ :int=4 , __magic_name__ :Dict=3 , __magic_name__ :List[str]=64 , __magic_name__ :Optional[int]=[3, 4, 6, 5] , __magic_name__ :int=[2, 4, 8, 16] , __magic_name__ :str=7 , __magic_name__ :Tuple=3.0 , __magic_name__ :Dict=True , __magic_name__ :List[Any]=0.0 , __magic_name__ :List[Any]=0.0 , __magic_name__ :List[Any]=0.1 , __magic_name__ :Optional[Any]="gelu" , __magic_name__ :Optional[Any]=0.02 , __magic_name__ :Tuple=1E-5 , __magic_name__ :Union[str, Any]=0.0 , __magic_name__ :int=None , __magic_name__ :Any=None , **__magic_name__ :Dict , ):
'''simple docstring'''
super().__init__(**__magic_name__ )
a = patch_size
a = num_channels
a = embed_dim
a = depths
a = len(__magic_name__ )
a = num_heads
a = kernel_size
a = mlp_ratio
a = qkv_bias
a = hidden_dropout_prob
a = attention_probs_dropout_prob
a = drop_path_rate
a = hidden_act
a = layer_norm_eps
a = initializer_range
# we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
a = int(embed_dim * 2 ** (len(__magic_name__ ) - 1) )
a = layer_scale_init_value
a = ["""stem"""] + [F'stage{idx}' for idx in range(1 , len(__magic_name__ ) + 1 )]
a , a = get_aligned_output_features_output_indices(
out_features=__magic_name__ , out_indices=__magic_name__ , stage_names=self.stage_names )
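# Minimal usage sketch (assuming this class is exported as `NatConfig`; the values
# are illustrative and follow the defaults above):
#   config = NatConfig(embed_dim=64, depths=[3, 4, 6, 5])
#   config.hidden_size  # 64 * 2 ** (4 - 1) == 512, the channel dim after the last stage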
| 347 | 0 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class __lowerCAmelCase ( unittest.TestCase ):
@slow
def lowerCamelCase__ ( self :Union[str, Any] ):
'''simple docstring'''
a = TFCamembertModel.from_pretrained("""jplu/tf-camembert-base""" )
a = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 2_5543, 110, 83, 6]] , dtype=tf.intaa , )  # "J'aime le camembert !"
a = model(lowerCamelCase__ )["""last_hidden_state"""]
a = tf.TensorShape((1, 10, 768) )
self.assertEqual(output.shape , lowerCamelCase__ )
# compare the actual values for a slice.
a = tf.convert_to_tensor(
[[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]] , dtype=tf.floataa , )
# camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
# camembert.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 357 |
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def __A ( __lowerCamelCase , __lowerCamelCase ) -> Optional[int]:
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
# expert layer
a = flax_key_tuple[:-1] + ("""weight""",)
a = torch.permute(__lowerCamelCase , (0, 2, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(__lowerCamelCase ):
# linear layer
a = flax_key_tuple[:-1] + ("""weight""",)
a = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
a = flax_key_tuple[:-1] + ("""weight""",)
return flax_key_tuple, flax_tensor
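# For example, a flax expert kernel ("mlp", "wi", "kernel") of shape
# (num_experts, d_in, d_out) comes out as a torch ("mlp", "wi", "weight")
# tensor permuted to (num_experts, d_out, d_in).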
def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> List[Any]:
if "metadata" in layer:
a = layer.split("""metadata""" )
a = """""".join(split_layer[0] )[:-1]
a = [tuple(("""metadata""" + split_layer[1]).split("""/""" ) )]
elif "kvstore" in layer:
a = layer.split("""kvstore""" )
a = """""".join(split_layer[0] )[:-1]
a = [tuple(("""kvstore""" + split_layer[1]).split("""/""" ) )]
else:
a = layer.split("""/""" )
a = """/""".join(split_layer[:-1] )
a = (split_layer[-1],)
if "kvstore/path" in layer:
a = f'{switch_checkpoint_path}/{checkpoint_info[layer]}'
elif "kvstore/driver" in layer:
a = """file"""
else:
a = checkpoint_info[layer]
return curr_real_layer_name, split_layer, content
def __A ( __lowerCamelCase , __lowerCamelCase ) -> Union[str, Any]:
a = rename_keys(__lowerCamelCase )
a = {}
for k, v in current_block.items():
a = v
a = new_current_block
torch.save(__lowerCamelCase , __lowerCamelCase )
def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = WEIGHTS_NAME ) -> List[str]:
a = convert_file_size_to_int(__lowerCamelCase )
a = []
a = {}
a = 0
a = 0
os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase )
with gfile.GFile(switch_checkpoint_path + """/checkpoint""" , """rb""" ) as fp:
a = serialization.msgpack_restore(fp.read() )["""optimizer"""]["""target"""]
a = flatten_dict(__lowerCamelCase , sep="""/""" )
a = {}
for layer in checkpoint_info.keys():
a , a , a = get_key_and_tensorstore_dict(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
if curr_real_layer_name in all_layers:
a = content
else:
a = {split_layer[-1]: content}
for key in all_layers.keys():
# open tensorstore file
a = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
a = torch.tensor(__lowerCamelCase )
a = raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
# use the renaming pattern from the small conversion scripts
a , a = rename_base_flax_keys(tuple(key.split("""/""" ) ) , __lowerCamelCase )
a = """/""".join(__lowerCamelCase )
# If this weight is going to tip up over the maximal size, we split.
if current_block_size + weight_size > max_shard_size:
a = os.path.join(
__lowerCamelCase , weights_name.replace(""".bin""" , f'-{len(__lowerCamelCase )+1:05d}-of-???.bin' ) )
rename_and_save_block(__lowerCamelCase , __lowerCamelCase )
sharded_state_dicts.append(current_block.keys() )
del current_block
a = {}
a = 0
a = raw_weights.to(getattr(__lowerCamelCase , __lowerCamelCase ) )
current_block_size += weight_size
total_size += weight_size
# Add the last block
a = os.path.join(__lowerCamelCase , weights_name.replace(""".bin""" , f'-{len(__lowerCamelCase )+1:05d}-of-???.bin' ) )
rename_and_save_block(__lowerCamelCase , __lowerCamelCase )
sharded_state_dicts.append(current_block.keys() )
# If we only have one shard, we return it
if len(__lowerCamelCase ) == 1:
return {weights_name: sharded_state_dicts[0]}, None
# Otherwise, let's build the index
a = {}
a = {}
for idx, shard in enumerate(__lowerCamelCase ):
a = weights_name.replace(
""".bin""" , f'-{idx+1:05d}-of-{len(__lowerCamelCase ):05d}.bin' ) # len(sharded_state_dicts):05d}
a = os.path.join(__lowerCamelCase , weights_name.replace(""".bin""" , f'-{idx+1:05d}-of-???.bin' ) )
os.rename(__lowerCamelCase , os.path.join(__lowerCamelCase , __lowerCamelCase ) )
a = shard
for key in shard:
a = shard_file
# Add the metadata
a = {"""total_size""": total_size}
a = {"""metadata""": metadata, """weight_map""": weight_map}
with open(os.path.join(__lowerCamelCase , __lowerCamelCase ) , """w""" , encoding="""utf-8""" ) as f:
a = json.dumps(__lowerCamelCase , indent=2 , sort_keys=__lowerCamelCase ) + """\n"""
f.write(__lowerCamelCase )
return metadata, index
if __name__ == "__main__":
__UpperCamelCase : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--switch_t5x_checkpoint_path",
default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600",
type=str,
required=False,
help="Path to a directory containing a folder per layer. Follows the original Google format.",
)
parser.add_argument("--max_shard_size", default="10GB", required=False, help="Max shard size")
parser.add_argument("--dtype", default="bfloat16", type=str, required=False, help="dtype of the saved model")
parser.add_argument(
"--pytorch_dump_folder_path",
default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted",
type=str,
required=False,
help="Path to the output pytorch model.",
)
__UpperCamelCase : Any = parser.parse_args()
shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def __A ( ) -> Tuple:
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer
a = SwitchTransformersConfig.from_pretrained("""google/switch-base-8""" )
config.save_pretrained("""/home/arthur_huggingface_co/transformers/switch_converted""" )
a = SwitchTransformersForConditionalGeneration.from_pretrained(
"""/home/arthur_huggingface_co/transformers/switch_converted""" , device_map="""auto""" )
a = TaTokenizer.from_pretrained("""t5-small""" )
a = """A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."""
a = tokenizer(__lowerCamelCase , return_tensors="""pt""" ).input_ids
a = model.generate(__lowerCamelCase , decoder_start_token_id=0 )
print(tokenizer.decode(out[0] ) )
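# Illustrative shard layout produced by `shard_on_the_fly` (hypothetical 3-shard run
# with the default weights name "pytorch_model.bin"):
#   pytorch_model-00001-of-00003.bin ... pytorch_model-00003-of-00003.bin
# plus an index JSON (WEIGHTS_INDEX_NAME) whose "weight_map" sends each parameter
# name to its shard file and whose "metadata" records the total size in bytes.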
| 347 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__UpperCamelCase : Dict = {'''configuration_plbart''': ['''PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PLBartConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Union[str, Any] = ['''PLBartTokenizer''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Tuple = [
'''PLBART_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PLBartForCausalLM''',
'''PLBartForConditionalGeneration''',
'''PLBartForSequenceClassification''',
'''PLBartModel''',
'''PLBartPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
__UpperCamelCase : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 358 |
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
import numpy as np
# Parameters
__UpperCamelCase : Union[str, Any] = (720, 1_280) # Height, Width
__UpperCamelCase : Any = (0.4, 0.6) # if height or width lower than this scale, drop it.
__UpperCamelCase : str = 1 / 100
__UpperCamelCase : Optional[int] = ""
__UpperCamelCase : List[Any] = ""
__UpperCamelCase : Union[str, Any] = ""
__UpperCamelCase : Tuple = 250
def __A ( ) -> None:
a , a = get_dataset(__lowerCamelCase , __lowerCamelCase )
for index in range(__lowerCamelCase ):
a = random.sample(range(len(__lowerCamelCase ) ) , 4 )
a , a , a = update_image_and_anno(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , filter_scale=__lowerCamelCase , )
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
a = random_chars(32 )
a = path.split(os.sep )[-1].rsplit(""".""" , 1 )[0]
a = f'{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}'
cva.imwrite(f'{file_root}.jpg' , __lowerCamelCase , [cva.IMWRITE_JPEG_QUALITY, 85] )
print(f'Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}' )
a = []
for anno in new_annos:
a = anno[3] - anno[1]
a = anno[4] - anno[2]
a = anno[1] + width / 2
a = anno[2] + height / 2
a = f'{anno[0]} {x_center} {y_center} {width} {height}'
annos_list.append(__lowerCamelCase )
with open(f'{file_root}.txt' , """w""" ) as outfile:
outfile.write("""\n""".join(line for line in annos_list ) )
def __A ( __lowerCamelCase , __lowerCamelCase ) -> tuple[list, list]:
a = []
a = []
for label_file in glob.glob(os.path.join(__lowerCamelCase , """*.txt""" ) ):
a = label_file.split(os.sep )[-1].rsplit(""".""" , 1 )[0]
with open(__lowerCamelCase ) as in_file:
a = in_file.readlines()
a = os.path.join(__lowerCamelCase , f'{label_name}.jpg' )
a = []
for obj_list in obj_lists:
a = obj_list.rstrip("""\n""" ).split(""" """ )
a = float(obj[1] ) - float(obj[3] ) / 2
a = float(obj[2] ) - float(obj[4] ) / 2
a = float(obj[1] ) + float(obj[3] ) / 2
a = float(obj[2] ) + float(obj[4] ) / 2
boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] )
if not boxes:
continue
img_paths.append(__lowerCamelCase )
labels.append(__lowerCamelCase )
return img_paths, labels
def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = 0.0 , ) -> tuple[list, list, str]:
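    # Stitch four source images into one mosaic: index 0 -> top-left, 1 -> top-right,
    # 2 -> bottom-left, 3 -> bottom-right, split at (divid_point_x, divid_point_y).
    # Annotations stay in normalised [0, 1] coordinates of the mosaic canvas.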
a = np.zeros([output_size[0], output_size[1], 3] , dtype=np.uinta )
a = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
a = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
a = int(scale_x * output_size[1] )
a = int(scale_y * output_size[0] )
a = []
a = []
for i, index in enumerate(__lowerCamelCase ):
a = all_img_list[index]
path_list.append(__lowerCamelCase )
a = all_annos[index]
a = cva.imread(__lowerCamelCase )
if i == 0: # top-left
a = cva.resize(__lowerCamelCase , (divid_point_x, divid_point_y) )
a = img
for bbox in img_annos:
a = bbox[1] * scale_x
a = bbox[2] * scale_y
a = bbox[3] * scale_x
a = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 1: # top-right
a = cva.resize(__lowerCamelCase , (output_size[1] - divid_point_x, divid_point_y) )
a = img
for bbox in img_annos:
a = scale_x + bbox[1] * (1 - scale_x)
a = bbox[2] * scale_y
a = scale_x + bbox[3] * (1 - scale_x)
a = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 2: # bottom-left
a = cva.resize(__lowerCamelCase , (divid_point_x, output_size[0] - divid_point_y) )
a = img
for bbox in img_annos:
a = bbox[1] * scale_x
a = scale_y + bbox[2] * (1 - scale_y)
a = bbox[3] * scale_x
a = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
else: # bottom-right
a = cva.resize(
__lowerCamelCase , (output_size[1] - divid_point_x, output_size[0] - divid_point_y) )
a = img
for bbox in img_annos:
a = scale_x + bbox[1] * (1 - scale_x)
a = scale_y + bbox[2] * (1 - scale_y)
a = scale_x + bbox[3] * (1 - scale_x)
a = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
# Remove bounding box small than scale of filter
if filter_scale > 0:
a = [
anno
for anno in new_anno
if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
]
return output_img, new_anno, path_list[0]
def __A ( __lowerCamelCase ) -> str:
assert number_char > 1, "The number of character should greater than 1"
a = ascii_lowercase + digits
return "".join(random.choice(__lowerCamelCase ) for _ in range(__lowerCamelCase ) )
if __name__ == "__main__":
main()
print("DONE ✅")
| 347 | 0 |
import os
def __A ( ) -> Union[str, Any]:
a : Optional[Any] = os.path.join(os.path.dirname(_lowercase ) , """num.txt""" )
with open(_lowercase ) as file_hand:
return str(sum(int(_lowercase ) for line in file_hand ) )[:10]
if __name__ == "__main__":
print(solution())
| 359 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__UpperCamelCase : Optional[Any] = {
"configuration_mobilenet_v2": [
"MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP",
"MobileNetV2Config",
"MobileNetV2OnnxConfig",
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : int = ["MobileNetV2FeatureExtractor"]
__UpperCamelCase : Tuple = ["MobileNetV2ImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : int = [
"MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST",
"MobileNetV2ForImageClassification",
"MobileNetV2ForSemanticSegmentation",
"MobileNetV2Model",
"MobileNetV2PreTrainedModel",
"load_tf_weights_in_mobilenet_v2",
]
if TYPE_CHECKING:
from .configuration_mobilenet_va import (
MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileNetVaConfig,
MobileNetVaOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilenet_va import MobileNetVaFeatureExtractor
from .image_processing_mobilenet_va import MobileNetVaImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilenet_va import (
MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileNetVaForImageClassification,
MobileNetVaForSemanticSegmentation,
MobileNetVaModel,
MobileNetVaPreTrainedModel,
load_tf_weights_in_mobilenet_va,
)
else:
import sys
__UpperCamelCase : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
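# Sketch of the effect (module and symbol names are assumptions): with _LazyModule,
# `from transformers.models.mobilenet_v2 import MobileNetV2Config` resolves lazily,
# importing the heavy torch/vision submodules only on first attribute access.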
| 347 | 0 |
"""simple docstring"""
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
__UpperCamelCase : Optional[Any] = {
"distilbert": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
"roberta": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
"bert": (BertConfig, BertForMaskedLM, BertTokenizer),
"gpt2": (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def __A ( __lowerCamelCase ) -> Any:
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def __A ( __lowerCamelCase , __lowerCamelCase ) -> int:
if args.student_type == "roberta":
a = False
elif args.student_type == "gpt2":
a = False
def __A ( __lowerCamelCase , __lowerCamelCase ) -> str:
if args.student_type == "roberta":
a = False
def __A ( ) -> List[str]:
a = argparse.ArgumentParser(description="""Training""" )
parser.add_argument("""--force""" , action="""store_true""" , help="""Overwrite dump_path if it already exists.""" )
parser.add_argument(
"""--dump_path""" , type=__lowerCamelCase , required=__lowerCamelCase , help="""The output directory (log, checkpoints, parameters, etc.)""" )
parser.add_argument(
"""--data_file""" , type=__lowerCamelCase , required=__lowerCamelCase , help="""The binarized file (tokenized + tokens_to_ids) and grouped by sequence.""" , )
parser.add_argument(
"""--student_type""" , type=__lowerCamelCase , choices=["""distilbert""", """roberta""", """gpt2"""] , required=__lowerCamelCase , help="""The student type (DistilBERT, RoBERTa).""" , )
parser.add_argument("""--student_config""" , type=__lowerCamelCase , required=__lowerCamelCase , help="""Path to the student configuration.""" )
parser.add_argument(
"""--student_pretrained_weights""" , default=__lowerCamelCase , type=__lowerCamelCase , help="""Load student initialization checkpoint.""" )
parser.add_argument(
"""--teacher_type""" , choices=["""bert""", """roberta""", """gpt2"""] , required=__lowerCamelCase , help="""Teacher type (BERT, RoBERTa).""" )
parser.add_argument("""--teacher_name""" , type=__lowerCamelCase , required=__lowerCamelCase , help="""The teacher model.""" )
parser.add_argument("""--temperature""" , default=2.0 , type=__lowerCamelCase , help="""Temperature for the softmax temperature.""" )
parser.add_argument(
"""--alpha_ce""" , default=0.5 , type=__lowerCamelCase , help="""Linear weight for the distillation loss. Must be >=0.""" )
parser.add_argument(
"""--alpha_mlm""" , default=0.0 , type=__lowerCamelCase , help="""Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.""" , )
parser.add_argument("""--alpha_clm""" , default=0.5 , type=__lowerCamelCase , help="""Linear weight for the CLM loss. Must be >=0.""" )
parser.add_argument("""--alpha_mse""" , default=0.0 , type=__lowerCamelCase , help="""Linear weight of the MSE loss. Must be >=0.""" )
parser.add_argument(
"""--alpha_cos""" , default=0.0 , type=__lowerCamelCase , help="""Linear weight of the cosine embedding loss. Must be >=0.""" )
parser.add_argument(
"""--mlm""" , action="""store_true""" , help="""The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.""" )
parser.add_argument(
"""--mlm_mask_prop""" , default=0.15 , type=__lowerCamelCase , help="""Proportion of tokens for which we need to make a prediction.""" , )
parser.add_argument("""--word_mask""" , default=0.8 , type=__lowerCamelCase , help="""Proportion of tokens to mask out.""" )
parser.add_argument("""--word_keep""" , default=0.1 , type=__lowerCamelCase , help="""Proportion of tokens to keep.""" )
parser.add_argument("""--word_rand""" , default=0.1 , type=__lowerCamelCase , help="""Proportion of tokens to randomly replace.""" )
parser.add_argument(
"""--mlm_smoothing""" , default=0.7 , type=__lowerCamelCase , help="""Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).""" , )
parser.add_argument("""--token_counts""" , type=__lowerCamelCase , help="""The token counts in the data_file for MLM.""" )
parser.add_argument(
"""--restrict_ce_to_mask""" , action="""store_true""" , help="""If true, compute the distillation loss only the [MLM] prediction distribution.""" , )
parser.add_argument(
"""--freeze_pos_embs""" , action="""store_true""" , help="""Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.""" , )
parser.add_argument(
"""--freeze_token_type_embds""" , action="""store_true""" , help="""Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only.""" , )
parser.add_argument("""--n_epoch""" , type=__lowerCamelCase , default=3 , help="""Number of pass on the whole dataset.""" )
parser.add_argument("""--batch_size""" , type=__lowerCamelCase , default=5 , help="""Batch size (for each process).""" )
parser.add_argument(
"""--group_by_size""" , action="""store_false""" , help="""If true, group sequences that have similar length into the same batch. Default is true.""" , )
parser.add_argument(
"""--gradient_accumulation_steps""" , type=__lowerCamelCase , default=50 , help="""Gradient accumulation for larger training batches.""" , )
parser.add_argument("""--warmup_prop""" , default=0.05 , type=__lowerCamelCase , help="""Linear warmup proportion.""" )
parser.add_argument("""--weight_decay""" , default=0.0 , type=__lowerCamelCase , help="""Weight decay if we apply some.""" )
parser.add_argument("""--learning_rate""" , default=5E-4 , type=__lowerCamelCase , help="""The initial learning rate for Adam.""" )
parser.add_argument("""--adam_epsilon""" , default=1E-6 , type=__lowerCamelCase , help="""Epsilon for Adam optimizer.""" )
parser.add_argument("""--max_grad_norm""" , default=5.0 , type=__lowerCamelCase , help="""Max gradient norm.""" )
parser.add_argument("""--initializer_range""" , default=0.02 , type=__lowerCamelCase , help="""Random initialization range.""" )
parser.add_argument(
"""--fp16""" , action="""store_true""" , help="""Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit""" , )
parser.add_argument(
"""--fp16_opt_level""" , type=__lowerCamelCase , default="""O1""" , help=(
"""For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."""
"""See details at https://nvidia.github.io/apex/amp.html"""
) , )
parser.add_argument("""--n_gpu""" , type=__lowerCamelCase , default=1 , help="""Number of GPUs in the node.""" )
parser.add_argument("""--local_rank""" , type=__lowerCamelCase , default=-1 , help="""Distributed training - Local rank""" )
parser.add_argument("""--seed""" , type=__lowerCamelCase , default=56 , help="""Random seed""" )
parser.add_argument("""--log_interval""" , type=__lowerCamelCase , default=500 , help="""Tensorboard logging interval.""" )
parser.add_argument("""--checkpoint_interval""" , type=__lowerCamelCase , default=4000 , help="""Checkpoint interval.""" )
a = parser.parse_args()
sanity_checks(__lowerCamelCase )
# ARGS #
init_gpu_params(__lowerCamelCase )
set_seed(__lowerCamelCase )
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
raise ValueError(
                    f'Serialization dir {args.dump_path} already exists, but you have not specified whether to overwrite'
                    """ it. Use `--force` if you want to overwrite it.""" )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(f'Experiment will be dumped and logged in {args.dump_path}' )
# SAVE PARAMS #
logger.info(f'Param: {args}' )
with open(os.path.join(args.dump_path , """parameters.json""" ) , """w""" ) as f:
json.dump(vars(__lowerCamelCase ) , __lowerCamelCase , indent=4 )
git_log(args.dump_path )
a = MODEL_CLASSES[args.student_type]
a = MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
a = teacher_tokenizer_class.from_pretrained(args.teacher_name )
a = {}
for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
a = tokenizer.all_special_tokens.index(__lowerCamelCase )
a = tokenizer.all_special_ids[idx]
logger.info(f'Special tokens {special_tok_ids}' )
a = special_tok_ids
a = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(f'Loading data from {args.data_file}' )
with open(args.data_file , """rb""" ) as fp:
a = pickle.load(__lowerCamelCase )
if args.mlm:
logger.info(f'Loading token counts from {args.token_counts} (already pre-computed)' )
with open(args.token_counts , """rb""" ) as fp:
a = pickle.load(__lowerCamelCase )
a = np.maximum(__lowerCamelCase , 1 ) ** -args.mlm_smoothing
for idx in special_tok_ids.values():
a = 0.0 # do not predict special tokens
a = torch.from_numpy(__lowerCamelCase )
else:
a = None
a = LmSeqsDataset(params=__lowerCamelCase , data=__lowerCamelCase )
logger.info("""Data loader created.""" )
# STUDENT #
logger.info(f'Loading student config from {args.student_config}' )
a = student_config_class.from_pretrained(args.student_config )
a = True
if args.student_pretrained_weights is not None:
logger.info(f'Loading pretrained weights from {args.student_pretrained_weights}' )
a = student_model_class.from_pretrained(args.student_pretrained_weights , config=__lowerCamelCase )
else:
a = student_model_class(__lowerCamelCase )
if args.n_gpu > 0:
student.to(f'cuda:{args.local_rank}' )
logger.info("""Student loaded.""" )
# TEACHER #
a = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=__lowerCamelCase )
if args.n_gpu > 0:
teacher.to(f'cuda:{args.local_rank}' )
logger.info(f'Teacher loaded from {args.teacher_name}.' )
# FREEZING #
if args.freeze_pos_embs:
freeze_pos_embeddings(__lowerCamelCase , __lowerCamelCase )
if args.freeze_token_type_embds:
freeze_token_type_embeddings(__lowerCamelCase , __lowerCamelCase )
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
a = Distiller(
params=__lowerCamelCase , dataset=__lowerCamelCase , token_probs=__lowerCamelCase , student=__lowerCamelCase , teacher=__lowerCamelCase )
distiller.train()
logger.info("""Let's go get some drinks.""" )
if __name__ == "__main__":
main()
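# Illustrative invocation (all paths and values are hypothetical; they merely
# satisfy the `sanity_checks` asserts above for an MLM distillation run):
#   python train.py --student_type distilbert --student_config distilbert.json \
#     --teacher_type bert --teacher_name bert-base-uncased --mlm \
#     --alpha_mlm 0.5 --alpha_clm 0.0 \
#     --data_file data/binarized.pickle --token_counts data/token_counts.pickle \
#     --dump_path serialization_dir/my_run --force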
| 360 |
def __A ( __lowerCamelCase ) -> bool:
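    """Check whether a non-negative integer reads the same backwards.

    >>> __A(121)
    True
    >>> __A(123)
    False
    >>> __A(-101)
    False
    """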
if num < 0:
return False
a = num
a = 0
while num > 0:
a = rev_num * 10 + (num % 10)
num //= 10
return num_copy == rev_num
if __name__ == "__main__":
import doctest
doctest.testmod()
| 347 | 0 |
__UpperCamelCase : Dict = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Optional[int]:
a = [False] * len(_a )
a = [s]
a = True
while queue:
a = queue.pop(0 )
for ind in range(len(graph[u] ) ):
if visited[ind] is False and graph[u][ind] > 0:
queue.append(_a )
a = True
a = u
return visited[t]
def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> List[str]:
a = [-1] * (len(_a ))
a = 0
a = []
a = [i[:] for i in graph] # Record original cut, copy.
while bfs(_a , _a , _a , _a ):
a = float("""Inf""" )
a = sink
while s != source:
# Find the minimum value in select path
a = min(_a , graph[parent[s]][s] )
a = parent[s]
max_flow += path_flow
a = sink
while v != source:
a = parent[v]
graph[u][v] -= path_flow
graph[v][u] += path_flow
a = parent[v]
for i in range(len(_a ) ):
for j in range(len(graph[0] ) ):
if graph[i][j] == 0 and temp[i][j] > 0:
res.append((i, j) )
return res
if __name__ == "__main__":
print(mincut(test_graph, source=0, sink=5))
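# With this graph and BFS (Edmonds-Karp) augmentation, the max flow is 23, so the
# script prints the saturated min-cut edges [(1, 3), (4, 3), (4, 5)]
# (capacities 12 + 7 + 4 == 23, matching the max-flow/min-cut theorem).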
| 361 |
import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ):
UpperCamelCase__ = CanineTokenizer
UpperCamelCase__ = False
def lowerCamelCase__ ( self :Tuple ):
'''simple docstring'''
super().setUp()
a = CanineTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def lowerCamelCase__ ( self :Dict ):
'''simple docstring'''
return CanineTokenizer.from_pretrained("""google/canine-s""" )
def lowerCamelCase__ ( self :Tuple , **__magic_name__ :Dict ):
'''simple docstring'''
a = self.tokenizer_class.from_pretrained(self.tmpdirname , **__magic_name__ )
a = 1024
return tokenizer
@require_torch
def lowerCamelCase__ ( self :int ):
'''simple docstring'''
a = self.canine_tokenizer
a = ["""Life is like a box of chocolates.""", """You never know what you're gonna get."""]
# fmt: off
a = [5_7344, 76, 105, 102, 101, 32, 105, 115, 32, 108, 105, 107, 101, 32, 97, 32, 98, 111, 120, 32, 111, 102, 32, 99, 104, 111, 99, 111, 108, 97, 116, 101, 115, 46, 5_7345, 0, 0, 0, 0]
# fmt: on
a = tokenizer(__magic_name__ , padding=__magic_name__ , return_tensors="""pt""" )
self.assertIsInstance(__magic_name__ , __magic_name__ )
a = list(batch.input_ids.numpy()[0] )
self.assertListEqual(__magic_name__ , __magic_name__ )
self.assertEqual((2, 39) , batch.input_ids.shape )
self.assertEqual((2, 39) , batch.attention_mask.shape )
@require_torch
def lowerCamelCase__ ( self :Any ):
'''simple docstring'''
a = self.canine_tokenizer
a = ["""Once there was a man.""", """He wrote a test in HuggingFace Tranformers."""]
a = tokenizer(__magic_name__ , padding=__magic_name__ , return_tensors="""pt""" )
# check if input_ids, attention_mask and token_type_ids are returned
self.assertIn("""input_ids""" , __magic_name__ )
self.assertIn("""attention_mask""" , __magic_name__ )
self.assertIn("""token_type_ids""" , __magic_name__ )
@require_torch
def lowerCamelCase__ ( self :Any ):
'''simple docstring'''
a = self.canine_tokenizer
a = [
"""What's the weater?""",
"""It's about 25 degrees.""",
]
a = tokenizer(
text_target=__magic_name__ , max_length=32 , padding="""max_length""" , truncation=__magic_name__ , return_tensors="""pt""" )
self.assertEqual(32 , targets["""input_ids"""].shape[1] )
def lowerCamelCase__ ( self :Union[str, Any] ):
'''simple docstring'''
a = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
a = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
# Isolate this from the other tests because we save additional tokens/etc
a = tempfile.mkdtemp()
a = """ He is very happy, UNwant\u00E9d,running"""
a = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
tokenizer.save_pretrained(__magic_name__ )
a = tokenizer.__class__.from_pretrained(__magic_name__ )
a = after_tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
self.assertListEqual(__magic_name__ , __magic_name__ )
shutil.rmtree(__magic_name__ )
a = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
# Isolate this from the other tests because we save additional tokens/etc
a = tempfile.mkdtemp()
a = """ He is very happy, UNwant\u00E9d,running"""
a = tokenizer.additional_special_tokens
# We can add a new special token for Canine as follows:
a = chr(0Xe_0_0_7 )
additional_special_tokens.append(__magic_name__ )
tokenizer.add_special_tokens({"""additional_special_tokens""": additional_special_tokens} )
a = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
tokenizer.save_pretrained(__magic_name__ )
a = tokenizer.__class__.from_pretrained(__magic_name__ )
a = after_tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
self.assertListEqual(__magic_name__ , __magic_name__ )
self.assertIn(__magic_name__ , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
a = tokenizer.__class__.from_pretrained(__magic_name__ , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(__magic_name__ )
def lowerCamelCase__ ( self :List[Any] ):
'''simple docstring'''
a = self.get_tokenizers(do_lower_case=__magic_name__ )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
a , a = self.get_clean_sequence(__magic_name__ )
# a special token for Canine can be defined as follows:
a = 0Xe_0_0_5
a = chr(__magic_name__ )
tokenizer.add_special_tokens({"""cls_token""": special_token} )
a = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
self.assertEqual(len(__magic_name__ ) , 1 )
a = tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=__magic_name__ )
a = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
a = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
a = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
self.assertEqual(__magic_name__ , input_encoded + special_token_id )
a = tokenizer.decode(__magic_name__ , skip_special_tokens=__magic_name__ )
self.assertTrue(special_token not in decoded )
def lowerCamelCase__ ( self :int ):
'''simple docstring'''
a = self.get_tokenizers(do_lower_case=__magic_name__ )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
a = chr(0Xe_0_0_5 )
a = chr(0Xe_0_0_6 )
# `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=__magic_name__ )
# `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
# which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
tokenizer.add_special_tokens({"""additional_special_tokens""": [SPECIAL_TOKEN_2]} )
a = tokenizer.tokenize(__magic_name__ )
a = tokenizer.tokenize(__magic_name__ )
self.assertEqual(len(__magic_name__ ) , 1 )
self.assertEqual(len(__magic_name__ ) , 1 )
self.assertEqual(token_a[0] , __magic_name__ )
self.assertEqual(token_a[0] , __magic_name__ )
@require_tokenizers
def lowerCamelCase__ ( self :Dict ):
'''simple docstring'''
a = self.get_tokenizers(do_lower_case=__magic_name__ )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
# a special token for Canine can be defined as follows:
a = 0Xe_0_0_6
a = chr(__magic_name__ )
a = AddedToken(__magic_name__ , lstrip=__magic_name__ )
tokenizer.add_special_tokens({"""additional_special_tokens""": [new_token]} )
with tempfile.TemporaryDirectory() as tmp_dir_name:
tokenizer.save_pretrained(__magic_name__ )
tokenizer.from_pretrained(__magic_name__ )
def lowerCamelCase__ ( self :int ):
'''simple docstring'''
a = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(__magic_name__ )
with open(os.path.join(__magic_name__ , """special_tokens_map.json""" ) , encoding="""utf-8""" ) as json_file:
a = json.load(__magic_name__ )
with open(os.path.join(__magic_name__ , """tokenizer_config.json""" ) , encoding="""utf-8""" ) as json_file:
a = json.load(__magic_name__ )
# a special token for Canine can be defined as follows:
a = 0Xe_0_0_6
a = chr(__magic_name__ )
a = [new_token_a]
a = [new_token_a]
with open(os.path.join(__magic_name__ , """special_tokens_map.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
json.dump(__magic_name__ , __magic_name__ )
with open(os.path.join(__magic_name__ , """tokenizer_config.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
json.dump(__magic_name__ , __magic_name__ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
a = tokenizer_class.from_pretrained(__magic_name__ , extra_ids=0 )
self.assertIn(__magic_name__ , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , )
a = 0Xe_0_0_7
a = chr(__magic_name__ )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
a = [AddedToken(__magic_name__ , lstrip=__magic_name__ )]
a = tokenizer_class.from_pretrained(
__magic_name__ , additional_special_tokens=__magic_name__ , extra_ids=0 )
self.assertIn(__magic_name__ , tokenizer.additional_special_tokens )
# self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) )
@require_tokenizers
def lowerCamelCase__ ( self :Union[str, Any] ):
'''simple docstring'''
a = self.get_tokenizers(do_lower_case=__magic_name__ )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
a = """hello world"""
if self.space_between_special_tokens:
a = """[CLS] hello world [SEP]"""
else:
a = input
a = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
a = tokenizer.decode(__magic_name__ , spaces_between_special_tokens=self.space_between_special_tokens )
self.assertIn(__magic_name__ , [output, output.lower()] )
def lowerCamelCase__ ( self :List[str] ):
'''simple docstring'''
a = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
a = [
"""bos_token""",
"""eos_token""",
"""unk_token""",
"""sep_token""",
"""pad_token""",
"""cls_token""",
"""mask_token""",
]
a = """a"""
a = ord(__magic_name__ )
for attr in attributes_list:
setattr(__magic_name__ , attr + """_id""" , __magic_name__ )
self.assertEqual(getattr(__magic_name__ , __magic_name__ ) , __magic_name__ )
self.assertEqual(getattr(__magic_name__ , attr + """_id""" ) , __magic_name__ )
setattr(__magic_name__ , attr + """_id""" , __magic_name__ )
self.assertEqual(getattr(__magic_name__ , __magic_name__ ) , __magic_name__ )
self.assertEqual(getattr(__magic_name__ , attr + """_id""" ) , __magic_name__ )
setattr(__magic_name__ , """additional_special_tokens_ids""" , [] )
self.assertListEqual(getattr(__magic_name__ , """additional_special_tokens""" ) , [] )
self.assertListEqual(getattr(__magic_name__ , """additional_special_tokens_ids""" ) , [] )
a = 0Xe_0_0_6
a = chr(__magic_name__ )
setattr(__magic_name__ , """additional_special_tokens_ids""" , [additional_special_token_id] )
self.assertListEqual(getattr(__magic_name__ , """additional_special_tokens""" ) , [additional_special_token] )
self.assertListEqual(getattr(__magic_name__ , """additional_special_tokens_ids""" ) , [additional_special_token_id] )
def lowerCamelCase__ ( self :Any ):
'''simple docstring'''
pass
def lowerCamelCase__ ( self :str ):
'''simple docstring'''
pass
def lowerCamelCase__ ( self :Optional[int] ):
'''simple docstring'''
pass
def lowerCamelCase__ ( self :List[Any] ):
'''simple docstring'''
pass
def lowerCamelCase__ ( self :List[str] ):
'''simple docstring'''
pass
def lowerCamelCase__ ( self :List[str] ):
'''simple docstring'''
pass
def lowerCamelCase__ ( self :Any ):
'''simple docstring'''
pass
def lowerCamelCase__ ( self :List[str] ):
'''simple docstring'''
pass
| 347 | 0 |
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPTaTokenizer,
GPTaTokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class __lowerCAmelCase ( unittest.TestCase ):
def lowerCamelCase__ ( self :Any ):
'''simple docstring'''
a = 0
@slow
def lowerCamelCase__ ( self :Union[str, Any] ):
'''simple docstring'''
for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
a = AutoTokenizer.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
self.assertIsInstance(snake_case__ , (BertTokenizer, BertTokenizerFast) )
self.assertGreater(len(snake_case__ ) , 0 )
for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
a = AutoTokenizer.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
self.assertIsInstance(snake_case__ , (GPTaTokenizer, GPTaTokenizerFast) )
self.assertGreater(len(snake_case__ ) , 0 )
def lowerCamelCase__ ( self :Optional[int] ):
'''simple docstring'''
a = AutoTokenizer.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def lowerCamelCase__ ( self :List[Any] ):
'''simple docstring'''
a = AutoTokenizer.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , (RobertaTokenizer, RobertaTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 20 )
def lowerCamelCase__ ( self :Union[str, Any] ):
'''simple docstring'''
a = AutoConfig.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
# Check that tokenizer_type ≠ model_type
a = AutoTokenizer.from_pretrained(snake_case__ , config=snake_case__ )
self.assertIsInstance(snake_case__ , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def lowerCamelCase__ ( self :List[str] ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.txt""" , os.path.join(snake_case__ , """vocab.txt""" ) )
a = AutoTokenizer.from_pretrained(snake_case__ , tokenizer_type="""bert""" , use_fast=snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.json""" , os.path.join(snake_case__ , """vocab.json""" ) )
shutil.copy("""./tests/fixtures/merges.txt""" , os.path.join(snake_case__ , """merges.txt""" ) )
a = AutoTokenizer.from_pretrained(snake_case__ , tokenizer_type="""gpt2""" , use_fast=snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
@require_tokenizers
def lowerCamelCase__ ( self :int ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.txt""" , os.path.join(snake_case__ , """vocab.txt""" ) )
a = AutoTokenizer.from_pretrained(snake_case__ , tokenizer_type="""bert""" )
self.assertIsInstance(snake_case__ , snake_case__ )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.json""" , os.path.join(snake_case__ , """vocab.json""" ) )
shutil.copy("""./tests/fixtures/merges.txt""" , os.path.join(snake_case__ , """merges.txt""" ) )
a = AutoTokenizer.from_pretrained(snake_case__ , tokenizer_type="""gpt2""" )
self.assertIsInstance(snake_case__ , snake_case__ )
def lowerCamelCase__ ( self :int ):
'''simple docstring'''
with pytest.raises(snake_case__ ):
AutoTokenizer.from_pretrained("""./""" , tokenizer_type="""xxx""" )
@require_tokenizers
def lowerCamelCase__ ( self :str ):
'''simple docstring'''
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
a = tokenizer_class.from_pretrained("""wietsedv/bert-base-dutch-cased""" )
self.assertIsInstance(snake_case__ , (BertTokenizer, BertTokenizerFast) )
if isinstance(snake_case__ , snake_case__ ):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , snake_case__ )
else:
self.assertEqual(tokenizer.do_lower_case , snake_case__ )
self.assertEqual(tokenizer.model_max_length , 512 )
@require_tokenizers
def lowerCamelCase__ ( self :Dict ):
'''simple docstring'''
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
snake_case__ , """julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier""" , ):
a = tokenizer_class.from_pretrained("""julien-c/herlolip-not-exists""" )
def lowerCamelCase__ ( self :Tuple ):
'''simple docstring'''
a = TOKENIZER_MAPPING.values()
a = []
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__ )
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__ )
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(snake_case__ )
@require_tokenizers
def lowerCamelCase__ ( self :int ):
'''simple docstring'''
self.assertIsInstance(AutoTokenizer.from_pretrained("""bert-base-cased""" , use_fast=snake_case__ ) , snake_case__ )
self.assertIsInstance(AutoTokenizer.from_pretrained("""bert-base-cased""" ) , snake_case__ )
@require_tokenizers
def lowerCamelCase__ ( self :Union[str, Any] ):
'''simple docstring'''
a = AutoTokenizer.from_pretrained("""distilbert-base-uncased""" , do_lower_case=snake_case__ )
        a = """Hello, world. How are you?"""
a = tokenizer.tokenize(snake_case__ )
self.assertEqual("""[UNK]""" , tokens[0] )
a = AutoTokenizer.from_pretrained("""microsoft/mpnet-base""" , do_lower_case=snake_case__ )
a = tokenizer.tokenize(snake_case__ )
self.assertEqual("""[UNK]""" , tokens[0] )
@require_tokenizers
def lowerCamelCase__ ( self :Optional[int] ):
'''simple docstring'''
a = AutoTokenizer.from_pretrained("""robot-test/dummy-tokenizer-fast-with-model-config""" )
self.assertEqual(type(snake_case__ ) , snake_case__ )
self.assertEqual(tokenizer.model_max_length , 512 )
self.assertEqual(tokenizer.vocab_size , 3_0000 )
self.assertEqual(tokenizer.unk_token , """[UNK]""" )
self.assertEqual(tokenizer.padding_side , """right""" )
self.assertEqual(tokenizer.truncation_side , """right""" )
def lowerCamelCase__ ( self :str ):
'''simple docstring'''
a = AutoTokenizer.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , (BertTokenizer, BertTokenizerFast) )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(snake_case__ )
a = AutoTokenizer.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , tokenizer.__class__ )
self.assertEqual(tokenizera.vocab_size , 12 )
def lowerCamelCase__ ( self :List[str] ):
'''simple docstring'''
a = AutoTokenizer.from_pretrained("""ctrl""" )
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(snake_case__ , snake_case__ )
def lowerCamelCase__ ( self :int ):
'''simple docstring'''
a = get_tokenizer_config("""bert-base-cased""" )
a = config.pop("""_commit_hash""" , snake_case__ )
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(snake_case__ , {"""do_lower_case""": False} )
# This model does not have a tokenizer_config so we get back an empty dict.
a = get_tokenizer_config(snake_case__ )
self.assertDictEqual(snake_case__ , {} )
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
a = AutoTokenizer.from_pretrained(snake_case__ )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(snake_case__ )
a = get_tokenizer_config(snake_case__ )
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config["""tokenizer_class"""] , """BertTokenizer""" )
def lowerCamelCase__ ( self :Tuple ):
'''simple docstring'''
try:
AutoConfig.register("""custom""" , snake_case__ )
AutoTokenizer.register(snake_case__ , slow_tokenizer_class=snake_case__ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(snake_case__ ):
AutoTokenizer.register(snake_case__ , slow_tokenizer_class=snake_case__ )
a = CustomTokenizer.from_pretrained(snake_case__ )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(snake_case__ )
a = AutoTokenizer.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def lowerCamelCase__ ( self :Any ):
'''simple docstring'''
try:
AutoConfig.register("""custom""" , snake_case__ )
# Can register in two steps
AutoTokenizer.register(snake_case__ , slow_tokenizer_class=snake_case__ )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) )
AutoTokenizer.register(snake_case__ , fast_tokenizer_class=snake_case__ )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
snake_case__ , slow_tokenizer_class=snake_case__ , fast_tokenizer_class=snake_case__ )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(snake_case__ ):
AutoTokenizer.register(snake_case__ , fast_tokenizer_class=snake_case__ )
            # We pass a fast BERT tokenizer through because there is no slow-to-fast converter for our new tokenizer
# and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
a = BertTokenizerFast.from_pretrained(snake_case__ )
bert_tokenizer.save_pretrained(snake_case__ )
a = CustomTokenizerFast.from_pretrained(snake_case__ )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(snake_case__ )
a = AutoTokenizer.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
a = AutoTokenizer.from_pretrained(snake_case__ , use_fast=snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
    def test_from_pretrained_dynamic_tokenizer( self ):
        '''simple docstring'''
        with self.assertRaises(ValueError ):
            tokenizer = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError ):
            tokenizer = AutoTokenizer.from_pretrained(
                """hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=False )
        tokenizer = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=True )
        self.assertTrue(tokenizer.special_attribute_present )
        # Test tokenizer can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir )
            reloaded_tokenizer = AutoTokenizer.from_pretrained(tmp_dir , trust_remote_code=True )
        self.assertTrue(reloaded_tokenizer.special_attribute_present )
        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
            self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizerFast""" )
            # Test we can also load the slow version
            tokenizer = AutoTokenizer.from_pretrained(
                """hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=True , use_fast=False )
            self.assertTrue(tokenizer.special_attribute_present )
            self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
            # Test tokenizer can be reloaded.
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(tmp_dir )
                reloaded_tokenizer = AutoTokenizer.from_pretrained(tmp_dir , trust_remote_code=True , use_fast=False )
            self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizer""" )
            self.assertTrue(reloaded_tokenizer.special_attribute_present )
        else:
            self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
            self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizer""" )
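    # Note on trust_remote_code (comment added for clarity): passing True lets
    # from_pretrained execute the tokenizer class shipped inside the Hub repo
    # itself; leaving it unset or False on a repo that only defines a custom
    # tokenizer raises, as the assertions above check.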
@require_tokenizers
    def test_from_pretrained_dynamic_tokenizer_conflict( self ):
        '''simple docstring'''
        class NewTokenizer(BertTokenizer ):
            special_attribute_present = False
        class NewTokenizerFast(BertTokenizerFast ):
            slow_tokenizer_class = NewTokenizer
            special_attribute_present = False
        try:
            AutoConfig.register("""custom""" , CustomConfig )
            AutoTokenizer.register(CustomConfig , slow_tokenizer_class=NewTokenizer )
            AutoTokenizer.register(CustomConfig , fast_tokenizer_class=NewTokenizerFast )
# If remote code is not set, the default is to use local
            tokenizer = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" )
            self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
            self.assertFalse(tokenizer.special_attribute_present )
            tokenizer = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" , use_fast=False )
            self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
            self.assertFalse(tokenizer.special_attribute_present )
            # If remote code is disabled, we load the local one.
            tokenizer = AutoTokenizer.from_pretrained(
                """hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=False )
            self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
            self.assertFalse(tokenizer.special_attribute_present )
            tokenizer = AutoTokenizer.from_pretrained(
                """hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=False , use_fast=False )
            self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
            self.assertFalse(tokenizer.special_attribute_present )
            # If remote is enabled, we load from the Hub
            tokenizer = AutoTokenizer.from_pretrained(
                """hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=True )
            self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
            self.assertTrue(tokenizer.special_attribute_present )
            tokenizer = AutoTokenizer.from_pretrained(
                """hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=True , use_fast=False )
            self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
            self.assertTrue(tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
    def test_from_pretrained_dynamic_tokenizer_legacy_format( self ):
        '''simple docstring'''
        tokenizer = AutoTokenizer.from_pretrained(
            """hf-internal-testing/test_dynamic_tokenizer_legacy""" , trust_remote_code=True )
        self.assertTrue(tokenizer.special_attribute_present )
        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
            # Test we can also load the slow version
            tokenizer = AutoTokenizer.from_pretrained(
                """hf-internal-testing/test_dynamic_tokenizer_legacy""" , trust_remote_code=True , use_fast=False )
            self.assertTrue(tokenizer.special_attribute_present )
            self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
        else:
            self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
    def test_repo_not_found( self ):
        '''simple docstring'''
        with self.assertRaisesRegex(
            EnvironmentError , """bert-base is not a local folder and is not a valid model identifier""" ):
            _ = AutoTokenizer.from_pretrained("""bert-base""" )
    def test_revision_not_found( self ):
        '''simple docstring'''
        with self.assertRaisesRegex(
            EnvironmentError , r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
            _ = AutoTokenizer.from_pretrained(snake_case__ , revision="""aaaaaa""" )
    def test_cached_tokenizer_has_minimum_calls_to_head( self ):
        '''simple docstring'''
        _ = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
        with RequestCounter() as counter:
            _ = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 ) | 362 |
def __A ( number: int ) -> bool:
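    """
    Bitwise parity check: an even number always has its lowest bit equal to 0.

    Illustrative doctests (added; exercised by doctest.testmod() below):
    >>> __A(4)
    True
    >>> __A(7)
    False
    >>> __A(0)
    True
    """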
return number & 1 == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 347 | 0 |
import os
import pytest
from transformers.dynamic_module_utils import get_imports
TOP_LEVEL_IMPORT = '''
import os
'''
IMPORT_IN_FUNCTION = '''
def foo():
import os
return False
'''
DEEPLY_NESTED_IMPORT = '''
def foo():
def bar():
if True:
import os
return False
return bar()
'''
TOP_LEVEL_TRY_IMPORT = '''
import os
try:
import bar
except ImportError:
raise ValueError()
'''
TRY_IMPORT_IN_FUNCTION = '''
import os
def foo():
try:
import bar
except ImportError:
raise ValueError()
'''
MULTIPLE_EXCEPTS_IMPORT = '''
import os
try:
import bar
except (ImportError, AttributeError):
raise ValueError()
'''
EXCEPT_AS_IMPORT = '''
import os
try:
import bar
except ImportError as e:
raise ValueError()
'''
GENERIC_EXCEPT_IMPORT = '''
import os
try:
import bar
except:
raise ValueError()
'''
MULTILINE_TRY_IMPORT = '''
import os
try:
import bar
import baz
except ImportError:
raise ValueError()
'''
MULTILINE_BOTH_IMPORT = '''
import os
try:
import bar
import baz
except ImportError:
x = 1
raise ValueError()
'''
CASES = [
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
@pytest.mark.parametrize("""case""" , CASES )
def test_import_parsing(case , tmp_path ):
    tmp_file_path = os.path.join(tmp_path , """test_file.py""" )
    with open(tmp_file_path , """w""" ) as _tmp_file:
        _tmp_file.write(case )
    parsed_imports = get_imports(tmp_file_path )
    assert parsed_imports == ["os"]
| 363 |
def __A ( numbers ) -> int:
    if not numbers:
        return 0
    if not isinstance(numbers , (list, tuple) ) or not all(
        isinstance(number , int ) for number in numbers ):
        raise ValueError("""numbers must be an iterable of integers""" )
    max_till_now = min_till_now = max_prod = numbers[0]
    for i in range(1 , len(numbers ) ):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            min_till_now , max_till_now = max_till_now , min_till_now
        max_till_now = max(number , max_till_now * number )
        min_till_now = min(number , min_till_now * number )
        # update the maximum product found till now
        max_prod = max(max_prod , max_till_now )
    return max_prod
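# Illustrative check (added; not part of the original module): the best
# contiguous product of [2, 3, -2, 4] is 2 * 3 = 6, and [-2, 0, -1] can do no
# better than the single-element subarray [0].
if __name__ == "__main__":
    assert __A([2, 3, -2, 4] ) == 6
    assert __A([-2, 0, -1] ) == 0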
| 347 | 0 |
from collections import defaultdict
from math import gcd
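# Euclid's formula: for coprime m > n of opposite parity, (m**2 - n**2, 2*m*n,
# m**2 + n**2) is a primitive Pythagorean triple with perimeter 2*m*(m + n);
# tallying every multiple of each primitive perimeter up to the limit counts,
# per wire length, how many distinct right-angle triangles it can form.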
def solution(limit: int = 150_0000 ) -> int:
    frequencies = defaultdict(int )
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1 , euclid_m , 2 ):
            if gcd(euclid_m , euclid_n ) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter , limit + 1 , primitive_perimeter ):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1 )
if __name__ == "__main__":
print(F'{solution() = }')
| 364 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
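# Lazy-module layout: nothing heavy is imported eagerly. _LazyModule (see the
# bottom of this file) resolves each submodule on first attribute access, so
# the package stays importable even when torch, TensorFlow, or Flax is missing.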
_import_structure = {
"configuration_wav2vec2": ["WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Wav2Vec2Config"],
"feature_extraction_wav2vec2": ["Wav2Vec2FeatureExtractor"],
"processing_wav2vec2": ["Wav2Vec2Processor"],
"tokenization_wav2vec2": ["Wav2Vec2CTCTokenizer", "Wav2Vec2Tokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_wav2vec2"] = [
"WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
"Wav2Vec2ForAudioFrameClassification",
"Wav2Vec2ForCTC",
"Wav2Vec2ForMaskedLM",
"Wav2Vec2ForPreTraining",
"Wav2Vec2ForSequenceClassification",
"Wav2Vec2ForXVector",
"Wav2Vec2Model",
"Wav2Vec2PreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_wav2vec2"] = [
"TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFWav2Vec2ForCTC",
"TFWav2Vec2Model",
"TFWav2Vec2PreTrainedModel",
"TFWav2Vec2ForSequenceClassification",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_wav2vec2"] = [
"FlaxWav2Vec2ForCTC",
"FlaxWav2Vec2ForPreTraining",
"FlaxWav2Vec2Model",
"FlaxWav2Vec2PreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .processing_wavaveca import WavaVecaProcessor
from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavaveca import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
WavaVecaForAudioFrameClassification,
WavaVecaForCTC,
WavaVecaForMaskedLM,
WavaVecaForPreTraining,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
WavaVecaModel,
WavaVecaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWavaVecaForCTC,
TFWavaVecaForSequenceClassification,
TFWavaVecaModel,
TFWavaVecaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_wavaveca import (
FlaxWavaVecaForCTC,
FlaxWavaVecaForPreTraining,
FlaxWavaVecaModel,
FlaxWavaVecaPreTrainedModel,
)
else:
import sys
__UpperCamelCase : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 347 | 0 |
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
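# replicate() copies the pipeline parameters onto every local device, while
# shard() splits a batch along its leading axis so each device receives one
# slice; together they prepare inputs for the pmapped (jit=True) pipeline call.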
@slow
@require_flax
class __lowerCAmelCase ( unittest.TestCase ):
    def tearDown( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
    def test_canny( self ):
'''simple docstring'''
        controlnet , controlnet_params = FlaxControlNetModel.from_pretrained(
            """lllyasviel/sd-controlnet-canny""" , from_pt=True , dtype=jnp.bfloat16 )
        pipe , params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            """runwayml/stable-diffusion-v1-5""" , controlnet=controlnet , from_pt=True , dtype=jnp.bfloat16 )
        params["""controlnet"""] = controlnet_params
        prompts = """bird"""
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples )
        canny_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png""" )
        processed_image = pipe.prepare_image_inputs([canny_image] * num_samples )
        rng = jax.random.PRNGKey(0 )
        rng = jax.random.split(rng , jax.device_count() )
        p_params = replicate(params )
        prompt_ids = shard(prompt_ids )
        processed_image = shard(processed_image )
        images = pipe(
            prompt_ids=prompt_ids , image=processed_image , params=p_params , prng_seed=rng , num_inference_steps=50 , jit=True , ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)
        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
        image_slice = images[0, 253:256, 253:256, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten() ) )
        expected_slice = jnp.array(
            [0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078] )
        print(F'output_slice: {output_slice}' )
        assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
    def test_pose( self ):
'''simple docstring'''
        controlnet , controlnet_params = FlaxControlNetModel.from_pretrained(
            """lllyasviel/sd-controlnet-openpose""" , from_pt=True , dtype=jnp.bfloat16 )
        pipe , params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            """runwayml/stable-diffusion-v1-5""" , controlnet=controlnet , from_pt=True , dtype=jnp.bfloat16 )
        params["""controlnet"""] = controlnet_params
        prompts = """Chef in the kitchen"""
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples )
        pose_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png""" )
        processed_image = pipe.prepare_image_inputs([pose_image] * num_samples )
        rng = jax.random.PRNGKey(0 )
        rng = jax.random.split(rng , jax.device_count() )
        p_params = replicate(params )
        prompt_ids = shard(prompt_ids )
        processed_image = shard(processed_image )
        images = pipe(
            prompt_ids=prompt_ids , image=processed_image , params=p_params , prng_seed=rng , num_inference_steps=50 , jit=True , ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)
        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
        image_slice = images[0, 253:256, 253:256, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten() ) )
        expected_slice = jnp.array(
            [[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]] )
        print(F'output_slice: {output_slice}' )
        assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
| 365 |
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize("""dataset_size""" , [None, 400 * 2**20, 600 * 2**20] )
@pytest.mark.parametrize("""input_in_memory_max_size""" , ["""default""", 0, 100 * 2**20, 900 * 2**20] )
def test_is_small_dataset(dataset_size , input_in_memory_max_size , monkeypatch ):
    if input_in_memory_max_size != "default":
        monkeypatch.setattr(datasets.config , """IN_MEMORY_MAX_SIZE""" , input_in_memory_max_size )
    in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE
    if input_in_memory_max_size == "default":
        assert in_memory_max_size == 0
    else:
        assert in_memory_max_size == input_in_memory_max_size
    if dataset_size and in_memory_max_size:
        expected = dataset_size < in_memory_max_size
    else:
        expected = False
    result = is_small_dataset(dataset_size )
    assert result == expected
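# is_small_dataset() should only answer True when a known dataset_size fits
# below the configured IN_MEMORY_MAX_SIZE; a size of None or a max size of 0
# (the default, meaning "never keep in memory") must both yield False.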
| 347 | 0 |
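# Ideal-gas helpers (function names below are reconstructed from the formulas;
# 0.0821 is the gas constant R in L*atm/(mol*K), so pressures are in atm,
# volumes in litres and temperatures in kelvin):
#   normality = molarity * n-factor = (moles / volume) * nfactor
#   PV = nRT  ->  P = nRT/V,  V = nRT/P,  T = PV/(nR)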
def molarity_to_normality(nfactor , moles , volume ) -> int:
    return round(float(moles / volume ) * nfactor )
def moles_to_pressure(volume , moles , temperature ) -> int:
    return round(float((moles * 0.0821 * temperature) / (volume) ) )
def moles_to_volume(pressure , moles , temperature ) -> int:
    return round(float((moles * 0.0821 * temperature) / (pressure) ) )
def pressure_and_volume_to_temperature(pressure , moles , volume ) -> int:
    return round(float((pressure * volume) / (0.0821 * moles) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 366 |
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
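# For each pair of fractions x and y and each exponent n in {1, 2, -1, -2},
# the loops below solve x**n + y**n = z**n for a rational z, keep z when it is
# a reduced fraction within the search order, and accumulate the distinct
# values of x + y + z (reduced by add_three) into a Fraction total.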
def is_sq(number: int ) -> bool:
    sq = int(number**0.5 )
    return number == sq * sq
def add_three(
    x_num: int , x_den: int , y_num: int , y_den: int , z_num: int , z_den: int ) -> tuple[int, int]:
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top , bottom )
    top //= hcf
    bottom //= hcf
    return top, bottom
def solution(order: int = 35 ) -> int:
    unique_s: set = set()
    hcf: int
    total: Fraction = Fraction(0 )
    fraction_sum: tuple[int, int]
    for x_num in range(1 , order + 1 ):
        for x_den in range(x_num + 1 , order + 1 ):
            for y_num in range(1 , order + 1 ):
                for y_den in range(y_num + 1 , order + 1 ):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num , z_den )
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num , x_den , y_num , y_den , z_num , z_den )
                        unique_s.add(fraction_sum )
                    # n=2
                    z_num = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num ) and is_sq(z_den ):
                        z_num = int(sqrt(z_num ) )
                        z_den = int(sqrt(z_den ) )
                        hcf = gcd(z_num , z_den )
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num , x_den , y_num , y_den , z_num , z_den )
                            unique_s.add(fraction_sum )
                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num , z_den )
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num , x_den , y_num , y_den , z_num , z_den )
                        unique_s.add(fraction_sum )
                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if is_sq(z_num ) and is_sq(z_den ):
                        z_num = int(sqrt(z_num ) )
                        z_den = int(sqrt(z_den ) )
                        hcf = gcd(z_num , z_den )
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num , x_den , y_num , y_den , z_num , z_den )
                            unique_s.add(fraction_sum )
    for num, den in unique_s:
        total += Fraction(num , den )
    return total.denominator + total.numerator
if __name__ == "__main__":
print(F'{solution() = }')
| 347 | 0 |
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
absl_logger = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
logger = logging.getLogger(__name__)
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--onnx_model_path",
default=None,
type=str,
required=True,
help="Path to ONNX model: ",
)
parser.add_argument(
"--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the model checkpoints and predictions will be written.",
)
# Other parameters
parser.add_argument(
"--tokenizer_name",
default="",
type=str,
required=True,
help="Pretrained tokenizer name or path if not the same as model_name",
)
parser.add_argument(
"--version_2_with_negative",
action="store_true",
help="If true, the SQuAD examples contain some that do not have an answer.",
)
parser.add_argument(
"--null_score_diff_threshold",
type=float,
default=0.0,
help="If null_score - best_non_null is greater than the threshold predict null.",
)
parser.add_argument(
"--max_seq_length",
default=384,
type=int,
help=(
"The maximum total input sequence length after WordPiece tokenization. Sequences "
"longer than this will be truncated, and sequences shorter than this will be padded."
),
)
parser.add_argument(
"--doc_stride",
default=128,
type=int,
help="When splitting up a long document into chunks, how much stride to take between chunks.",
)
parser.add_argument("--per_device_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.")
parser.add_argument(
"--n_best_size",
default=20,
type=int,
help="The total number of n-best predictions to generate in the nbest_predictions.json output file.",
)
parser.add_argument(
"--max_answer_length",
default=30,
type=int,
help=(
"The maximum length of an answer that can be generated. This is needed because the start "
"and end predictions are not conditioned on one another."
),
)
parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
parser.add_argument(
"--dataset_name",
type=str,
default=None,
required=True,
help="The name of the dataset to use (via the datasets library).",
)
parser.add_argument(
"--dataset_config_name",
type=str,
default=None,
help="The configuration name of the dataset to use (via the datasets library).",
)
parser.add_argument(
"--preprocessing_num_workers", type=int, default=4, help="A csv or a json file containing the training data."
)
parser.add_argument("--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets")
parser.add_argument(
"--fp16",
action="store_true",
help="Whether to use 16-bit (mixed) precision instead of 32-bit",
)
parser.add_argument(
"--int8",
action="store_true",
help="Whether to use INT8",
)
args = parser.parse_args()
if args.tokenizer_name:
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported by this script."
"You can do it from another script, save it, and load it from here, using --tokenizer_name."
)
logger.info("Training/evaluation parameters %s", args)
args.eval_batch_size = args.per_device_eval_batch_size
INPUT_SHAPE = (args.eval_batch_size, args.max_seq_length)
# TRT Engine properties
STRICT_TYPES = True
engine_name = "temp_engine/bert-fp32.engine"
if args.fp16:
    engine_name = "temp_engine/bert-fp16.engine"
if args.int8:
    engine_name = "temp_engine/bert-int8.engine"
# import ONNX file
if not os.path.exists("temp_engine"):
os.makedirs("temp_engine")
__UpperCamelCase : List[Any] = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, "rb") as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
    network_inputs = [network.get_input(i) for i in range(network.num_inputs)]
    input_names = [_input.name for _input in network_inputs]  # ex: ["actual_input1"]
with builder.create_builder_config() as config:
__UpperCamelCase : Tuple = 1 << 50
if STRICT_TYPES:
config.set_flag(trt.BuilderFlag.STRICT_TYPES)
if args.fpaa:
config.set_flag(trt.BuilderFlag.FPaa)
if args.inta:
config.set_flag(trt.BuilderFlag.INTa)
__UpperCamelCase : List[Any] = builder.create_optimization_profile()
config.add_optimization_profile(profile)
for i in range(len(input_names)):
profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
__UpperCamelCase : Any = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, "wb") as f:
f.write(engine.serialize())
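    # The serialized engine is written once and deserialized again further
    # below, so the expensive build step can be skipped on later runs as long
    # as the temp_engine/ directory is kept around.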
def model_infer(inputs , context , d_inputs , h_output0 , h_output1 , d_output0 , d_output1 , stream ):
    input_ids = np.asarray(inputs["""input_ids"""] , dtype=np.int32 )
    attention_mask = np.asarray(inputs["""attention_mask"""] , dtype=np.int32 )
    token_type_ids = np.asarray(inputs["""token_type_ids"""] , dtype=np.int32 )
    # Copy inputs
    cuda.memcpy_htod_async(d_inputs[0] , input_ids.ravel() , stream )
    cuda.memcpy_htod_async(d_inputs[1] , attention_mask.ravel() , stream )
    cuda.memcpy_htod_async(d_inputs[2] , token_type_ids.ravel() , stream )
    # start time
    start_time = time.time()
    # Run inference
    context.execute_async(
        bindings=[int(d_inp ) for d_inp in d_inputs] + [int(d_output0 ), int(d_output1 )] , stream_handle=stream.handle )
    # Transfer predictions back from GPU
    cuda.memcpy_dtoh_async(h_output0 , d_output0 , stream )
    cuda.memcpy_dtoh_async(h_output1 , d_output1 , stream )
    # Synchronize the stream and take time
    stream.synchronize()
    # end time
    end_time = time.time()
    infer_time = end_time - start_time
    outputs = (h_output0, h_output1)
    return outputs, infer_time
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
accelerator = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
    raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError("Evaluation requires a dataset name")
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slightly different for training and evaluation.
__UpperCamelCase : Dict = raw_datasets["validation"].column_names
__UpperCamelCase : Optional[Any] = "question" if "question" in column_names else column_names[0]
__UpperCamelCase : Optional[Any] = "context" if "context" in column_names else column_names[1]
__UpperCamelCase : List[Any] = "answers" if "answers" in column_names else column_names[2]
# Padding side determines if we do (question|context) or (context|question).
__UpperCamelCase : Optional[int] = tokenizer.padding_side == "right"
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F'The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the'
F'model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.'
)
max_seq_length = min(args.max_seq_length, tokenizer.model_max_length)
def prepare_validation_features(examples ):
    examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]
    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possibly giving several features when a context is long, each of those features having a
    # context that overlaps a bit the context of the previous feature.
    tokenized_examples = tokenizer(
        examples[question_column_name if pad_on_right else context_column_name] , examples[context_column_name if pad_on_right else question_column_name] , truncation="""only_second""" if pad_on_right else """only_first""" , max_length=max_seq_length , stride=args.doc_stride , return_overflowing_tokens=True , return_offsets_mapping=True , padding="""max_length""" , )
    # Since one example might give us several features if it has a long context, we need a map from a feature to
    # its corresponding example. This key gives us just that.
    sample_mapping = tokenized_examples.pop("""overflow_to_sample_mapping""" )
    # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
    # corresponding example_id and we will store the offset mappings.
    tokenized_examples["""example_id"""] = []
    for i in range(len(tokenized_examples["""input_ids"""] ) ):
        # Grab the sequence corresponding to that example (to know what is the context and what is the question).
        sequence_ids = tokenized_examples.sequence_ids(i )
        context_index = 1 if pad_on_right else 0
        # One example can give several spans, this is the index of the example containing this span of text.
        sample_index = sample_mapping[i]
        tokenized_examples["example_id"].append(examples["""id"""][sample_index] )
        # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
        # position is part of the context or not.
        tokenized_examples["""offset_mapping"""][i] = [
            (o if sequence_ids[k] == context_index else None)
            for k, o in enumerate(tokenized_examples["""offset_mapping"""][i] )
        ]
    return tokenized_examples
__UpperCamelCase : Dict = raw_datasets["validation"]
# Validation Feature Creation
__UpperCamelCase : Optional[Any] = eval_examples.map(
prepare_validation_features,
batched=True,
num_proc=args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not args.overwrite_cache,
desc="Running tokenizer on validation dataset",
)
data_collator = default_data_collator
eval_dataset_for_model = eval_dataset.remove_columns(["example_id", "offset_mapping"])
eval_dataloader = DataLoader(
eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase="eval" ) -> Any:
a = postprocess_qa_predictions(
examples=SCREAMING_SNAKE_CASE__ , features=SCREAMING_SNAKE_CASE__ , predictions=SCREAMING_SNAKE_CASE__ , version_2_with_negative=args.version_2_with_negative , n_best_size=args.n_best_size , max_answer_length=args.max_answer_length , null_score_diff_threshold=args.null_score_diff_threshold , output_dir=args.output_dir , prefix=SCREAMING_SNAKE_CASE__ , )
# Format the result to the format the metric expects.
if args.version_2_with_negative:
a = [
{'''id''': k, '''prediction_text''': v, '''no_answer_probability''': 0.0} for k, v in predictions.items()
]
else:
a = [{'''id''': k, '''prediction_text''': v} for k, v in predictions.items()]
a = [{'''id''': ex['''id'''], '''answers''': ex[answer_column_name]} for ex in examples]
return EvalPrediction(predictions=SCREAMING_SNAKE_CASE__ , label_ids=SCREAMING_SNAKE_CASE__ )
metric = load_metric("squad_v2" if args.version_2_with_negative else "squad")
# Evaluation!
logger.info("Loading ONNX model %s for evaluation", args.onnx_model_path)
with open(engine_name, "rb") as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
    # setup for TRT inference
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
    def binding_nbytes(binding ):
        return trt.volume(engine.get_binding_shape(binding ) ) * engine.get_binding_dtype(binding ).itemsize
# Allocate device memory for inputs and outputs.
    d_inputs = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
    # Allocate output buffers
    h_output0 = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.float32)
    h_output1 = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.float32)
    d_output0 = cuda.mem_alloc(h_output0.nbytes)
    d_output1 = cuda.mem_alloc(h_output1.nbytes)
    # Create a stream in which to copy inputs/outputs and run inference.
    stream = cuda.Stream()
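    # Page-locked (pinned) host buffers let the memcpy_*_async calls overlap
    # with compute on the stream; pageable numpy arrays would force
    # synchronous copies instead.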
# Evaluation
logger.info("***** Running Evaluation *****")
logger.info(F' Num examples = {len(eval_dataset)}')
logger.info(F' Batch size = {args.per_device_eval_batch_size}')
    total_time = 0.0
    niter = 0
    start_time = timeit.default_timer()
    all_preds = None
    for step, batch in enumerate(eval_dataloader):
        outputs , infer_time = model_infer(batch, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream)
        total_time += infer_time
        niter += 1
        start_logits , end_logits = outputs
        start_logits = torch.tensor(start_logits)
        end_logits = torch.tensor(end_logits)
        # necessary to pad predictions and labels for being gathered
        start_logits = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
        end_logits = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)
        logits = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
        all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
    if all_preds is not None:
        all_preds = nested_truncate(all_preds, len(eval_dataset))
    evalTime = timeit.default_timer() - start_time
logger.info(" Evaluation done in total %f secs (%f sec per example)", evalTime, evalTime / len(eval_dataset))
# Inference time from TRT
logger.info("Average Inference Time = {:.3f} ms".format(total_time * 1_000 / niter))
logger.info("Total Inference Time = {:.3f} ms".format(total_time * 1_000))
logger.info("Total Number of Inference = %d", niter)
    prediction = post_processing_function(eval_examples, eval_dataset, all_preds)
    eval_metric = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(F'Evaluation metrics: {eval_metric}')
| 367 |
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class FlaxRoFormerModelTester( unittest.TestCase ):
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_attention_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="""gelu""" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_choices=4 , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        config = RoFormerConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , token_type_ids , attention_mask = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxRoFormerModelTest( FlaxModelTesterMixin , unittest.TestCase ):
    test_head_masking = True
    all_model_classes = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
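    # FlaxModelTesterMixin drives its shared checks (shapes, jit equivalence,
    # save/load round trips) over every class in all_model_classes, using the
    # config and inputs produced by the model tester above.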
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = FlaxRoFormerModelTester(self )
    @slow
    def test_model_from_pretrained( self ):
        '''simple docstring'''
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("""junnyu/roformer_chinese_small""" , from_pt=True )
            outputs = model(np.ones((1, 1) ) )
            self.assertIsNotNone(outputs )
@require_flax
class FlaxRoFormerModelIntegrationTest( unittest.TestCase ):
@slow
    def test_inference_masked_lm( self ):
        '''simple docstring'''
        model = FlaxRoFormerForMaskedLM.from_pretrained("""junnyu/roformer_chinese_base""" )
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]] )
        output = model(input_ids )[0]
        vocab_size = 5_0000
        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape , expected_shape )
        expected_slice = jnp.array(
            [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]] )
        self.assertTrue(jnp.allclose(output[:, :3, :3] , expected_slice , atol=1E-4 ) )
| 347 | 0 |
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}
def topology_sort(graph , vert , visited ) -> list[int]:
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph , neighbour , visited )
    order.append(vert )
    return order
def find_components(reversed_graph , vert , visited ) -> list[int]:
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph , neighbour , visited )
    return component
def strongly_connected_components(graph ) -> list[list[int]]:
    visited = len(graph ) * [False]
    reversed_graph = {vert: [] for vert in range(len(graph ) )}
    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert )
    order = []
    for i, was_visited in enumerate(visited ):
        if not was_visited:
            order += topology_sort(graph , i , visited )
    components_list = []
    visited = len(graph ) * [False]
    for i in range(len(graph ) ):
        vert = order[len(graph ) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph , vert , visited )
            components_list.append(component )
    return components_list
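# Illustrative usage (added; not part of the original module): test_graph_1
# decomposes into the strongly connected component {0, 1, 2} (the cycle
# 0 -> 2 -> 1 -> 0) plus the singletons {3} and {4}.
if __name__ == "__main__":
    print(strongly_connected_components(test_graph_1 ) )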
| 368 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_blenderbot": [
"BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlenderbotConfig",
"BlenderbotOnnxConfig",
],
"tokenization_blenderbot": ["BlenderbotTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_blenderbot_fast"] = ["BlenderbotTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blenderbot"] = [
"BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotForCausalLM",
"BlenderbotForConditionalGeneration",
"BlenderbotModel",
"BlenderbotPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blenderbot"] = [
"TFBlenderbotForConditionalGeneration",
"TFBlenderbotModel",
"TFBlenderbotPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_blenderbot"] = [
"FlaxBlenderbotForConditionalGeneration",
"FlaxBlenderbotModel",
"FlaxBlenderbotPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
__UpperCamelCase : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 347 | 0 |
"""simple docstring"""
import os
__UpperCamelCase : Any = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1_000}
def parse_roman_numerals(numerals: str ) -> int:
    total_value = 0
    index = 0
    while index < len(numerals ) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]
    return total_value
def generate_roman_numerals(num: int ) -> str:
    numerals = """"""
    m_count = num // 1000
numerals += m_count * "M"
num %= 1000
    c_count = num // 100
if c_count == 9:
numerals += "CM"
c_count -= 9
elif c_count == 4:
numerals += "CD"
c_count -= 4
if c_count >= 5:
numerals += "D"
c_count -= 5
numerals += c_count * "C"
num %= 100
    x_count = num // 10
if x_count == 9:
numerals += "XC"
x_count -= 9
elif x_count == 4:
numerals += "XL"
x_count -= 4
if x_count >= 5:
numerals += "L"
x_count -= 5
numerals += x_count * "X"
num %= 10
if num == 9:
numerals += "IX"
num -= 9
elif num == 4:
numerals += "IV"
num -= 4
if num >= 5:
numerals += "V"
num -= 5
numerals += num * "I"
return numerals
def solution(roman_numerals_filename: str = "/p089_roman.txt" ) -> int:
    savings = 0
    with open(os.path.dirname(__file__ ) + roman_numerals_filename ) as filea:
        lines = filea.readlines()
    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original )
        shorter = generate_roman_numerals(num )
        savings += len(original ) - len(shorter )
    return savings
return savings
if __name__ == "__main__":
print(F'{solution() = }')
| 369 |
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
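# IPNDM is a multistep scheduler: step() consumes a short history of past
# model residuals (scheduler.ets), which is why the checks below seed dummy
# past residuals before comparing save/load round trips.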
class IPNDMSchedulerTest( SchedulerCommonTest ):
    scheduler_classes = (IPNDMScheduler,)
    forward_default_kwargs = (('''num_inference_steps''', 50),)
    def get_scheduler_config( self , **kwargs ):
        '''simple docstring'''
        config = {"""num_train_timesteps""": 1000}
        config.update(**kwargs )
        return config
    def check_over_configs( self , time_step=0 , **kwargs ):
        '''simple docstring'''
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop("""num_inference_steps""" , None )
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**kwargs )
            scheduler = scheduler_class(**scheduler_config )
            scheduler.set_timesteps(num_inference_steps )
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]
            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps ) // 2]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname )
                new_scheduler = scheduler_class.from_pretrained(tmpdirname )
                new_scheduler.set_timesteps(num_inference_steps )
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]
            output = scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            new_output = new_scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
            output = scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            new_output = new_scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
    def test_from_save_pretrained( self ):
        '''simple docstring'''
        pass
    def check_over_forward( self , time_step=0 , **kwargs ):
        '''simple docstring'''
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop("""num_inference_steps""" , None )
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config )
            scheduler.set_timesteps(num_inference_steps )
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]
            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps ) // 2]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname )
                new_scheduler = scheduler_class.from_pretrained(tmpdirname )
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps )
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]
            output = scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            new_output = new_scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
            output = scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            new_output = new_scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
    def full_loop( self , **kwargs ):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**kwargs )
        scheduler = scheduler_class(**scheduler_config )
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps )
        for i, t in enumerate(scheduler.timesteps ):
            residual = model(sample , t )
            sample = scheduler.step(residual , t , sample ).prev_sample
        for i, t in enumerate(scheduler.timesteps ):
            residual = model(sample , t )
            sample = scheduler.step(residual , t , sample ).prev_sample
        return sample
    def test_step_shape( self ):
        '''simple docstring'''
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop("""num_inference_steps""" , None )
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config )
            sample = self.dummy_sample
            residual = 0.1 * sample
            if num_inference_steps is not None and hasattr(scheduler , """set_timesteps""" ):
                scheduler.set_timesteps(num_inference_steps )
            elif num_inference_steps is not None and not hasattr(scheduler , """set_timesteps""" ):
                scheduler.num_inference_steps = num_inference_steps
            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]
            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]
            output_0 = scheduler.step(residual , time_step_0 , sample , **kwargs ).prev_sample
            output_1 = scheduler.step(residual , time_step_1 , sample , **kwargs ).prev_sample
            self.assertEqual(output_0.shape , sample.shape )
            self.assertEqual(output_0.shape , output_1.shape )
            output_0 = scheduler.step(residual , time_step_0 , sample , **kwargs ).prev_sample
            output_1 = scheduler.step(residual , time_step_1 , sample , **kwargs ).prev_sample
            self.assertEqual(output_0.shape , sample.shape )
            self.assertEqual(output_0.shape , output_1.shape )
    def test_timesteps( self ):
        '''simple docstring'''
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps , time_step=None )
    def test_inference_steps( self ):
        '''simple docstring'''
        for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
            self.check_over_forward(num_inference_steps=num_inference_steps , time_step=None )
    def test_full_loop_no_noise( self ):
        '''simple docstring'''
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 254_0529 ) < 10
| 347 | 0 |
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
__UpperCamelCase : Dict = logging.get_logger(__name__)
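# Each *_MAPPING_NAMES table pairs a config model_type string with the Flax
# model class serving that task; _LazyAutoMapping (further below) resolves the
# class lazily, so importing this module does not import every model file.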
FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
("albert", "FlaxAlbertModel"),
("bart", "FlaxBartModel"),
("beit", "FlaxBeitModel"),
("bert", "FlaxBertModel"),
("big_bird", "FlaxBigBirdModel"),
("blenderbot", "FlaxBlenderbotModel"),
("blenderbot-small", "FlaxBlenderbotSmallModel"),
("clip", "FlaxCLIPModel"),
("distilbert", "FlaxDistilBertModel"),
("electra", "FlaxElectraModel"),
("gpt-sw3", "FlaxGPT2Model"),
("gpt2", "FlaxGPT2Model"),
("gpt_neo", "FlaxGPTNeoModel"),
("gptj", "FlaxGPTJModel"),
("longt5", "FlaxLongT5Model"),
("marian", "FlaxMarianModel"),
("mbart", "FlaxMBartModel"),
("mt5", "FlaxMT5Model"),
("opt", "FlaxOPTModel"),
("pegasus", "FlaxPegasusModel"),
("regnet", "FlaxRegNetModel"),
("resnet", "FlaxResNetModel"),
("roberta", "FlaxRobertaModel"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormModel"),
("roformer", "FlaxRoFormerModel"),
("t5", "FlaxT5Model"),
("vision-text-dual-encoder", "FlaxVisionTextDualEncoderModel"),
("vit", "FlaxViTModel"),
("wav2vec2", "FlaxWav2Vec2Model"),
("whisper", "FlaxWhisperModel"),
("xglm", "FlaxXGLMModel"),
("xlm-roberta", "FlaxXLMRobertaModel"),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
("albert", "FlaxAlbertForPreTraining"),
("bart", "FlaxBartForConditionalGeneration"),
("bert", "FlaxBertForPreTraining"),
("big_bird", "FlaxBigBirdForPreTraining"),
("electra", "FlaxElectraForPreTraining"),
("longt5", "FlaxLongT5ForConditionalGeneration"),
("mbart", "FlaxMBartForConditionalGeneration"),
("mt5", "FlaxMT5ForConditionalGeneration"),
("roberta", "FlaxRobertaForMaskedLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
("roformer", "FlaxRoFormerForMaskedLM"),
("t5", "FlaxT5ForConditionalGeneration"),
("wav2vec2", "FlaxWav2Vec2ForPreTraining"),
("whisper", "FlaxWhisperForConditionalGeneration"),
("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
("albert", "FlaxAlbertForMaskedLM"),
("bart", "FlaxBartForConditionalGeneration"),
("bert", "FlaxBertForMaskedLM"),
("big_bird", "FlaxBigBirdForMaskedLM"),
("distilbert", "FlaxDistilBertForMaskedLM"),
("electra", "FlaxElectraForMaskedLM"),
("mbart", "FlaxMBartForConditionalGeneration"),
("roberta", "FlaxRobertaForMaskedLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
("roformer", "FlaxRoFormerForMaskedLM"),
("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
("bart", "FlaxBartForConditionalGeneration"),
("blenderbot", "FlaxBlenderbotForConditionalGeneration"),
("blenderbot-small", "FlaxBlenderbotSmallForConditionalGeneration"),
("encoder-decoder", "FlaxEncoderDecoderModel"),
("longt5", "FlaxLongT5ForConditionalGeneration"),
("marian", "FlaxMarianMTModel"),
("mbart", "FlaxMBartForConditionalGeneration"),
("mt5", "FlaxMT5ForConditionalGeneration"),
("pegasus", "FlaxPegasusForConditionalGeneration"),
("t5", "FlaxT5ForConditionalGeneration"),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Image-classsification
("beit", "FlaxBeitForImageClassification"),
("regnet", "FlaxRegNetForImageClassification"),
("resnet", "FlaxResNetForImageClassification"),
("vit", "FlaxViTForImageClassification"),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
("vision-encoder-decoder", "FlaxVisionEncoderDecoderModel"),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
("bart", "FlaxBartForCausalLM"),
("bert", "FlaxBertForCausalLM"),
("big_bird", "FlaxBigBirdForCausalLM"),
("electra", "FlaxElectraForCausalLM"),
("gpt-sw3", "FlaxGPT2LMHeadModel"),
("gpt2", "FlaxGPT2LMHeadModel"),
("gpt_neo", "FlaxGPTNeoForCausalLM"),
("gptj", "FlaxGPTJForCausalLM"),
("opt", "FlaxOPTForCausalLM"),
("roberta", "FlaxRobertaForCausalLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForCausalLM"),
("xglm", "FlaxXGLMForCausalLM"),
("xlm-roberta", "FlaxXLMRobertaForCausalLM"),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
("albert", "FlaxAlbertForSequenceClassification"),
("bart", "FlaxBartForSequenceClassification"),
("bert", "FlaxBertForSequenceClassification"),
("big_bird", "FlaxBigBirdForSequenceClassification"),
("distilbert", "FlaxDistilBertForSequenceClassification"),
("electra", "FlaxElectraForSequenceClassification"),
("mbart", "FlaxMBartForSequenceClassification"),
("roberta", "FlaxRobertaForSequenceClassification"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForSequenceClassification"),
("roformer", "FlaxRoFormerForSequenceClassification"),
("xlm-roberta", "FlaxXLMRobertaForSequenceClassification"),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
("albert", "FlaxAlbertForQuestionAnswering"),
("bart", "FlaxBartForQuestionAnswering"),
("bert", "FlaxBertForQuestionAnswering"),
("big_bird", "FlaxBigBirdForQuestionAnswering"),
("distilbert", "FlaxDistilBertForQuestionAnswering"),
("electra", "FlaxElectraForQuestionAnswering"),
("mbart", "FlaxMBartForQuestionAnswering"),
("roberta", "FlaxRobertaForQuestionAnswering"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForQuestionAnswering"),
("roformer", "FlaxRoFormerForQuestionAnswering"),
("xlm-roberta", "FlaxXLMRobertaForQuestionAnswering"),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
("albert", "FlaxAlbertForTokenClassification"),
("bert", "FlaxBertForTokenClassification"),
("big_bird", "FlaxBigBirdForTokenClassification"),
("distilbert", "FlaxDistilBertForTokenClassification"),
("electra", "FlaxElectraForTokenClassification"),
("roberta", "FlaxRobertaForTokenClassification"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForTokenClassification"),
("roformer", "FlaxRoFormerForTokenClassification"),
("xlm-roberta", "FlaxXLMRobertaForTokenClassification"),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
("albert", "FlaxAlbertForMultipleChoice"),
("bert", "FlaxBertForMultipleChoice"),
("big_bird", "FlaxBigBirdForMultipleChoice"),
("distilbert", "FlaxDistilBertForMultipleChoice"),
("electra", "FlaxElectraForMultipleChoice"),
("roberta", "FlaxRobertaForMultipleChoice"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMultipleChoice"),
("roformer", "FlaxRoFormerForMultipleChoice"),
("xlm-roberta", "FlaxXLMRobertaForMultipleChoice"),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
("bert", "FlaxBertForNextSentencePrediction"),
]
)
__UpperCamelCase : Union[str, Any] = OrderedDict(
[
("speech-encoder-decoder", "FlaxSpeechEncoderDecoderModel"),
("whisper", "FlaxWhisperForConditionalGeneration"),
]
)
__UpperCamelCase : Union[str, Any] = OrderedDict(
[
("whisper", "FlaxWhisperForAudioClassification"),
]
)
__UpperCamelCase : Dict = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
__UpperCamelCase : Union[str, Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
__UpperCamelCase : Any = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
__UpperCamelCase : Tuple = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
__UpperCamelCase : Optional[int] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
__UpperCamelCase : Optional[int] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
__UpperCamelCase : int = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
__UpperCamelCase : List[str] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
__UpperCamelCase : Dict = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
__UpperCamelCase : int = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
__UpperCamelCase : Dict = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
__UpperCamelCase : Optional[int] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
__UpperCamelCase : int = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
__UpperCamelCase : Dict = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class __lowerCAmelCase ( _BaseAutoModelClass ):
UpperCamelCase__ = FLAX_MODEL_MAPPING
__UpperCamelCase : Dict = auto_class_update(FlaxAutoModel)
class __lowerCAmelCase ( _BaseAutoModelClass ):
UpperCamelCase__ = FLAX_MODEL_FOR_PRETRAINING_MAPPING
__UpperCamelCase : Any = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")
class __lowerCAmelCase ( _BaseAutoModelClass ):
UpperCamelCase__ = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING
__UpperCamelCase : Tuple = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")
class __lowerCAmelCase ( _BaseAutoModelClass ):
UpperCamelCase__ = FLAX_MODEL_FOR_MASKED_LM_MAPPING
__UpperCamelCase : Union[str, Any] = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")
class __lowerCAmelCase ( _BaseAutoModelClass ):
UpperCamelCase__ = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
__UpperCamelCase : List[Any] = auto_class_update(
FlaxAutoModelForSeqaSeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)
class __lowerCAmelCase ( _BaseAutoModelClass ):
UpperCamelCase__ = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
__UpperCamelCase : List[str] = auto_class_update(
FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)
class __lowerCAmelCase ( _BaseAutoModelClass ):
UpperCamelCase__ = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING
__UpperCamelCase : List[Any] = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")
class __lowerCAmelCase ( _BaseAutoModelClass ):
UpperCamelCase__ = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
__UpperCamelCase : List[str] = auto_class_update(
FlaxAutoModelForTokenClassification, head_doc="token classification"
)
class __lowerCAmelCase ( _BaseAutoModelClass ):
UpperCamelCase__ = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
__UpperCamelCase : Any = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")
class __lowerCAmelCase ( _BaseAutoModelClass ):
UpperCamelCase__ = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
__UpperCamelCase : Any = auto_class_update(
FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)
class __lowerCAmelCase ( _BaseAutoModelClass ):
UpperCamelCase__ = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
__UpperCamelCase : Optional[Any] = auto_class_update(
FlaxAutoModelForImageClassification, head_doc="image classification"
)
class __lowerCAmelCase ( _BaseAutoModelClass ):
UpperCamelCase__ = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING
__UpperCamelCase : str = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc="vision-to-text modeling")
class __lowerCAmelCase ( _BaseAutoModelClass ):
UpperCamelCase__ = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
__UpperCamelCase : Any = auto_class_update(
FlaxAutoModelForSpeechSeqaSeq, head_doc="sequence-to-sequence speech-to-text modeling"
)
| 370 |
__UpperCamelCase : Dict = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
def __A ( ) -> None:
a = input("""Enter message: """ )
a = input("""Enter key [alphanumeric]: """ )
a = input("""Encrypt/Decrypt [e/d]: """ )
if mode.lower().startswith("""e""" ):
a = """encrypt"""
a = encrypt_message(__lowerCamelCase , __lowerCamelCase )
elif mode.lower().startswith("""d""" ):
a = """decrypt"""
a = decrypt_message(__lowerCamelCase , __lowerCamelCase )
print(f'\n{mode.title()}ed message:' )
print(__lowerCamelCase )
def __A ( __lowerCamelCase , __lowerCamelCase ) -> str:
return translate_message(__lowerCamelCase , __lowerCamelCase , """encrypt""" )
def __A ( __lowerCamelCase , __lowerCamelCase ) -> str:
return translate_message(__lowerCamelCase , __lowerCamelCase , """decrypt""" )
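# Core Vigenere routine: shift each letter by the matching key letter (add to encrypt, subtract to decrypt);
# non-letters pass through unchanged and do not advance the key.
# Classic example: key LEMON turns ATTACKATDAWN into LXFOPVEFRNHR.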
def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> str:
a = []
a = 0
a = key.upper()
for symbol in message:
a = LETTERS.find(symbol.upper() )
if num != -1:
if mode == "encrypt":
num += LETTERS.find(key[key_index] )
elif mode == "decrypt":
num -= LETTERS.find(key[key_index] )
num %= len(__lowerCamelCase )
if symbol.isupper():
translated.append(LETTERS[num] )
elif symbol.islower():
translated.append(LETTERS[num].lower() )
key_index += 1
if key_index == len(__lowerCamelCase ):
a = 0
else:
translated.append(__lowerCamelCase )
return "".join(__lowerCamelCase )
if __name__ == "__main__":
main()
| 347 | 0 |
import os
from datetime import datetime as dt
from github import Github
__UpperCamelCase : Any = [
"good first issue",
"good second issue",
"good difficult issue",
"enhancement",
"new pipeline/model",
"new scheduler",
"wip",
]
def __A ( ) -> Dict:
a = Github(os.environ["""GITHUB_TOKEN"""] )
a = g.get_repo("""huggingface/diffusers""" )
a = repo.get_issues(state="""open""" )
for issue in open_issues:
a = sorted(issue.get_comments() , key=lambda i : i.created_at , reverse=True )
a = comments[0] if len(comments ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state="""closed""" )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state="""open""" )
issue.remove_from_labels("""stale""" )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
"""This issue has been automatically marked as stale because it has not had """
"""recent activity. If you think this still needs to be addressed """
"""please comment on this thread.\n\nPlease note that issues that do not follow the """
"""[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) """
"""are likely to be ignored.""" )
issue.add_to_labels("""stale""" )
if __name__ == "__main__":
main()
| 371 |
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class __lowerCAmelCase ( unittest.TestCase ):
def __init__( self :Optional[int] , __magic_name__ :Tuple , __magic_name__ :Tuple=13 , __magic_name__ :List[Any]=7 , __magic_name__ :Optional[Any]=True , __magic_name__ :List[Any]=True , __magic_name__ :Union[str, Any]=True , __magic_name__ :List[str]=True , __magic_name__ :str=99 , __magic_name__ :Optional[Any]=32 , __magic_name__ :Union[str, Any]=5 , __magic_name__ :Any=4 , __magic_name__ :int=37 , __magic_name__ :Tuple="gelu" , __magic_name__ :List[str]=0.1 , __magic_name__ :Dict=0.1 , __magic_name__ :Tuple=512 , __magic_name__ :Dict=16 , __magic_name__ :Optional[int]=2 , __magic_name__ :Optional[int]=0.02 , __magic_name__ :Optional[Any]=4 , ):
'''simple docstring'''
a = parent
a = batch_size
a = seq_length
a = is_training
a = use_attention_mask
a = use_token_type_ids
a = use_labels
a = vocab_size
a = hidden_size
a = num_hidden_layers
a = num_attention_heads
a = intermediate_size
a = hidden_act
a = hidden_dropout_prob
a = attention_probs_dropout_prob
a = max_position_embeddings
a = type_vocab_size
a = type_sequence_label_size
a = initializer_range
a = num_choices
def lowerCamelCase__ ( self :int ):
'''simple docstring'''
a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
a = None
if self.use_attention_mask:
a = random_attention_mask([self.batch_size, self.seq_length] )
a = None
if self.use_token_type_ids:
a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
a = RobertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__magic_name__ , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def lowerCamelCase__ ( self :List[Any] ):
'''simple docstring'''
a = self.prepare_config_and_inputs()
a , a , a , a = config_and_inputs
a = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
def lowerCamelCase__ ( self :Dict ):
'''simple docstring'''
a = self.prepare_config_and_inputs()
a , a , a , a = config_and_inputs
a = True
a = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
a = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ):
UpperCamelCase__ = True
UpperCamelCase__ = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowerCamelCase__ ( self :Dict ):
'''simple docstring'''
a = FlaxRobertaModelTester(self )
@slow
def lowerCamelCase__ ( self :Optional[Any] ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
a = model_class_name.from_pretrained("""roberta-base""" , from_pt=__magic_name__ )
a = model(np.ones((1, 1) ) )
self.assertIsNotNone(__magic_name__ )
| 347 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
__UpperCamelCase : List[Any] = None
__UpperCamelCase : Tuple = logging.get_logger(__name__)
__UpperCamelCase : Union[str, Any] = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
__UpperCamelCase : List[str] = {
"vocab_file": {
"moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
"moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
"moussaKam/barthez-orangesum-title": (
"https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json",
"moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json",
"moussaKam/barthez-orangesum-title": (
"https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json"
),
},
}
__UpperCamelCase : List[Any] = {
"moussaKam/mbarthez": 1_024,
"moussaKam/barthez": 1_024,
"moussaKam/barthez-orangesum-title": 1_024,
}
__UpperCamelCase : Tuple = "▁"
class __lowerCAmelCase ( snake_case_ ):
UpperCamelCase__ = VOCAB_FILES_NAMES
UpperCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase__ = ["input_ids", "attention_mask"]
UpperCamelCase__ = BarthezTokenizer
def __init__( self :int , __magic_name__ :str=None , __magic_name__ :Any=None , __magic_name__ :Dict="<s>" , __magic_name__ :Optional[Any]="</s>" , __magic_name__ :str="</s>" , __magic_name__ :Any="<s>" , __magic_name__ :List[str]="<unk>" , __magic_name__ :Dict="<pad>" , __magic_name__ :Dict="<mask>" , **__magic_name__ :List[str] , ):
'''simple docstring'''
a = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else mask_token
super().__init__(
_A , tokenizer_file=_A , bos_token=_A , eos_token=_A , unk_token=_A , sep_token=_A , cls_token=_A , pad_token=_A , mask_token=_A , **_A , )
a = vocab_file
a = False if not self.vocab_file else True
def lowerCamelCase__ ( self :List[Any] , __magic_name__ :List[int] , __magic_name__ :Optional[List[int]] = None ):
'''simple docstring'''
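# A BARThez sequence has the format: <s> A </s> for a single sequence, or <s> A </s></s> B </s> for a pair.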
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
a = [self.cls_token_id]
a = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowerCamelCase__ ( self :Union[str, Any] , __magic_name__ :List[int] , __magic_name__ :Optional[List[int]] = None ):
'''simple docstring'''
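# Like other RoBERTa-style models, BARThez does not use token type ids, so the returned mask is all zeros.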
a = [self.sep_token_id]
a = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCamelCase__ ( self :Any , __magic_name__ :str , __magic_name__ :Optional[str] = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(_A ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
a = os.path.join(
_A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_A ):
copyfile(self.vocab_file , _A )
return (out_vocab_file,)
| 350 |
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
__UpperCamelCase : Any = datasets.utils.logging.get_logger(__name__)
@dataclass
class __lowerCAmelCase ( datasets.BuilderConfig ):
UpperCamelCase__ = None
UpperCamelCase__ = "utf-8"
UpperCamelCase__ = None
UpperCamelCase__ = None
UpperCamelCase__ = True # deprecated
UpperCamelCase__ = None # deprecated
UpperCamelCase__ = 10 << 20 # 10MB
UpperCamelCase__ = None
class __lowerCAmelCase ( datasets.ArrowBasedBuilder ):
UpperCamelCase__ = JsonConfig
def lowerCamelCase__ ( self :str ):
'''simple docstring'''
if self.config.block_size is not None:
logger.warning("""The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead""" )
a = self.config.block_size
if self.config.use_threads is not True:
logger.warning(
"""The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore.""" )
if self.config.newlines_in_values is not None:
raise ValueError("""The JSON loader parameter `newlines_in_values` is no longer supported""" )
return datasets.DatasetInfo(features=self.config.features )
def lowerCamelCase__ ( self :Tuple , __magic_name__ :str ):
'''simple docstring'''
if not self.config.data_files:
raise ValueError(F'At least one data file must be specified, but got data_files={self.config.data_files}' )
a = dl_manager.download_and_extract(self.config.data_files )
if isinstance(__magic_name__ , (str, list, tuple) ):
a = data_files
if isinstance(__magic_name__ , __magic_name__ ):
a = [files]
a = [dl_manager.iter_files(__magic_name__ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files} )]
a = []
for split_name, files in data_files.items():
if isinstance(__magic_name__ , __magic_name__ ):
a = [files]
a = [dl_manager.iter_files(__magic_name__ ) for file in files]
splits.append(datasets.SplitGenerator(name=__magic_name__ , gen_kwargs={"""files""": files} ) )
return splits
def lowerCamelCase__ ( self :List[str] , __magic_name__ :pa.Table ):
'''simple docstring'''
if self.config.features is not None:
# adding missing columns
for column_name in set(self.config.features ) - set(pa_table.column_names ):
a = self.config.features.arrow_schema.field(__magic_name__ ).type
a = pa_table.append_column(__magic_name__ , pa.array([None] * len(__magic_name__ ) , type=__magic_name__ ) )
# more expensive cast to support nested structures with keys in a different order
# allows str <-> int/float or str to Audio for example
a = table_cast(__magic_name__ , self.config.features.arrow_schema )
return pa_table
def lowerCamelCase__ ( self :Optional[int] , __magic_name__ :Union[str, Any] ):
'''simple docstring'''
for file_idx, file in enumerate(itertools.chain.from_iterable(__magic_name__ ) ):
# If the file is one json object and if we need to look at the list of items in one specific field
if self.config.field is not None:
with open(__magic_name__ , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
a = json.load(__magic_name__ )
# We keep only the field we are interested in
a = dataset[self.config.field]
# We accept two formats: a list of dicts or a dict of lists
if isinstance(__magic_name__ , (list, tuple) ):
a = set().union(*[row.keys() for row in dataset] )
a = {col: [row.get(__magic_name__ ) for row in dataset] for col in keys}
else:
a = dataset
a = pa.Table.from_pydict(__magic_name__ )
yield file_idx, self._cast_table(__magic_name__ )
# If the file has one json object per line
else:
with open(__magic_name__ , """rb""" ) as f:
a = 0
# Use block_size equal to the chunk size divided by 32 to leverage multithreading
# Set a default minimum value of 16kB if the chunk size is really small
a = max(self.config.chunksize // 32 , 16 << 10 )
a = (
self.config.encoding_errors if self.config.encoding_errors is not None else """strict"""
)
while True:
a = f.read(self.config.chunksize )
if not batch:
break
# Finish current line
try:
batch += f.readline()
except (AttributeError, io.UnsupportedOperation):
batch += readline(__magic_name__ )
# PyArrow only accepts utf-8 encoded bytes
if self.config.encoding != "utf-8":
a = batch.decode(self.config.encoding , errors=__magic_name__ ).encode("""utf-8""" )
try:
while True:
try:
a = paj.read_json(
io.BytesIO(__magic_name__ ) , read_options=paj.ReadOptions(block_size=__magic_name__ ) )
break
except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
if (
isinstance(__magic_name__ , pa.ArrowInvalid )
and "straddling" not in str(__magic_name__ )
or block_size > len(__magic_name__ )
):
raise
else:
# Increase the block size in case it was too small.
# The block size will be reset for the next file.
logger.debug(
F'Batch of {len(__magic_name__ )} bytes couldn\'t be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.' )
block_size *= 2
except pa.ArrowInvalid as e:
try:
with open(
__magic_name__ , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
a = json.load(__magic_name__ )
except json.JSONDecodeError:
logger.error(F'Failed to read file \'{file}\' with error {type(__magic_name__ )}: {e}' )
raise e
# If possible, parse the file as a list of json objects and exit the loop
if isinstance(__magic_name__ , __magic_name__ ): # list is the only sequence type supported in JSON
try:
a = set().union(*[row.keys() for row in dataset] )
a = {col: [row.get(__magic_name__ ) for row in dataset] for col in keys}
a = pa.Table.from_pydict(__magic_name__ )
except (pa.ArrowInvalid, AttributeError) as e:
logger.error(F'Failed to read file \'{file}\' with error {type(__magic_name__ )}: {e}' )
raise ValueError(F'Not able to read records in the JSON file at {file}.' ) from None
yield file_idx, self._cast_table(__magic_name__ )
break
else:
logger.error(F'Failed to read file \'{file}\' with error {type(__magic_name__ )}: {e}' )
raise ValueError(
F'Not able to read records in the JSON file at {file}. '
F'You should probably indicate the field of the JSON file containing your records. '
F'This JSON file contains the following fields: {str(list(dataset.keys() ) )}. '
F'Select the correct one and provide it as `field=\'XXX\'` to the dataset loading method. ' ) from None
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(__magic_name__ )
batch_idx += 1
| 347 | 0 |
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : Dict = logging.get_logger(__name__)
__UpperCamelCase : int = {
"snap-research/efficientformer-l1-300": (
"https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"
),
}
class __lowerCAmelCase ( _SCREAMING_SNAKE_CASE ):
UpperCamelCase__ = '''efficientformer'''
def __init__( self :Any , __magic_name__ :List[int] = [3, 2, 6, 4] , __magic_name__ :List[int] = [48, 96, 224, 448] , __magic_name__ :List[bool] = [True, True, True, True] , __magic_name__ :int = 448 , __magic_name__ :int = 32 , __magic_name__ :int = 4 , __magic_name__ :int = 7 , __magic_name__ :int = 5 , __magic_name__ :int = 8 , __magic_name__ :int = 4 , __magic_name__ :float = 0.0 , __magic_name__ :int = 16 , __magic_name__ :int = 3 , __magic_name__ :int = 3 , __magic_name__ :int = 3 , __magic_name__ :int = 2 , __magic_name__ :int = 1 , __magic_name__ :float = 0.0 , __magic_name__ :int = 1 , __magic_name__ :bool = True , __magic_name__ :bool = True , __magic_name__ :float = 1E-5 , __magic_name__ :str = "gelu" , __magic_name__ :float = 0.02 , __magic_name__ :float = 1E-1_2 , __magic_name__ :int = 224 , __magic_name__ :float = 1E-0_5 , **__magic_name__ :int , ):
'''simple docstring'''
super().__init__(**__magic_name__ )
a = hidden_act
a = hidden_dropout_prob
a = hidden_sizes
a = num_hidden_layers
a = num_attention_heads
a = initializer_range
a = layer_norm_eps
a = patch_size
a = num_channels
a = depths
a = mlp_expansion_ratio
a = downsamples
a = dim
a = key_dim
a = attention_ratio
a = resolution
a = pool_size
a = downsample_patch_size
a = downsample_stride
a = downsample_pad
a = drop_path_rate
a = num_metaad_blocks
a = distillation
a = use_layer_scale
a = layer_scale_init_value
a = image_size
a = batch_norm_eps
| 351 |
import warnings
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__UpperCamelCase : Dict = logging.get_logger(__name__)
class __lowerCAmelCase ( __magic_name__ ):
UpperCamelCase__ = ['''input_ids''', '''attention_mask''']
def __init__( self :List[str] , __magic_name__ :int="</s>" , __magic_name__ :List[Any]="<unk>" , __magic_name__ :Optional[Any]="<pad>" , __magic_name__ :Optional[int]=125 , __magic_name__ :List[str]=None , **__magic_name__ :List[str] , ):
'''simple docstring'''
if extra_ids > 0 and additional_special_tokens is None:
a = [F'<extra_id_{i}>' for i in range(__magic_name__ )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
a = len(set(filter(lambda __magic_name__ : bool("""extra_id""" in str(__magic_name__ ) ) , __magic_name__ ) ) )
if extra_tokens != extra_ids:
raise ValueError(
F'Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'
""" provided to ByT5Tokenizer. In this case the additional_special_tokens must include the"""
""" extra_ids tokens""" )
a = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else pad_token
a = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else eos_token
a = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else unk_token
super().__init__(
eos_token=__magic_name__ , unk_token=__magic_name__ , pad_token=__magic_name__ , extra_ids=__magic_name__ , additional_special_tokens=__magic_name__ , **__magic_name__ , )
a = extra_ids
a = 2**8 # utf is 8 bits
# define special tokens dict
a = {
self.pad_token: 0,
self.eos_token: 1,
self.unk_token: 2,
}
a = len(self.special_tokens_encoder )
a = len(__magic_name__ )
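# Map the <extra_id_*> sentinel tokens onto the top of the vocabulary, mirroring T5's convention.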
for i, token in enumerate(__magic_name__ ):
a = self.vocab_size + i - n
a = {v: k for k, v in self.special_tokens_encoder.items()}
@property
def lowerCamelCase__ ( self :List[Any] ):
'''simple docstring'''
return self._utf_vocab_size + self._num_special_tokens + self._extra_ids
def lowerCamelCase__ ( self :Any , __magic_name__ :List[int] , __magic_name__ :Optional[List[int]] = None , __magic_name__ :bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__magic_name__ , token_ids_a=__magic_name__ , already_has_special_tokens=__magic_name__ )
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(__magic_name__ )) + [1]
return ([0] * len(__magic_name__ )) + [1] + ([0] * len(__magic_name__ )) + [1]
def lowerCamelCase__ ( self :str , __magic_name__ :List[int] ):
'''simple docstring'''
if len(__magic_name__ ) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
F'This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated'
""" eos tokens being added.""" )
return token_ids
else:
return token_ids + [self.eos_token_id]
def lowerCamelCase__ ( self :Union[str, Any] , __magic_name__ :List[int] , __magic_name__ :Optional[List[int]] = None ):
'''simple docstring'''
a = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def lowerCamelCase__ ( self :Union[str, Any] , __magic_name__ :List[int] , __magic_name__ :Optional[List[int]] = None ):
'''simple docstring'''
a = self._add_eos_if_not_present(__magic_name__ )
if token_ids_a is None:
return token_ids_a
else:
a = self._add_eos_if_not_present(__magic_name__ )
return token_ids_a + token_ids_a
def lowerCamelCase__ ( self :List[str] , __magic_name__ :str ):
'''simple docstring'''
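# Byte-level tokenization: encode the text as UTF-8 and treat each byte (as a single character) as one token.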
a = [chr(__magic_name__ ) for i in text.encode("""utf-8""" )]
return tokens
def lowerCamelCase__ ( self :Tuple , __magic_name__ :str ):
'''simple docstring'''
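# IDs 0-2 are reserved for pad/eos/unk, so raw byte values are offset by the number of special tokens.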
if token in self.special_tokens_encoder:
a = self.special_tokens_encoder[token]
elif token in self.added_tokens_encoder:
a = self.added_tokens_encoder[token]
elif len(__magic_name__ ) != 1:
a = self.unk_token_id
else:
a = ord(__magic_name__ ) + self._num_special_tokens
return token_id
def lowerCamelCase__ ( self :List[str] , __magic_name__ :Dict ):
'''simple docstring'''
if index in self.special_tokens_decoder:
a = self.special_tokens_decoder[index]
else:
a = chr(index - self._num_special_tokens )
return token
def lowerCamelCase__ ( self :Tuple , __magic_name__ :Optional[int] ):
'''simple docstring'''
a = b""""""
for token in tokens:
if token in self.special_tokens_decoder:
a = self.special_tokens_decoder[token].encode("""utf-8""" )
elif token in self.added_tokens_decoder:
a = self.special_tokens_decoder[token].encode("""utf-8""" )
elif token in self.special_tokens_encoder:
a = token.encode("""utf-8""" )
elif token in self.added_tokens_encoder:
a = token.encode("""utf-8""" )
else:
a = bytes([ord(__magic_name__ )] )
bstring += tok_string
a = bstring.decode("""utf-8""" , errors="""ignore""" )
return string
def lowerCamelCase__ ( self :Optional[Any] , __magic_name__ :str , __magic_name__ :Optional[str] = None ):
'''simple docstring'''
return ()
| 347 | 0 |
import warnings
from contextlib import contextmanager
from ....processing_utils import ProcessorMixin
class __lowerCAmelCase ( __lowercase ):
UpperCamelCase__ = "MCTCTFeatureExtractor"
UpperCamelCase__ = "AutoTokenizer"
def __init__( self :Any , __magic_name__ :List[Any] , __magic_name__ :List[Any] ):
'''simple docstring'''
super().__init__(snake_case_ , snake_case_ )
a = self.feature_extractor
a = False
def __call__( self :Dict , *__magic_name__ :Union[str, Any] , **__magic_name__ :Tuple ):
'''simple docstring'''
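# Inside the as_target_processor context, delegate straight to the tokenizer so that text labels are encoded.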
if self._in_target_context_manager:
return self.current_processor(*snake_case_ , **snake_case_ )
if "raw_speech" in kwargs:
warnings.warn("""Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.""" )
a = kwargs.pop("""raw_speech""" )
else:
a = kwargs.pop("""audio""" , snake_case_ )
a = kwargs.pop("""sampling_rate""" , snake_case_ )
a = kwargs.pop("""text""" , snake_case_ )
if len(snake_case_ ) > 0:
a = args[0]
a = args[1:]
if audio is None and text is None:
raise ValueError("""You need to specify either an `audio` or `text` input to process.""" )
if audio is not None:
a = self.feature_extractor(snake_case_ , *snake_case_ , sampling_rate=snake_case_ , **snake_case_ )
if text is not None:
a = self.tokenizer(snake_case_ , **snake_case_ )
if text is None:
return inputs
elif audio is None:
return encodings
else:
a = encodings['''input_ids''']
return inputs
def lowerCamelCase__ ( self :int , *__magic_name__ :List[str] , **__magic_name__ :int ):
'''simple docstring'''
return self.tokenizer.batch_decode(*snake_case_ , **snake_case_ )
def lowerCamelCase__ ( self :Union[str, Any] , *__magic_name__ :Optional[Any] , **__magic_name__ :int ):
'''simple docstring'''
if self._in_target_context_manager:
return self.current_processor.pad(*snake_case_ , **snake_case_ )
a = kwargs.pop("""input_features""" , snake_case_ )
a = kwargs.pop("""labels""" , snake_case_ )
if len(snake_case_ ) > 0:
a = args[0]
a = args[1:]
if input_features is not None:
a = self.feature_extractor.pad(snake_case_ , *snake_case_ , **snake_case_ )
if labels is not None:
a = self.tokenizer.pad(snake_case_ , **snake_case_ )
if labels is None:
return input_features
elif input_features is None:
return labels
else:
a = labels['''input_ids''']
return input_features
def lowerCamelCase__ ( self :Optional[int] , *__magic_name__ :Any , **__magic_name__ :Tuple ):
'''simple docstring'''
return self.tokenizer.decode(*snake_case_ , **snake_case_ )
@contextmanager
def lowerCamelCase__ ( self :Any ):
'''simple docstring'''
warnings.warn(
"""`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your """
"""labels by using the argument `text` of the regular `__call__` method (either in the same call as """
"""your audio inputs, or in a separate call.""" )
a = True
a = self.tokenizer
yield
a = self.feature_extractor
a = False
| 352 |
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class __lowerCAmelCase :
def __init__( self :Optional[int] , __magic_name__ :str , __magic_name__ :int=2 , __magic_name__ :List[str]=3 , __magic_name__ :Optional[int]=4 , __magic_name__ :str=2 , __magic_name__ :Any=7 , __magic_name__ :Optional[Any]=True , __magic_name__ :List[Any]=True , __magic_name__ :Optional[int]=True , __magic_name__ :Dict=True , __magic_name__ :List[Any]=99 , __magic_name__ :Dict=36 , __magic_name__ :Optional[Any]=3 , __magic_name__ :str=4 , __magic_name__ :Optional[Any]=37 , __magic_name__ :Dict="gelu" , __magic_name__ :Any=0.1 , __magic_name__ :Union[str, Any]=0.1 , __magic_name__ :Dict=512 , __magic_name__ :str=16 , __magic_name__ :List[Any]=2 , __magic_name__ :Tuple=0.02 , __magic_name__ :Any=6 , __magic_name__ :Optional[int]=6 , __magic_name__ :Tuple=3 , __magic_name__ :str=4 , __magic_name__ :List[str]=None , __magic_name__ :str=1000 , ):
'''simple docstring'''
a = parent
a = batch_size
a = num_channels
a = image_size
a = patch_size
a = text_seq_length
a = is_training
a = use_input_mask
a = use_token_type_ids
a = use_labels
a = vocab_size
a = hidden_size
a = num_hidden_layers
a = num_attention_heads
a = intermediate_size
a = hidden_act
a = hidden_dropout_prob
a = attention_probs_dropout_prob
a = max_position_embeddings
a = type_vocab_size
a = type_sequence_label_size
a = initializer_range
a = coordinate_size
a = shape_size
a = num_labels
a = num_choices
a = scope
a = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
a = text_seq_length
a = (image_size // patch_size) ** 2 + 1
a = self.text_seq_length + self.image_seq_length
def lowerCamelCase__ ( self :List[str] ):
'''simple docstring'''
a = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
a = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
a = bbox[i, j, 3]
a = bbox[i, j, 1]
a = t
if bbox[i, j, 2] < bbox[i, j, 0]:
a = bbox[i, j, 2]
a = bbox[i, j, 0]
a = t
a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
a = None
if self.use_input_mask:
a = random_attention_mask([self.batch_size, self.text_seq_length] )
a = None
if self.use_token_type_ids:
a = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
a = None
a = None
if self.use_labels:
a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
a = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
a = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def lowerCamelCase__ ( self :int , __magic_name__ :Union[str, Any] , __magic_name__ :List[str] , __magic_name__ :List[str] , __magic_name__ :int , __magic_name__ :Tuple , __magic_name__ :int , __magic_name__ :Tuple , __magic_name__ :int ):
'''simple docstring'''
a = LayoutLMvaModel(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
# text + image
a = model(__magic_name__ , pixel_values=__magic_name__ )
a = model(
__magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ )
a = model(__magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , token_type_ids=__magic_name__ )
a = model(__magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
a = model(__magic_name__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
a = model(pixel_values=__magic_name__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def lowerCamelCase__ ( self :Optional[Any] , __magic_name__ :List[Any] , __magic_name__ :Optional[int] , __magic_name__ :Tuple , __magic_name__ :Union[str, Any] , __magic_name__ :int , __magic_name__ :Optional[int] , __magic_name__ :List[str] , __magic_name__ :List[str] ):
'''simple docstring'''
a = self.num_labels
a = LayoutLMvaForSequenceClassification(__magic_name__ )
model.to(__magic_name__ )
model.eval()
a = model(
__magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , labels=__magic_name__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase__ ( self :Tuple , __magic_name__ :Dict , __magic_name__ :Optional[Any] , __magic_name__ :Optional[Any] , __magic_name__ :Dict , __magic_name__ :Dict , __magic_name__ :int , __magic_name__ :List[str] , __magic_name__ :Tuple ):
'''simple docstring'''
a = self.num_labels
a = LayoutLMvaForTokenClassification(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
a = model(
__magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , labels=__magic_name__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def lowerCamelCase__ ( self :Optional[int] , __magic_name__ :Tuple , __magic_name__ :str , __magic_name__ :int , __magic_name__ :Optional[int] , __magic_name__ :str , __magic_name__ :List[str] , __magic_name__ :Optional[int] , __magic_name__ :Optional[Any] ):
'''simple docstring'''
a = LayoutLMvaForQuestionAnswering(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
a = model(
__magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , start_positions=__magic_name__ , end_positions=__magic_name__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCamelCase__ ( self :Union[str, Any] ):
'''simple docstring'''
a = self.prepare_config_and_inputs()
a , a , a , a , a , a , a , a = config_and_inputs
a = {
"""input_ids""": input_ids,
"""bbox""": bbox,
"""pixel_values""": pixel_values,
"""token_type_ids""": token_type_ids,
"""attention_mask""": input_mask,
}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( __magic_name__ , __magic_name__ , unittest.TestCase ):
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = (
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
UpperCamelCase__ = (
{'''document-question-answering''': LayoutLMvaForQuestionAnswering, '''feature-extraction''': LayoutLMvaModel}
if is_torch_available()
else {}
)
def lowerCamelCase__ ( self :List[str] , __magic_name__ :Union[str, Any] , __magic_name__ :List[str] , __magic_name__ :Optional[Any] , __magic_name__ :List[str] , __magic_name__ :List[Any] ):
'''simple docstring'''
return True
def lowerCamelCase__ ( self :int ):
'''simple docstring'''
a = LayoutLMvaModelTester(self )
a = ConfigTester(self , config_class=__magic_name__ , hidden_size=37 )
def lowerCamelCase__ ( self :Optional[Any] , __magic_name__ :Dict , __magic_name__ :Dict , __magic_name__ :Any=False ):
'''simple docstring'''
a = copy.deepcopy(__magic_name__ )
if model_class in get_values(__magic_name__ ):
a = {
k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous()
if isinstance(__magic_name__ , torch.Tensor ) and v.ndim > 1
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(__magic_name__ ):
a = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=__magic_name__ )
elif model_class in get_values(__magic_name__ ):
a = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__magic_name__ )
a = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__magic_name__ )
elif model_class in [
*get_values(__magic_name__ ),
]:
a = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__magic_name__ )
elif model_class in [
*get_values(__magic_name__ ),
]:
a = torch.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=__magic_name__ , )
return inputs_dict
def lowerCamelCase__ ( self :str ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase__ ( self :Union[str, Any] ):
'''simple docstring'''
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__magic_name__ )
def lowerCamelCase__ ( self :Tuple ):
'''simple docstring'''
a = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
a = type
self.model_tester.create_and_check_model(*__magic_name__ )
def lowerCamelCase__ ( self :Tuple ):
'''simple docstring'''
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__magic_name__ )
def lowerCamelCase__ ( self :Union[str, Any] ):
'''simple docstring'''
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__magic_name__ )
def lowerCamelCase__ ( self :int ):
'''simple docstring'''
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__magic_name__ )
@slow
def lowerCamelCase__ ( self :int ):
'''simple docstring'''
for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a = LayoutLMvaModel.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
def __A ( ) -> str:
a = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
@cached_property
def lowerCamelCase__ ( self :Optional[Any] ):
'''simple docstring'''
return LayoutLMvaImageProcessor(apply_ocr=__magic_name__ ) if is_vision_available() else None
@slow
def lowerCamelCase__ ( self :Optional[Any] ):
'''simple docstring'''
a = LayoutLMvaModel.from_pretrained("""microsoft/layoutlmv3-base""" ).to(__magic_name__ )
a = self.default_image_processor
a = prepare_img()
a = image_processor(images=__magic_name__ , return_tensors="""pt""" ).pixel_values.to(__magic_name__ )
a = torch.tensor([[1, 2]] )
a = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
# forward pass
a = model(
input_ids=input_ids.to(__magic_name__ ) , bbox=bbox.to(__magic_name__ ) , pixel_values=pixel_values.to(__magic_name__ ) , )
# verify the logits
a = torch.Size((1, 199, 768) )
self.assertEqual(outputs.last_hidden_state.shape , __magic_name__ )
a = torch.tensor(
[[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] ).to(__magic_name__ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , __magic_name__ , atol=1E-4 ) )
| 347 | 0 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
__UpperCamelCase : Tuple = logging.get_logger(__name__)
__UpperCamelCase : Tuple = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
__UpperCamelCase : Dict = {
"""vocab_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
__UpperCamelCase : Any = {
"""vocab_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
__UpperCamelCase : Optional[int] = {
"""vocab_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"""
),
},
}
__UpperCamelCase : Dict = {
"""facebook/dpr-ctx_encoder-single-nq-base""": 512,
"""facebook/dpr-ctx_encoder-multiset-base""": 512,
}
__UpperCamelCase : str = {
"""facebook/dpr-question_encoder-single-nq-base""": 512,
"""facebook/dpr-question_encoder-multiset-base""": 512,
}
__UpperCamelCase : Optional[int] = {
"""facebook/dpr-reader-single-nq-base""": 512,
"""facebook/dpr-reader-multiset-base""": 512,
}
__UpperCamelCase : Optional[Any] = {
"""facebook/dpr-ctx_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-ctx_encoder-multiset-base""": {"""do_lower_case""": True},
}
__UpperCamelCase : List[Any] = {
"""facebook/dpr-question_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-question_encoder-multiset-base""": {"""do_lower_case""": True},
}
__UpperCamelCase : Optional[int] = {
"""facebook/dpr-reader-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-reader-multiset-base""": {"""do_lower_case""": True},
}
class __lowerCAmelCase ( __magic_name__ ):
UpperCamelCase__ = VOCAB_FILES_NAMES
UpperCamelCase__ = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase__ = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase__ = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
UpperCamelCase__ = DPRContextEncoderTokenizer
class __lowerCAmelCase ( __magic_name__ ):
UpperCamelCase__ = VOCAB_FILES_NAMES
UpperCamelCase__ = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase__ = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase__ = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
UpperCamelCase__ = DPRQuestionEncoderTokenizer
__UpperCamelCase : List[Any] = collections.namedtuple(
"DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)
__UpperCamelCase : List[str] = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
__UpperCamelCase : int = r"""
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
is provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Return:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
"""
@add_start_docstrings(__magic_name__ )
class __lowerCAmelCase :
def __call__( self :Dict , __magic_name__ :Optional[Any] , __magic_name__ :Optional[str] = None , __magic_name__ :Optional[str] = None , __magic_name__ :Union[bool, str] = False , __magic_name__ :Union[bool, str] = False , __magic_name__ :Optional[int] = None , __magic_name__ :Optional[Union[str, TensorType]] = None , __magic_name__ :Optional[bool] = None , **__magic_name__ :Union[str, Any] , ):
'''simple docstring'''
if titles is None and texts is None:
return super().__call__(
__magic_name__ , padding=__magic_name__ , truncation=__magic_name__ , max_length=__magic_name__ , return_tensors=__magic_name__ , return_attention_mask=__magic_name__ , **__magic_name__ , )
elif titles is None or texts is None:
a = titles if texts is None else texts
return super().__call__(
__magic_name__ , __magic_name__ , padding=__magic_name__ , truncation=__magic_name__ , max_length=__magic_name__ , return_tensors=__magic_name__ , return_attention_mask=__magic_name__ , **__magic_name__ , )
a = titles if not isinstance(__magic_name__ , __magic_name__ ) else [titles]
a = texts if not isinstance(__magic_name__ , __magic_name__ ) else [texts]
a = len(__magic_name__ )
a = questions if not isinstance(__magic_name__ , __magic_name__ ) else [questions] * n_passages
assert len(__magic_name__ ) == len(
__magic_name__ ), F'There should be as many titles as texts but got {len(__magic_name__ )} titles and {len(__magic_name__ )} texts.'
a = super().__call__(__magic_name__ , __magic_name__ , padding=__magic_name__ , truncation=__magic_name__ )["""input_ids"""]
a = super().__call__(__magic_name__ , add_special_tokens=__magic_name__ , padding=__magic_name__ , truncation=__magic_name__ )["""input_ids"""]
a = {
"""input_ids""": [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(__magic_name__ , __magic_name__ )
]
}
if return_attention_mask is not False:
a = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
a = attention_mask
return self.pad(__magic_name__ , padding=__magic_name__ , max_length=__magic_name__ , return_tensors=__magic_name__ )
def lowerCamelCase__ ( self :Optional[int] , __magic_name__ :BatchEncoding , __magic_name__ :DPRReaderOutput , __magic_name__ :int = 16 , __magic_name__ :int = 64 , __magic_name__ :int = 4 , ):
'''simple docstring'''
a = reader_input["""input_ids"""]
a , a , a = reader_output[:3]
a = len(__magic_name__ )
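# Visit passages from most to least relevant according to the reader's relevance logits.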
a = sorted(range(__magic_name__ ) , reverse=__magic_name__ , key=relevance_logits.__getitem__ )
a = []
for doc_id in sorted_docs:
a = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
a = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
a = sequence_ids.index(self.pad_token_id )
else:
a = len(__magic_name__ )
a = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=__magic_name__ , top_spans=__magic_name__ , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=__magic_name__ , start_index=__magic_name__ , end_index=__magic_name__ , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(__magic_name__ ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
    def _get_best_spans(
        self,
        start_logits: List[int],
        end_logits: List[int],
        max_answer_length: int,
        top_spans: int,
    ):
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            assert start_index <= end_index, f"Wrong span indices: [{start_index}:{end_index}]"
            length = end_index - start_index + 1
            assert length <= max_answer_length, f"Span is too long: {length} > {max_answer_length}"
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(__magic_name__ )
class __lowerCAmelCase ( __magic_name__ , __magic_name__ ):
UpperCamelCase__ = VOCAB_FILES_NAMES
UpperCamelCase__ = READER_PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase__ = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase__ = READER_PRETRAINED_INIT_CONFIGURATION
UpperCamelCase__ = ["input_ids", "attention_mask"]
UpperCamelCase__ = DPRReaderTokenizer
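# --- Illustrative usage (not part of the original file) ---
# The checkpoint id and the `reader_model` call below are assumptions that
# follow the upstream transformers DPR examples:
#
#   tokenizer = DPRReaderTokenizer.from_pretrained("facebook/dpr-reader-single-nq-base")
#   encoding = tokenizer(
#       questions=["What does a DPR reader score?"],
#       titles=["Dense Passage Retrieval"],
#       texts=["The reader scores passage relevance and answer spans ..."],
#       return_tensors="pt",
#   )
#   outputs = reader_model(**encoding)           # a DPRReader forward pass
#   best = tokenizer.decode_best_spans(encoding, outputs, num_spans=3)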
| 353 |
from copy import deepcopy
class FenwickTree:
    def __init__(self, arr: list[int] | None = None, size: int | None = None):
        """Initialize either from an existing array or with a given size."""
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr)
        else:
            raise ValueError("Either arr or size must be specified")

    def init(self, arr: list[int]) -> None:
        """Build the tree from arr in O(n)."""
        self.size = len(arr)
        self.tree = deepcopy(arr)
        for i in range(1, self.size):
            j = self.next_(i)
            if j < self.size:
                self.tree[j] += self.tree[i]

    def get_array(self) -> list[int]:
        """Recover the underlying array in O(n)."""
        arr = self.tree[:]
        for i in range(self.size - 1, 0, -1):
            j = self.next_(i)
            if j < self.size:
                arr[j] -= arr[i]
        return arr

    @staticmethod
    def next_(index: int) -> int:
        return index + (index & (-index))

    @staticmethod
    def prev(index: int) -> int:
        return index - (index & (-index))

    def add(self, index: int, value: int) -> None:
        """Add value at index in O(log n)."""
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index)

    def update(self, index: int, value: int) -> None:
        """Set the value at index in O(log n)."""
        self.add(index, value - self.get(index))

    def prefix(self, right: int) -> int:
        """Sum of the prefix [0, right) in O(log n)."""
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right)
        return result

    def query(self, left: int, right: int) -> int:
        """Sum of the range [left, right) in O(log n)."""
        return self.prefix(right) - self.prefix(left)

    def get(self, index: int) -> int:
        """Value at index in O(log n)."""
        return self.query(index, index + 1)

    def rank_query(self, value: int) -> int:
        """Largest index whose prefix sum does not exceed value, or -1; O(log n)."""
        value -= self.tree[0]
        if value < 0:
            return -1
        j = 1  # Largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2
        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i
if __name__ == "__main__":
import doctest
doctest.testmod()
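# --- Illustrative usage (not part of the original file) ---
if __name__ == "__main__":
    f = FenwickTree([3, 1, 4, 1, 5])
    assert f.prefix(3) == 8       # 3 + 1 + 4
    assert f.query(1, 4) == 6     # 1 + 4 + 1
    f.add(2, 10)                  # point update: arr[2] += 10
    assert f.get_array() == [3, 1, 14, 1, 5]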
| 347 | 0 |
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
__UpperCamelCase : str = logging.get_logger(__name__)
@dataclass
class __lowerCAmelCase ( lowercase_ ):
UpperCamelCase__ = [
"no_inference",
"no_cuda",
"no_tpu",
"no_speed",
"no_memory",
"no_env_print",
"no_multi_process",
]
    def __init__(self, **kwargs):
        """Map legacy "no_xxx" flags onto their positive counterparts."""
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                kwargs[positive_arg] = not kwargs.pop(deprecated_arg)
                logger.warning(
                    f"{deprecated_arg} is deprecated. Please use --no-{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}")
        self.tpu_name = kwargs.pop("tpu_name", self.tpu_name)
        self.device_idx = kwargs.pop("device_idx", self.device_idx)
        self.eager_mode = kwargs.pop("eager_mode", self.eager_mode)
        self.use_xla = kwargs.pop("use_xla", self.use_xla)
        super().__init__(**kwargs)
UpperCamelCase__ = field(
default=lowercase_ , metadata={'''help''': '''Name of TPU'''} , )
UpperCamelCase__ = field(
default=0 , metadata={'''help''': '''CPU / GPU device index. Defaults to 0.'''} , )
UpperCamelCase__ = field(default=lowercase_ , metadata={'''help''': '''Benchmark models in eager model.'''} )
UpperCamelCase__ = field(
default=lowercase_ , metadata={
'''help''': '''Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`.'''
} , )
    @cached_property
    def _setup_tpu(self):
        requires_backends(self, ["tf"])
        tpu = None
        if self.tpu:
            try:
                if self.tpu_name:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name)
                else:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
            except ValueError:
                tpu = None
        return tpu

    @cached_property
    def _setup_strategy(self):
        requires_backends(self, ["tf"])
        if self.is_tpu:
            tf.config.experimental_connect_to_cluster(self._setup_tpu)
            tf.tpu.experimental.initialize_tpu_system(self._setup_tpu)
            strategy = tf.distribute.TPUStrategy(self._setup_tpu)
        else:
            # currently no multi gpu is allowed
            if self.is_gpu:
                # TODO: Currently only single GPU is supported
                tf.config.set_visible_devices(self.gpu_list[self.device_idx], "GPU")
                strategy = tf.distribute.OneDeviceStrategy(device=f"/gpu:{self.device_idx}")
            else:
                tf.config.set_visible_devices([], "GPU")  # disable GPU
                strategy = tf.distribute.OneDeviceStrategy(device=f"/cpu:{self.device_idx}")
        return strategy

    @property
    def is_tpu(self):
        requires_backends(self, ["tf"])
        return self._setup_tpu is not None

    @property
    def strategy(self):
        requires_backends(self, ["tf"])
        return self._setup_strategy

    @property
    def gpu_list(self):
        requires_backends(self, ["tf"])
        return tf.config.list_physical_devices("GPU")

    @property
    def n_gpu(self):
        requires_backends(self, ["tf"])
        if self.cuda:
            return len(self.gpu_list)
        return 0

    @property
    def is_gpu(self):
        return self.n_gpu > 0
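# --- Illustrative sketch (not part of the original file) ---
# The renaming in __init__ exercised standalone: a legacy "no_xxx" flag is
# popped and stored under its positive name with the value negated.
if __name__ == "__main__":
    kwargs = {"no_cuda": True, "tpu_name": "my-tpu"}
    for deprecated_arg in ["no_cuda"]:
        if deprecated_arg in kwargs:
            kwargs[deprecated_arg[3:]] = not kwargs.pop(deprecated_arg)
    assert kwargs == {"cuda": False, "tpu_name": "my-tpu"}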
| 354 |
from __future__ import annotations
from typing import Generic, TypeVar
__UpperCamelCase : Union[str, Any] = TypeVar("T")
class DisjointSetTreeNode(Generic[T]):
    # Disjoint-set node storing its data, parent pointer and rank
    def __init__(self, data: T):
        self.data = data
        self.parent = self
        self.rank = 0


class DisjointSetTree(Generic[T]):
    def __init__(self):
        self.map: dict = {}

    def make_set(self, data: T) -> None:
        """Create a new singleton set."""
        self.map[data] = DisjointSetTreeNode(data)

    def find_set(self, data: T) -> DisjointSetTreeNode[T]:
        """Find the set representative, compressing the path on the way."""
        elem_ref = self.map[data]
        if elem_ref != elem_ref.parent:
            elem_ref.parent = self.find_set(elem_ref.parent.data)
        return elem_ref.parent

    def link(self, node1: DisjointSetTreeNode[T], node2: DisjointSetTreeNode[T]) -> None:
        """Union by rank: hang the shallower tree under the deeper one."""
        if node1.rank > node2.rank:
            node2.parent = node1
        else:
            node1.parent = node2
            if node1.rank == node2.rank:
                node2.rank += 1

    def union(self, data1: T, data2: T) -> None:
        self.link(self.find_set(data1), self.find_set(data2))


class GraphUndirectedWeighted(Generic[T]):
    def __init__(self):
        self.connections: dict = {}

    def add_node(self, node: T) -> None:
        if node not in self.connections:
            self.connections[node] = {}

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight

    def kruskal(self) -> "GraphUndirectedWeighted[T]":
        """Build a minimum spanning tree with Kruskal's algorithm."""
        edges = []
        seen = set()
        for start in self.connections:
            for end in self.connections[start]:
                if (start, end) not in seen:
                    seen.add((end, start))
                    edges.append((start, end, self.connections[start][end]))
        edges.sort(key=lambda x: x[2])
        # creating the disjoint set
        disjoint_set = DisjointSetTree[T]()
        for node in self.connections:
            disjoint_set.make_set(node)
        # MST generation
        num_edges = 0
        index = 0
        graph = GraphUndirectedWeighted[T]()
        while num_edges < len(self.connections) - 1:
            u, v, w = edges[index]
            index += 1
            parent_u = disjoint_set.find_set(u)
            parent_v = disjoint_set.find_set(v)
            if parent_u != parent_v:
                num_edges += 1
                graph.add_edge(u, v, w)
                disjoint_set.union(u, v)
        return graph
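# --- Illustrative usage (not part of the original file) ---
if __name__ == "__main__":
    g = GraphUndirectedWeighted[int]()
    g.add_edge(1, 2, 1)
    g.add_edge(2, 3, 2)
    g.add_edge(1, 3, 10)
    g.add_edge(3, 4, 3)
    mst = g.kruskal()
    # the weight-10 edge is the only one Kruskal drops
    assert sorted(mst.connections) == [1, 2, 3, 4]
    assert 3 not in mst.connections[1]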
| 347 | 0 |
def check_cycle(graph: dict) -> bool:
    """Return True if the directed graph (adjacency dict) contains a cycle."""
    # Keep track of all visited nodes
    visited: set = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk: set = set()
    return any(
        node not in visited and depth_first_search(graph, node, visited, rec_stk)
        for node in graph)


def depth_first_search(graph: dict, vertex: int, visited: set, rec_stk: set) -> bool:
    # Mark the current node as visited and add it to the recursion stack
    visited.add(vertex)
    rec_stk.add(vertex)
    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph, node, visited, rec_stk):
                return True
        elif node in rec_stk:
            # Back edge: the neighbour is on the current recursion path
            return True
    # The node needs to be removed from recursion stack before function ends
    rec_stk.remove(vertex)
    return False
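# --- Illustrative usage (not part of the original file) ---
if __name__ == "__main__":
    # 0 -> 1 -> 2 -> 0 has a back edge; dropping it removes the cycle.
    assert check_cycle({0: [1], 1: [2], 2: [0]}) is True
    assert check_cycle({0: [1], 1: [2], 2: []}) is False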
if __name__ == "__main__":
from doctest import testmod
testmod()
| 355 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
def lowerCamelCase__ ( self :Any ):
'''simple docstring'''
a = tempfile.mkdtemp()
a = BlipImageProcessor()
a = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-BertModel""" )
a = BlipProcessor(__magic_name__ , __magic_name__ )
processor.save_pretrained(self.tmpdirname )
def lowerCamelCase__ ( self :List[Any] , **__magic_name__ :Union[str, Any] ):
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **__magic_name__ ).tokenizer
def lowerCamelCase__ ( self :str , **__magic_name__ :List[str] ):
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **__magic_name__ ).image_processor
def lowerCamelCase__ ( self :int ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def lowerCamelCase__ ( self :Any ):
'''simple docstring'''
a = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
a = [Image.fromarray(np.moveaxis(__magic_name__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowerCamelCase__ ( self :Optional[int] ):
'''simple docstring'''
a = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
a = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
a = self.get_image_processor(do_normalize=__magic_name__ , padding_value=1.0 )
a = BlipProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=__magic_name__ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __magic_name__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __magic_name__ )
def lowerCamelCase__ ( self :Any ):
'''simple docstring'''
a = self.get_image_processor()
a = self.get_tokenizer()
a = BlipProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
a = self.prepare_image_inputs()
a = image_processor(__magic_name__ , return_tensors="""np""" )
a = processor(images=__magic_name__ , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def lowerCamelCase__ ( self :Any ):
'''simple docstring'''
a = self.get_image_processor()
a = self.get_tokenizer()
a = BlipProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
a = """lower newer"""
a = processor(text=__magic_name__ )
a = tokenizer(__magic_name__ , return_token_type_ids=__magic_name__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowerCamelCase__ ( self :List[Any] ):
'''simple docstring'''
a = self.get_image_processor()
a = self.get_tokenizer()
a = BlipProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
a = """lower newer"""
a = self.prepare_image_inputs()
a = processor(text=__magic_name__ , images=__magic_name__ )
self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """input_ids""", """attention_mask"""] )
# test if it raises when no input is passed
with pytest.raises(__magic_name__ ):
processor()
def lowerCamelCase__ ( self :List[str] ):
'''simple docstring'''
a = self.get_image_processor()
a = self.get_tokenizer()
a = BlipProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
a = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
a = processor.batch_decode(__magic_name__ )
a = tokenizer.batch_decode(__magic_name__ )
self.assertListEqual(__magic_name__ , __magic_name__ )
def lowerCamelCase__ ( self :Optional[int] ):
'''simple docstring'''
a = self.get_image_processor()
a = self.get_tokenizer()
a = BlipProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
a = """lower newer"""
a = self.prepare_image_inputs()
a = processor(text=__magic_name__ , images=__magic_name__ )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """input_ids""", """attention_mask"""] )
| 347 | 0 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : Any = logging.get_logger(__name__)
__UpperCamelCase : Optional[Any] = {
'Salesforce/blip-vqa-base': 'https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json',
'Salesforce/blip-vqa-capfit-large': (
'https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json'
),
'Salesforce/blip-image-captioning-base': (
'https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json'
),
'Salesforce/blip-image-captioning-large': (
'https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json'
),
'Salesforce/blip-itm-base-coco': 'https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json',
'Salesforce/blip-itm-large-coco': 'https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json',
'Salesforce/blip-itm-base-flikr': 'https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json',
'Salesforce/blip-itm-large-flikr': (
'https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json'
),
}
class __lowerCAmelCase ( lowerCamelCase__ ):
UpperCamelCase__ = '''blip_text_model'''
def __init__( self :List[str] , __magic_name__ :List[Any]=3_0524 , __magic_name__ :List[Any]=768 , __magic_name__ :Dict=768 , __magic_name__ :Optional[Any]=3072 , __magic_name__ :List[Any]=768 , __magic_name__ :List[str]=12 , __magic_name__ :int=8 , __magic_name__ :Optional[Any]=512 , __magic_name__ :Optional[Any]="gelu" , __magic_name__ :int=1E-1_2 , __magic_name__ :List[str]=0.0 , __magic_name__ :Union[str, Any]=0.0 , __magic_name__ :List[Any]=0.02 , __magic_name__ :int=3_0522 , __magic_name__ :Any=2 , __magic_name__ :str=0 , __magic_name__ :Dict=102 , __magic_name__ :int=True , __magic_name__ :Any=True , **__magic_name__ :Optional[int] , ):
'''simple docstring'''
super().__init__(
pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , sep_token_id=__lowerCamelCase , **__lowerCamelCase , )
a = vocab_size
a = hidden_size
a = encoder_hidden_size
a = intermediate_size
a = projection_dim
a = hidden_dropout_prob
a = num_hidden_layers
a = num_attention_heads
a = max_position_embeddings
a = layer_norm_eps
a = hidden_act
a = initializer_range
a = attention_probs_dropout_prob
a = is_decoder
a = use_cache
@classmethod
def lowerCamelCase__ ( cls :Dict , __magic_name__ :Any , **__magic_name__ :List[Any] ):
'''simple docstring'''
cls._set_token_in_kwargs(__lowerCamelCase )
a = cls.get_config_dict(__lowerCamelCase , **__lowerCamelCase )
# get the text config dict if we are loading from BlipConfig
if config_dict.get("""model_type""" ) == "blip":
a = config_dict['''text_config''']
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(__lowerCamelCase , **__lowerCamelCase )
class __lowerCAmelCase ( lowerCamelCase__ ):
UpperCamelCase__ = '''blip_vision_model'''
def __init__( self :str , __magic_name__ :int=768 , __magic_name__ :Any=3072 , __magic_name__ :Union[str, Any]=512 , __magic_name__ :List[Any]=12 , __magic_name__ :Tuple=12 , __magic_name__ :Union[str, Any]=384 , __magic_name__ :List[Any]=16 , __magic_name__ :Union[str, Any]="gelu" , __magic_name__ :Any=1E-5 , __magic_name__ :Any=0.0 , __magic_name__ :Optional[int]=1E-1_0 , **__magic_name__ :Dict , ):
'''simple docstring'''
super().__init__(**__lowerCamelCase )
a = hidden_size
a = intermediate_size
a = projection_dim
a = num_hidden_layers
a = num_attention_heads
a = patch_size
a = image_size
a = initializer_range
a = attention_dropout
a = layer_norm_eps
a = hidden_act
@classmethod
def lowerCamelCase__ ( cls :int , __magic_name__ :Dict , **__magic_name__ :Union[str, Any] ):
'''simple docstring'''
cls._set_token_in_kwargs(__lowerCamelCase )
a = cls.get_config_dict(__lowerCamelCase , **__lowerCamelCase )
# get the vision config dict if we are loading from BlipConfig
if config_dict.get("""model_type""" ) == "blip":
a = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(__lowerCamelCase , **__lowerCamelCase )
class __lowerCAmelCase ( lowerCamelCase__ ):
UpperCamelCase__ = '''blip'''
UpperCamelCase__ = True
def __init__( self :Any , __magic_name__ :Optional[int]=None , __magic_name__ :Tuple=None , __magic_name__ :int=512 , __magic_name__ :Optional[Any]=2.6592 , __magic_name__ :str=256 , **__magic_name__ :Optional[Any] , ):
'''simple docstring'''
super().__init__(**__lowerCamelCase )
if text_config is None:
a = {}
logger.info("""`text_config` is `None`. Initializing the `BlipTextConfig` with default values.""" )
if vision_config is None:
a = {}
logger.info("""`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values.""" )
a = BlipTextConfig(**__lowerCamelCase )
a = BlipVisionConfig(**__lowerCamelCase )
a = self.vision_config.hidden_size
a = projection_dim
a = logit_scale_init_value
a = 1.0
a = 0.02
a = image_text_hidden_size
@classmethod
def lowerCamelCase__ ( cls :Optional[int] , __magic_name__ :List[Any] , __magic_name__ :Optional[int] , **__magic_name__ :Tuple ):
'''simple docstring'''
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **__lowerCamelCase )
def lowerCamelCase__ ( self :Dict ):
'''simple docstring'''
a = copy.deepcopy(self.__dict__ )
a = self.text_config.to_dict()
a = self.vision_config.to_dict()
a = self.__class__.model_type
return output
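# --- Illustrative sketch (not part of the original file) ---
# The last classmethod above builds a full config from the two sub-configs.
# Assuming it keeps its upstream transformers name,
# BlipConfig.from_text_vision_configs, typical usage would look like:
#
#   text_cfg = BlipTextConfig(vocab_size=30524)
#   vision_cfg = BlipVisionConfig(image_size=384)
#   cfg = BlipConfig.from_text_vision_configs(text_cfg, vision_cfg)
#   cfg.to_dict()   # serializes both sub-configs plus the model_type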
| 356 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
__UpperCamelCase : int = {
"shi-labs/nat-mini-in1k-224": "https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json",
# See all Nat models at https://huggingface.co/models?filter=nat
}
class __lowerCAmelCase ( __magic_name__ , __magic_name__ ):
UpperCamelCase__ = '''nat'''
UpperCamelCase__ = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self :Any , __magic_name__ :int=4 , __magic_name__ :Dict=3 , __magic_name__ :List[str]=64 , __magic_name__ :Optional[int]=[3, 4, 6, 5] , __magic_name__ :int=[2, 4, 8, 16] , __magic_name__ :str=7 , __magic_name__ :Tuple=3.0 , __magic_name__ :Dict=True , __magic_name__ :List[Any]=0.0 , __magic_name__ :List[Any]=0.0 , __magic_name__ :List[Any]=0.1 , __magic_name__ :Optional[Any]="gelu" , __magic_name__ :Optional[Any]=0.02 , __magic_name__ :Tuple=1E-5 , __magic_name__ :Union[str, Any]=0.0 , __magic_name__ :int=None , __magic_name__ :Any=None , **__magic_name__ :Dict , ):
'''simple docstring'''
super().__init__(**__magic_name__ )
a = patch_size
a = num_channels
a = embed_dim
a = depths
a = len(__magic_name__ )
a = num_heads
a = kernel_size
a = mlp_ratio
a = qkv_bias
a = hidden_dropout_prob
a = attention_probs_dropout_prob
a = drop_path_rate
a = hidden_act
a = layer_norm_eps
a = initializer_range
# we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
a = int(embed_dim * 2 ** (len(__magic_name__ ) - 1) )
a = layer_scale_init_value
a = ["""stem"""] + [F'stage{idx}' for idx in range(1 , len(__magic_name__ ) + 1 )]
a , a = get_aligned_output_features_output_indices(
out_features=__magic_name__ , out_indices=__magic_name__ , stage_names=self.stage_names )
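# --- Illustrative note (not part of the original file) ---
# hidden_size above is the channel count after the last stage: the embedding
# dim doubles at every stage transition. With the defaults embed_dim=64 and
# depths=[3, 4, 6, 5] (four stages):
#     hidden_size = 64 * 2 ** (4 - 1) = 512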
| 347 | 0 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
SEWConfig,
SEWForCTC,
SEWModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
__UpperCamelCase : str = logging.get_logger(__name__)
__UpperCamelCase : List[Any] = {
"post_extract_proj": "feature_projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.upsample.0": "encoder.upsample.projection",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "layer_norm",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Union[str, Any]:
for attribute in key.split(""".""" ):
a = getattr(UpperCAmelCase_ , UpperCAmelCase_ )
if weight_type is not None:
a = getattr(UpperCAmelCase_ , UpperCAmelCase_ ).shape
else:
a = hf_pointer.shape
assert hf_shape == value.shape, (
f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
f' {value.shape} for {full_name}'
)
if weight_type == "weight":
a = value
elif weight_type == "weight_g":
a = value
elif weight_type == "weight_v":
a = value
elif weight_type == "bias":
a = value
else:
a = value
logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Optional[int]:
a = []
a = fairseq_model.state_dict()
a = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
a = False
if "conv_layers" in name:
load_conv_layer(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , hf_model.config.feat_extract_norm == """group""" , )
a = True
else:
for key, mapped_key in MAPPING.items():
a = 'sew.' + mapped_key if (is_finetuned and mapped_key != 'lm_head') else mapped_key
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
a = True
if "*" in mapped_key:
a = name.split(UpperCAmelCase_ )[0].split(""".""" )[-2]
a = mapped_key.replace("""*""" , UpperCAmelCase_ )
if "weight_g" in name:
a = 'weight_g'
elif "weight_v" in name:
a = 'weight_v'
elif "weight" in name:
a = 'weight'
elif "bias" in name:
a = 'bias'
else:
a = None
set_recursively(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
continue
if not is_used:
unused_weights.append(UpperCAmelCase_ )
logger.warning(f'Unused weights: {unused_weights}' )
def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> str:
a = full_name.split("""conv_layers.""" )[-1]
a = name.split(""".""" )
a = int(items[0] )
a = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
)
a = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
)
a = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'
" found."
)
a = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'
)
a = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(UpperCAmelCase_ )
def __A ( __lowerCamelCase , __lowerCamelCase ) -> Any:
a = SEWConfig()
if is_finetuned:
a = model.wav_encoder.wav_model.cfg
else:
a = model.cfg
a = fs_config.conv_bias
a = eval(fs_config.conv_feature_layers )
a = [x[0] for x in conv_layers]
a = [x[1] for x in conv_layers]
a = [x[2] for x in conv_layers]
a = 'gelu'
a = 'layer' if fs_config.extractor_mode == 'layer_norm' else 'group'
a = 0.0
a = fs_config.activation_fn.name
a = fs_config.encoder_embed_dim
a = 0.02
a = fs_config.encoder_ffn_embed_dim
a = 1E-5
a = fs_config.encoder_layerdrop
a = fs_config.encoder_attention_heads
a = fs_config.conv_pos_groups
a = fs_config.conv_pos
a = len(UpperCAmelCase_ )
a = fs_config.encoder_layers
a = fs_config.squeeze_factor
# take care of any params that are overridden by the Wav2VecCtc model
if is_finetuned:
a = model.cfg
a = fs_config.final_dropout
a = fs_config.layerdrop
a = fs_config.activation_dropout
a = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
a = fs_config.attention_dropout
a = fs_config.dropout_input
a = fs_config.dropout
a = fs_config.mask_channel_length
a = fs_config.mask_channel_prob
a = fs_config.mask_length
a = fs_config.mask_prob
a = 'Wav2Vec2FeatureExtractor'
a = 'Wav2Vec2CTCTokenizer'
return config
@torch.no_grad()
def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=True ) -> Union[str, Any]:
if is_finetuned:
a = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
else:
a = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
if config_path is not None:
a = SEWConfig.from_pretrained(UpperCAmelCase_ )
else:
a = convert_config(model[0] , UpperCAmelCase_ )
a = model[0].eval()
a = True if config.feat_extract_norm == 'layer' else False
a = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=UpperCAmelCase_ , return_attention_mask=UpperCAmelCase_ , )
if is_finetuned:
if dict_path:
a = Dictionary.load(UpperCAmelCase_ )
            # important: change the bos & pad token ids, since the CTC symbol
            # is <pad> and not <s> as in fairseq
a = target_dict.pad_index
a = target_dict.bos_index
a = target_dict.pad_index
a = target_dict.bos_index
a = target_dict.eos_index
a = len(target_dict.symbols )
a = os.path.join(UpperCAmelCase_ , """vocab.json""" )
if not os.path.isdir(UpperCAmelCase_ ):
logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(UpperCAmelCase_ ) )
return
os.makedirs(UpperCAmelCase_ , exist_ok=UpperCAmelCase_ )
with open(UpperCAmelCase_ , """w""" , encoding="""utf-8""" ) as vocab_handle:
json.dump(target_dict.indices , UpperCAmelCase_ )
a = WavaVecaCTCTokenizer(
UpperCAmelCase_ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=UpperCAmelCase_ , )
a = WavaVecaProcessor(feature_extractor=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ )
processor.save_pretrained(UpperCAmelCase_ )
a = SEWForCTC(UpperCAmelCase_ )
else:
a = SEWModel(UpperCAmelCase_ )
feature_extractor.save_pretrained(UpperCAmelCase_ )
recursively_load_weights(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
hf_model.save_pretrained(UpperCAmelCase_ )
if __name__ == "__main__":
__UpperCamelCase : Any = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--is_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
__UpperCamelCase : List[Any] = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
)
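# --- Illustrative invocation (not part of the original file) ---
# Placeholder paths; the script filename is whatever this file is saved as.
# A pretrained (not fine-tuned) checkpoint needs neither --dict_path nor
# --is_finetuned:
#
#   python this_script.py \
#       --checkpoint_path /path/to/sew_checkpoint.pt \
#       --pytorch_dump_folder_path /path/to/output \
#       --is_finetuned \
#       --dict_path /path/to/dict.ltr.txt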
| 357 |
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def __A ( __lowerCamelCase , __lowerCamelCase ) -> Optional[int]:
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
# expert layer
a = flax_key_tuple[:-1] + ("""weight""",)
a = torch.permute(__lowerCamelCase , (0, 2, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(__lowerCamelCase ):
# linear layer
a = flax_key_tuple[:-1] + ("""weight""",)
a = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
a = flax_key_tuple[:-1] + ("""weight""",)
return flax_key_tuple, flax_tensor
def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> List[Any]:
if "metadata" in layer:
a = layer.split("""metadata""" )
a = """""".join(split_layer[0] )[:-1]
a = [tuple(("""metadata""" + split_layer[1]).split("""/""" ) )]
elif "kvstore" in layer:
a = layer.split("""kvstore""" )
a = """""".join(split_layer[0] )[:-1]
a = [tuple(("""kvstore""" + split_layer[1]).split("""/""" ) )]
else:
a = layer.split("""/""" )
a = """/""".join(split_layer[:-1] )
a = (split_layer[-1],)
if "kvstore/path" in layer:
a = f'{switch_checkpoint_path}/{checkpoint_info[layer]}'
elif "kvstore/driver" in layer:
a = """file"""
else:
a = checkpoint_info[layer]
return curr_real_layer_name, split_layer, content
def __A ( __lowerCamelCase , __lowerCamelCase ) -> Union[str, Any]:
a = rename_keys(__lowerCamelCase )
a = {}
for k, v in current_block.items():
a = v
a = new_current_block
torch.save(__lowerCamelCase , __lowerCamelCase )
def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = WEIGHTS_NAME ) -> List[str]:
a = convert_file_size_to_int(__lowerCamelCase )
a = []
a = {}
a = 0
a = 0
os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase )
with gfile.GFile(switch_checkpoint_path + """/checkpoint""" , """rb""" ) as fp:
a = serialization.msgpack_restore(fp.read() )["""optimizer"""]["""target"""]
a = flatten_dict(__lowerCamelCase , sep="""/""" )
a = {}
for layer in checkpoint_info.keys():
a , a , a = get_key_and_tensorstore_dict(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
if curr_real_layer_name in all_layers:
a = content
else:
a = {split_layer[-1]: content}
for key in all_layers.keys():
# open tensorstore file
a = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
a = torch.tensor(__lowerCamelCase )
a = raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
# use the renaming pattern from the small conversion scripts
a , a = rename_base_flax_keys(tuple(key.split("""/""" ) ) , __lowerCamelCase )
a = """/""".join(__lowerCamelCase )
# If this weight is going to tip up over the maximal size, we split.
if current_block_size + weight_size > max_shard_size:
a = os.path.join(
__lowerCamelCase , weights_name.replace(""".bin""" , f'-{len(__lowerCamelCase )+1:05d}-of-???.bin' ) )
rename_and_save_block(__lowerCamelCase , __lowerCamelCase )
sharded_state_dicts.append(current_block.keys() )
del current_block
a = {}
a = 0
a = raw_weights.to(getattr(__lowerCamelCase , __lowerCamelCase ) )
current_block_size += weight_size
total_size += weight_size
# Add the last block
a = os.path.join(__lowerCamelCase , weights_name.replace(""".bin""" , f'-{len(__lowerCamelCase )+1:05d}-of-???.bin' ) )
rename_and_save_block(__lowerCamelCase , __lowerCamelCase )
sharded_state_dicts.append(current_block.keys() )
# If we only have one shard, we return it
if len(__lowerCamelCase ) == 1:
return {weights_name: sharded_state_dicts[0]}, None
# Otherwise, let's build the index
a = {}
a = {}
for idx, shard in enumerate(__lowerCamelCase ):
a = weights_name.replace(
""".bin""" , f'-{idx+1:05d}-of-{len(__lowerCamelCase ):05d}.bin' ) # len(sharded_state_dicts):05d}
a = os.path.join(__lowerCamelCase , weights_name.replace(""".bin""" , f'-{idx+1:05d}-of-???.bin' ) )
os.rename(__lowerCamelCase , os.path.join(__lowerCamelCase , __lowerCamelCase ) )
a = shard
for key in shard:
a = shard_file
# Add the metadata
a = {"""total_size""": total_size}
a = {"""metadata""": metadata, """weight_map""": weight_map}
with open(os.path.join(__lowerCamelCase , __lowerCamelCase ) , """w""" , encoding="""utf-8""" ) as f:
a = json.dumps(__lowerCamelCase , indent=2 , sort_keys=__lowerCamelCase ) + """\n"""
f.write(__lowerCamelCase )
return metadata, index
if __name__ == "__main__":
__UpperCamelCase : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--switch_t5x_checkpoint_path",
default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600",
type=str,
required=False,
help="Path to a directory containing a folder per layer. Follows the original Google format.",
)
parser.add_argument("--max_shard_size", default="10GB", required=False, help="Max shard size")
parser.add_argument("--dtype", default="bfloat16", type=str, required=False, help="dtype of the saved model")
parser.add_argument(
"--pytorch_dump_folder_path",
default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted",
type=str,
required=False,
help="Path to the output pytorch model.",
)
__UpperCamelCase : Any = parser.parse_args()
shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def __A ( ) -> Tuple:
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer
a = SwitchTransformersConfig.from_pretrained("""google/switch-base-8""" )
config.save_pretrained("""/home/arthur_huggingface_co/transformers/switch_converted""" )
a = SwitchTransformersForConditionalGeneration.from_pretrained(
"""/home/arthur_huggingface_co/transformers/switch_converted""" , device_map="""auto""" )
a = TaTokenizer.from_pretrained("""t5-small""" )
a = """A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."""
a = tokenizer(__lowerCamelCase , return_tensors="""pt""" ).input_ids
a = model.generate(__lowerCamelCase , decoder_start_token_id=0 )
print(tokenizer.decode(out[0] ) )
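# --- Illustrative note (not part of the original file) ---
# The sharding loop above starts a new file whenever the running byte count
# would exceed max_shard_size ("10GB" is parsed by convert_file_size_to_int),
# then renames the "-of-???.bin" placeholders once the final count is known:
#     pytorch_model.bin -> pytorch_model-00001-of-00003.bin, ...
# The weight_map in the index JSON records which shard holds each tensor.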
| 347 | 0 |
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
data = datasets.load_iris()

X = np.array(data["data"])
y = np.array(data["target"])
classes = data["target_names"]
X_train, X_test, y_train, y_test = train_test_split(X, y)


def euclidean_distance(a, b) -> float:
    """Euclidean distance between two points a and b."""
    return np.linalg.norm(np.array(a) - np.array(b))


def classifier(train_data, train_target, classes, point, k=5) -> str:
    """Classify point by majority vote among its k nearest neighbours."""
    data = zip(train_data, train_target)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]
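if __name__ == "__main__":
    # Quick sanity check (not part of the original file):
    # the 3-4-5 right triangle gives a distance of exactly 5.
    assert euclidean_distance([0, 0], [3, 4]) == 5.0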
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
| 358 |
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
import numpy as np
# Parameters
__UpperCamelCase : Union[str, Any] = (720, 1_280) # Height, Width
__UpperCamelCase : Any = (0.4, 0.6) # if height or width lower than this scale, drop it.
__UpperCamelCase : str = 1 / 100
__UpperCamelCase : Optional[int] = ""
__UpperCamelCase : List[Any] = ""
__UpperCamelCase : Union[str, Any] = ""
__UpperCamelCase : Tuple = 250
def __A ( ) -> None:
a , a = get_dataset(__lowerCamelCase , __lowerCamelCase )
for index in range(__lowerCamelCase ):
a = random.sample(range(len(__lowerCamelCase ) ) , 4 )
a , a , a = update_image_and_anno(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , filter_scale=__lowerCamelCase , )
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
a = random_chars(32 )
a = path.split(os.sep )[-1].rsplit(""".""" , 1 )[0]
a = f'{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}'
cva.imwrite(f'{file_root}.jpg' , __lowerCamelCase , [cva.IMWRITE_JPEG_QUALITY, 85] )
print(f'Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}' )
a = []
for anno in new_annos:
a = anno[3] - anno[1]
a = anno[4] - anno[2]
a = anno[1] + width / 2
a = anno[2] + height / 2
a = f'{anno[0]} {x_center} {y_center} {width} {height}'
annos_list.append(__lowerCamelCase )
with open(f'{file_root}.txt' , """w""" ) as outfile:
outfile.write("""\n""".join(line for line in annos_list ) )
def __A ( __lowerCamelCase , __lowerCamelCase ) -> tuple[list, list]:
a = []
a = []
for label_file in glob.glob(os.path.join(__lowerCamelCase , """*.txt""" ) ):
a = label_file.split(os.sep )[-1].rsplit(""".""" , 1 )[0]
with open(__lowerCamelCase ) as in_file:
a = in_file.readlines()
a = os.path.join(__lowerCamelCase , f'{label_name}.jpg' )
a = []
for obj_list in obj_lists:
a = obj_list.rstrip("""\n""" ).split(""" """ )
a = float(obj[1] ) - float(obj[3] ) / 2
a = float(obj[2] ) - float(obj[4] ) / 2
a = float(obj[1] ) + float(obj[3] ) / 2
a = float(obj[2] ) + float(obj[4] ) / 2
boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] )
if not boxes:
continue
img_paths.append(__lowerCamelCase )
labels.append(__lowerCamelCase )
return img_paths, labels
def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = 0.0 , ) -> tuple[list, list, str]:
a = np.zeros([output_size[0], output_size[1], 3] , dtype=np.uinta )
a = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
a = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
a = int(scale_x * output_size[1] )
a = int(scale_y * output_size[0] )
a = []
a = []
for i, index in enumerate(__lowerCamelCase ):
a = all_img_list[index]
path_list.append(__lowerCamelCase )
a = all_annos[index]
a = cva.imread(__lowerCamelCase )
if i == 0: # top-left
a = cva.resize(__lowerCamelCase , (divid_point_x, divid_point_y) )
a = img
for bbox in img_annos:
a = bbox[1] * scale_x
a = bbox[2] * scale_y
a = bbox[3] * scale_x
a = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 1: # top-right
a = cva.resize(__lowerCamelCase , (output_size[1] - divid_point_x, divid_point_y) )
a = img
for bbox in img_annos:
a = scale_x + bbox[1] * (1 - scale_x)
a = bbox[2] * scale_y
a = scale_x + bbox[3] * (1 - scale_x)
a = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 2: # bottom-left
a = cva.resize(__lowerCamelCase , (divid_point_x, output_size[0] - divid_point_y) )
a = img
for bbox in img_annos:
a = bbox[1] * scale_x
a = scale_y + bbox[2] * (1 - scale_y)
a = bbox[3] * scale_x
a = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
else: # bottom-right
a = cva.resize(
__lowerCamelCase , (output_size[1] - divid_point_x, output_size[0] - divid_point_y) )
a = img
for bbox in img_annos:
a = scale_x + bbox[1] * (1 - scale_x)
a = scale_y + bbox[2] * (1 - scale_y)
a = scale_x + bbox[3] * (1 - scale_x)
a = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
# Remove bounding box small than scale of filter
if filter_scale > 0:
a = [
anno
for anno in new_anno
if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
]
return output_img, new_anno, path_list[0]
def __A ( __lowerCamelCase ) -> str:
    assert number_char > 1, "The number of characters should be greater than 1"
a = ascii_lowercase + digits
return "".join(random.choice(__lowerCamelCase ) for _ in range(__lowerCamelCase ) )
if __name__ == "__main__":
main()
print("DONE ✅")
| 347 | 0 |
import unittest
from knapsack import greedy_knapsack as kp
class __lowerCAmelCase ( unittest.TestCase ):
    def test_sorted(self):
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit, weight, max_weight), 210)

    def test_negative_max_weight(self):
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_negative_weight_value(self):
        self.assertRaisesRegex(ValueError, "Weight can not be negative.")

    def test_negative_profit_value(self):
        self.assertRaisesRegex(ValueError, "Profit can not be negative.")

    def test_null_max_weight(self):
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_unequal_list_length(self):
        self.assertRaisesRegex(
            ValueError, "The length of profit and weight must be same.")
if __name__ == "__main__":
unittest.main()
| 359 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__UpperCamelCase : Optional[Any] = {
"configuration_mobilenet_v2": [
"MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP",
"MobileNetV2Config",
"MobileNetV2OnnxConfig",
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : int = ["MobileNetV2FeatureExtractor"]
__UpperCamelCase : Tuple = ["MobileNetV2ImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : int = [
"MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST",
"MobileNetV2ForImageClassification",
"MobileNetV2ForSemanticSegmentation",
"MobileNetV2Model",
"MobileNetV2PreTrainedModel",
"load_tf_weights_in_mobilenet_v2",
]
if TYPE_CHECKING:
from .configuration_mobilenet_va import (
MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileNetVaConfig,
MobileNetVaOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilenet_va import MobileNetVaFeatureExtractor
from .image_processing_mobilenet_va import MobileNetVaImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilenet_va import (
MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileNetVaForImageClassification,
MobileNetVaForSemanticSegmentation,
MobileNetVaModel,
MobileNetVaPreTrainedModel,
load_tf_weights_in_mobilenet_va,
)
else:
import sys
__UpperCamelCase : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 347 | 0 |
"""simple docstring"""
import math
import sys
def minimum_squares_to_represent_a_number(number: int) -> int:
    """Count the minimum number of perfect squares that sum to `number`."""
    if number != int(number):
        raise ValueError("the value of input must be a natural number")
    if number < 0:
        raise ValueError("the value of input must not be a negative number")
    if number == 0:
        return 1
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1, number + 1):
        answer = sys.maxsize
        root = int(math.sqrt(i))
        for j in range(1, root + 1):
            current_answer = 1 + answers[i - (j**2)]  # use square j*j, then solve the remainder
            answer = min(answer, current_answer)
        answers[i] = answer
    return answers[number]
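if __name__ == "__main__":
    # Worked examples (not part of the original file):
    # 12 = 4 + 4 + 4 needs three squares, 13 = 9 + 4 needs two.
    assert minimum_squares_to_represent_a_number(12) == 3
    assert minimum_squares_to_represent_a_number(13) == 2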
if __name__ == "__main__":
import doctest
doctest.testmod()
| 360 |
def is_palindrome(num: int) -> bool:
    """Return True if num reads the same forwards and backwards in base 10."""
    if num < 0:
        return False
    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10
    return num_copy == rev_num
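if __name__ == "__main__":
    # Quick sanity checks (not part of the original file):
    assert is_palindrome(121) is True
    assert is_palindrome(123) is False
    assert is_palindrome(-121) is False  # negatives never match their reversal here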
if __name__ == "__main__":
import doctest
doctest.testmod()
| 347 | 0 |
import bza
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
__UpperCamelCase : Optional[Any] = get_logger(__name__)
class __lowerCAmelCase :
def __init__( self :Any , __magic_name__ :Optional[str] = None ):
'''simple docstring'''
a = (
os.path.join(__A , config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH
)
a = Extractor
def lowerCamelCase__ ( self :str , __magic_name__ :str ):
'''simple docstring'''
from .file_utils import hash_url_to_filename
# Path where we extract compressed archives
        # We extract in the cache dir, and get the extracted path name by hashing the original path
a = os.path.abspath(__A )
return os.path.join(self.extract_dir , hash_url_to_filename(__A ) )
def lowerCamelCase__ ( self :int , __magic_name__ :str , __magic_name__ :bool ):
'''simple docstring'''
return force_extract or (
not os.path.isfile(__A ) and not (os.path.isdir(__A ) and os.listdir(__A ))
)
def lowerCamelCase__ ( self :str , __magic_name__ :str , __magic_name__ :bool = False ):
'''simple docstring'''
a = self.extractor.infer_extractor_format(__A )
if not extractor_format:
return input_path
a = self._get_output_path(__A )
if self._do_extract(__A , __A ):
self.extractor.extract(__A , __A , __A )
return output_path
class __lowerCAmelCase ( lowerCamelCase__ ):
@classmethod
@abstractmethod
def lowerCamelCase__ ( cls :int , __magic_name__ :Union[Path, str] , **__magic_name__ :List[Any] ):
'''simple docstring'''
...
@staticmethod
@abstractmethod
def lowerCamelCase__ ( __magic_name__ :Union[Path, str] , __magic_name__ :Union[Path, str] ):
'''simple docstring'''
...
class __lowerCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ):
UpperCamelCase__ = []
@staticmethod
def lowerCamelCase__ ( __magic_name__ :Union[Path, str] , __magic_name__ :int ):
'''simple docstring'''
with open(__A , """rb""" ) as f:
return f.read(__A )
@classmethod
def lowerCamelCase__ ( cls :List[str] , __magic_name__ :Union[Path, str] , __magic_name__ :bytes = b"" ):
'''simple docstring'''
if not magic_number:
a = max(len(__A ) for cls_magic_number in cls.magic_numbers )
try:
a = cls.read_magic_number(__A , __A )
except OSError:
return False
return any(magic_number.startswith(__A ) for cls_magic_number in cls.magic_numbers )
class __lowerCAmelCase ( lowerCamelCase__ ):
@classmethod
def lowerCamelCase__ ( cls :Union[str, Any] , __magic_name__ :Union[Path, str] , **__magic_name__ :int ):
'''simple docstring'''
return tarfile.is_tarfile(__A )
@staticmethod
def lowerCamelCase__ ( __magic_name__ :List[Any] , __magic_name__ :Any ):
'''simple docstring'''
def resolved(__magic_name__ :str ) -> str:
return os.path.realpath(os.path.abspath(__A ) )
def badpath(__magic_name__ :str , __magic_name__ :str ) -> bool:
# joinpath will ignore base if path is absolute
return not resolved(os.path.join(__A , __A ) ).startswith(__A )
def badlink(__magic_name__ :List[Any] , __magic_name__ :str ) -> bool:
# Links are interpreted relative to the directory containing the link
a = resolved(os.path.join(__A , os.path.dirname(info.name ) ) )
return badpath(info.linkname , base=__A )
a = resolved(__A )
for finfo in members:
if badpath(finfo.name , __A ):
logger.error(F'Extraction of {finfo.name} is blocked (illegal path)' )
elif finfo.issym() and badlink(__A , __A ):
logger.error(F'Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}' )
elif finfo.islnk() and badlink(__A , __A ):
logger.error(F'Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}' )
else:
yield finfo
@staticmethod
def lowerCamelCase__ ( __magic_name__ :Union[Path, str] , __magic_name__ :Union[Path, str] ):
'''simple docstring'''
os.makedirs(__A , exist_ok=__A )
a = tarfile.open(__A )
tar_file.extractall(__A , members=TarExtractor.safemembers(__A , __A ) )
tar_file.close()
class __lowerCAmelCase ( lowerCamelCase__ ):
UpperCamelCase__ = [b'''\x1F\x8B''']
@staticmethod
def lowerCamelCase__ ( __magic_name__ :Union[Path, str] , __magic_name__ :Union[Path, str] ):
'''simple docstring'''
with gzip.open(__A , """rb""" ) as gzip_file:
with open(__A , """wb""" ) as extracted_file:
shutil.copyfileobj(__A , __A )
class __lowerCAmelCase ( lowerCamelCase__ ):
UpperCamelCase__ = [
b'''PK\x03\x04''',
b'''PK\x05\x06''', # empty archive
b'''PK\x07\x08''', # spanned archive
]
@classmethod
def lowerCamelCase__ ( cls :Union[str, Any] , __magic_name__ :Union[Path, str] , __magic_name__ :bytes = b"" ):
'''simple docstring'''
if super().is_extractable(__A , magic_number=__A ):
return True
try:
# Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
# From: https://github.com/python/cpython/pull/5053
from zipfile import (
_CD_SIGNATURE,
_ECD_DISK_NUMBER,
_ECD_DISK_START,
_ECD_ENTRIES_TOTAL,
_ECD_OFFSET,
_ECD_SIZE,
_EndRecData,
sizeCentralDir,
stringCentralDir,
structCentralDir,
)
with open(__A , """rb""" ) as fp:
a = _EndRecData(__A )
if endrec:
if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
return True # Empty zipfiles are still zipfiles
elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk
if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
a = fp.read(__A ) # CD is where we expect it to be
if len(__A ) == sizeCentralDir:
a = struct.unpack(__A , __A ) # CD is the right size
if centdir[_CD_SIGNATURE] == stringCentralDir:
return True # First central directory entry has correct magic number
return False
except Exception: # catch all errors in case future python versions change the zipfile internals
return False
@staticmethod
def lowerCamelCase__ ( __magic_name__ :Union[Path, str] , __magic_name__ :Union[Path, str] ):
'''simple docstring'''
os.makedirs(__A , exist_ok=__A )
with zipfile.ZipFile(__A , """r""" ) as zip_file:
zip_file.extractall(__A )
zip_file.close()
class __lowerCAmelCase ( lowerCamelCase__ ):
UpperCamelCase__ = [b'''\xFD\x37\x7A\x58\x5A\x00''']
@staticmethod
def lowerCamelCase__ ( __magic_name__ :Union[Path, str] , __magic_name__ :Union[Path, str] ):
'''simple docstring'''
with lzma.open(__A ) as compressed_file:
with open(__A , """wb""" ) as extracted_file:
shutil.copyfileobj(__A , __A )
class __lowerCAmelCase ( lowerCamelCase__ ):
UpperCamelCase__ = [b'''Rar!\x1a\x07\x00''', b'''Rar!\x1a\x07\x01\x00'''] # RAR_ID # RAR5_ID
@staticmethod
def lowerCamelCase__ ( __magic_name__ :Union[Path, str] , __magic_name__ :Union[Path, str] ):
'''simple docstring'''
if not config.RARFILE_AVAILABLE:
raise ImportError("""Please pip install rarfile""" )
import rarfile
os.makedirs(__A , exist_ok=__A )
a = rarfile.RarFile(__A )
rf.extractall(__A )
rf.close()
class __lowerCAmelCase ( lowerCamelCase__ ):
UpperCamelCase__ = [b'''\x28\xb5\x2F\xFD''']
@staticmethod
def lowerCamelCase__ ( __magic_name__ :Union[Path, str] , __magic_name__ :Union[Path, str] ):
'''simple docstring'''
if not config.ZSTANDARD_AVAILABLE:
raise ImportError("""Please pip install zstandard""" )
import zstandard as zstd
a = zstd.ZstdDecompressor()
with open(__A , """rb""" ) as ifh, open(__A , """wb""" ) as ofh:
dctx.copy_stream(__A , __A )
class __lowerCAmelCase ( lowerCamelCase__ ):
UpperCamelCase__ = [b'''\x42\x5A\x68''']
@staticmethod
def lowerCamelCase__ ( __magic_name__ :Union[Path, str] , __magic_name__ :Union[Path, str] ):
'''simple docstring'''
with bza.open(__A , """rb""" ) as compressed_file:
with open(__A , """wb""" ) as extracted_file:
shutil.copyfileobj(__A , __A )
class __lowerCAmelCase ( lowerCamelCase__ ):
UpperCamelCase__ = [b'''\x37\x7A\xBC\xAF\x27\x1C''']
@staticmethod
def lowerCamelCase__ ( __magic_name__ :Union[Path, str] , __magic_name__ :Union[Path, str] ):
'''simple docstring'''
if not config.PY7ZR_AVAILABLE:
raise ImportError("""Please pip install py7zr""" )
import pyazr
os.makedirs(__A , exist_ok=__A )
with pyazr.SevenZipFile(__A , """r""" ) as archive:
archive.extractall(__A )
class __lowerCAmelCase ( lowerCamelCase__ ):
UpperCamelCase__ = [b'''\x04\x22\x4D\x18''']
@staticmethod
def lowerCamelCase__ ( __magic_name__ :Union[Path, str] , __magic_name__ :Union[Path, str] ):
'''simple docstring'''
if not config.LZ4_AVAILABLE:
raise ImportError("""Please pip install lz4""" )
import lza.frame
with lza.frame.open(__A , """rb""" ) as compressed_file:
with open(__A , """wb""" ) as extracted_file:
shutil.copyfileobj(__A , __A )
class __lowerCAmelCase :
# Put zip file to the last, b/c it is possible wrongly detected as zip (I guess it means: as tar or gzip)
UpperCamelCase__ = {
'''tar''': TarExtractor,
'''gzip''': GzipExtractor,
'''zip''': ZipExtractor,
'''xz''': XzExtractor,
'''rar''': RarExtractor,
'''zstd''': ZstdExtractor,
'''bz2''': BzipaExtractor,
'''7z''': SevenZipExtractor, # <Added version="2.4.0"/>
'''lz4''': LzaExtractor, # <Added version="2.4.0"/>
}
@classmethod
def lowerCamelCase__ ( cls :Dict ):
'''simple docstring'''
return max(
len(__A )
for extractor in cls.extractors.values()
if issubclass(__A , __A )
for extractor_magic_number in extractor.magic_numbers )
@staticmethod
def lowerCamelCase__ ( __magic_name__ :Union[Path, str] , __magic_name__ :int ):
'''simple docstring'''
try:
return MagicNumberBaseExtractor.read_magic_number(__A , magic_number_length=__A )
except OSError:
return b""
@classmethod
def lowerCamelCase__ ( cls :Dict , __magic_name__ :Union[Path, str] , __magic_name__ :bool = False ):
'''simple docstring'''
warnings.warn(
"""Method \'is_extractable\' was deprecated in version 2.4.0 and will be removed in 3.0.0. """
"""Use \'infer_extractor_format\' instead.""" , category=__A , )
a = cls.infer_extractor_format(__A )
if extractor_format:
return True if not return_extractor else (True, cls.extractors[extractor_format])
return False if not return_extractor else (False, None)
@classmethod
def lowerCamelCase__ ( cls :Optional[Any] , __magic_name__ :Union[Path, str] ): # <Added version="2.4.0"/>
'''simple docstring'''
a = cls._get_magic_number_max_length()
a = cls._read_magic_number(__A , __A )
for extractor_format, extractor in cls.extractors.items():
if extractor.is_extractable(__A , magic_number=__A ):
return extractor_format
@classmethod
def lowerCamelCase__ ( cls :Optional[Any] , __magic_name__ :Union[Path, str] , __magic_name__ :Union[Path, str] , __magic_name__ :Optional[str] = None , __magic_name__ :Optional[BaseExtractor] = "deprecated" , ):
'''simple docstring'''
os.makedirs(os.path.dirname(__A ) , exist_ok=__A )
# Prevent parallel extractions
a = str(Path(__A ).with_suffix(""".lock""" ) )
with FileLock(__A ):
shutil.rmtree(__A , ignore_errors=__A )
if extractor_format or extractor != "deprecated":
if extractor != "deprecated" or not isinstance(__A , __A ): # passed as positional arg
warnings.warn(
"""Parameter \'extractor\' was deprecated in version 2.4.0 and will be removed in 3.0.0. """
"""Use \'extractor_format\' instead.""" , category=__A , )
a = extractor if extractor != '''deprecated''' else extractor_format
else:
a = cls.extractors[extractor_format]
return extractor.extract(__A , __A )
else:
warnings.warn(
"""Parameter \'extractor_format\' was made required in version 2.4.0 and not passing it will raise an """
"""exception in 3.0.0.""" , category=__A , )
for extractor in cls.extractors.values():
if extractor.is_extractable(__A ):
return extractor.extract(__A , __A )
| 361 |
import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ):
UpperCamelCase__ = CanineTokenizer
UpperCamelCase__ = False
def lowerCamelCase__ ( self :Tuple ):
'''simple docstring'''
super().setUp()
a = CanineTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def lowerCamelCase__ ( self :Dict ):
'''simple docstring'''
return CanineTokenizer.from_pretrained("""google/canine-s""" )
def lowerCamelCase__ ( self :Tuple , **__magic_name__ :Dict ):
'''simple docstring'''
a = self.tokenizer_class.from_pretrained(self.tmpdirname , **__magic_name__ )
a = 1024
return tokenizer
@require_torch
def lowerCamelCase__ ( self :int ):
'''simple docstring'''
a = self.canine_tokenizer
a = ["""Life is like a box of chocolates.""", """You never know what you're gonna get."""]
# fmt: off
a = [5_7344, 76, 105, 102, 101, 32, 105, 115, 32, 108, 105, 107, 101, 32, 97, 32, 98, 111, 120, 32, 111, 102, 32, 99, 104, 111, 99, 111, 108, 97, 116, 101, 115, 46, 5_7345, 0, 0, 0, 0]
# fmt: on
a = tokenizer(__magic_name__ , padding=__magic_name__ , return_tensors="""pt""" )
self.assertIsInstance(__magic_name__ , __magic_name__ )
a = list(batch.input_ids.numpy()[0] )
self.assertListEqual(__magic_name__ , __magic_name__ )
self.assertEqual((2, 39) , batch.input_ids.shape )
self.assertEqual((2, 39) , batch.attention_mask.shape )
@require_torch
def lowerCamelCase__ ( self :Any ):
'''simple docstring'''
a = self.canine_tokenizer
a = ["""Once there was a man.""", """He wrote a test in HuggingFace Tranformers."""]
a = tokenizer(__magic_name__ , padding=__magic_name__ , return_tensors="""pt""" )
# check if input_ids, attention_mask and token_type_ids are returned
self.assertIn("""input_ids""" , __magic_name__ )
self.assertIn("""attention_mask""" , __magic_name__ )
self.assertIn("""token_type_ids""" , __magic_name__ )
@require_torch
def lowerCamelCase__ ( self :Any ):
'''simple docstring'''
a = self.canine_tokenizer
a = [
"""What's the weater?""",
"""It's about 25 degrees.""",
]
a = tokenizer(
text_target=__magic_name__ , max_length=32 , padding="""max_length""" , truncation=__magic_name__ , return_tensors="""pt""" )
self.assertEqual(32 , targets["""input_ids"""].shape[1] )
def lowerCamelCase__ ( self :Union[str, Any] ):
'''simple docstring'''
a = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
a = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
# Isolate this from the other tests because we save additional tokens/etc
a = tempfile.mkdtemp()
a = """ He is very happy, UNwant\u00E9d,running"""
a = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
tokenizer.save_pretrained(__magic_name__ )
a = tokenizer.__class__.from_pretrained(__magic_name__ )
a = after_tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
self.assertListEqual(__magic_name__ , __magic_name__ )
shutil.rmtree(__magic_name__ )
a = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
# Isolate this from the other tests because we save additional tokens/etc
a = tempfile.mkdtemp()
a = """ He is very happy, UNwant\u00E9d,running"""
a = tokenizer.additional_special_tokens
# We can add a new special token for Canine as follows:
a = chr(0Xe_0_0_7 )
additional_special_tokens.append(__magic_name__ )
tokenizer.add_special_tokens({"""additional_special_tokens""": additional_special_tokens} )
a = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
tokenizer.save_pretrained(__magic_name__ )
a = tokenizer.__class__.from_pretrained(__magic_name__ )
a = after_tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
self.assertListEqual(__magic_name__ , __magic_name__ )
self.assertIn(__magic_name__ , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
a = tokenizer.__class__.from_pretrained(__magic_name__ , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(__magic_name__ )
def lowerCamelCase__ ( self :List[Any] ):
'''simple docstring'''
a = self.get_tokenizers(do_lower_case=__magic_name__ )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
a , a = self.get_clean_sequence(__magic_name__ )
# a special token for Canine can be defined as follows:
a = 0Xe_0_0_5
a = chr(__magic_name__ )
tokenizer.add_special_tokens({"""cls_token""": special_token} )
a = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
self.assertEqual(len(__magic_name__ ) , 1 )
a = tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=__magic_name__ )
a = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
a = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
a = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
self.assertEqual(__magic_name__ , input_encoded + special_token_id )
a = tokenizer.decode(__magic_name__ , skip_special_tokens=__magic_name__ )
self.assertTrue(special_token not in decoded )
def lowerCamelCase__ ( self :int ):
'''simple docstring'''
a = self.get_tokenizers(do_lower_case=__magic_name__ )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
a = chr(0Xe_0_0_5 )
a = chr(0Xe_0_0_6 )
# `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=__magic_name__ )
# `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
# which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
tokenizer.add_special_tokens({"""additional_special_tokens""": [SPECIAL_TOKEN_2]} )
a = tokenizer.tokenize(__magic_name__ )
a = tokenizer.tokenize(__magic_name__ )
self.assertEqual(len(__magic_name__ ) , 1 )
self.assertEqual(len(__magic_name__ ) , 1 )
self.assertEqual(token_a[0] , __magic_name__ )
self.assertEqual(token_a[0] , __magic_name__ )
@require_tokenizers
def lowerCamelCase__ ( self :Dict ):
'''simple docstring'''
a = self.get_tokenizers(do_lower_case=__magic_name__ )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
# a special token for Canine can be defined as follows:
a = 0Xe_0_0_6
a = chr(__magic_name__ )
a = AddedToken(__magic_name__ , lstrip=__magic_name__ )
tokenizer.add_special_tokens({"""additional_special_tokens""": [new_token]} )
with tempfile.TemporaryDirectory() as tmp_dir_name:
tokenizer.save_pretrained(__magic_name__ )
tokenizer.from_pretrained(__magic_name__ )
def lowerCamelCase__ ( self :int ):
'''simple docstring'''
a = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(__magic_name__ )
with open(os.path.join(__magic_name__ , """special_tokens_map.json""" ) , encoding="""utf-8""" ) as json_file:
a = json.load(__magic_name__ )
with open(os.path.join(__magic_name__ , """tokenizer_config.json""" ) , encoding="""utf-8""" ) as json_file:
a = json.load(__magic_name__ )
# a special token for Canine can be defined as follows:
a = 0Xe_0_0_6
a = chr(__magic_name__ )
a = [new_token_a]
a = [new_token_a]
with open(os.path.join(__magic_name__ , """special_tokens_map.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
json.dump(__magic_name__ , __magic_name__ )
with open(os.path.join(__magic_name__ , """tokenizer_config.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
json.dump(__magic_name__ , __magic_name__ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
a = tokenizer_class.from_pretrained(__magic_name__ , extra_ids=0 )
self.assertIn(__magic_name__ , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , )
a = 0Xe_0_0_7
a = chr(__magic_name__ )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
a = [AddedToken(__magic_name__ , lstrip=__magic_name__ )]
a = tokenizer_class.from_pretrained(
__magic_name__ , additional_special_tokens=__magic_name__ , extra_ids=0 )
self.assertIn(__magic_name__ , tokenizer.additional_special_tokens )
# self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) )
@require_tokenizers
def lowerCamelCase__ ( self :Union[str, Any] ):
'''simple docstring'''
a = self.get_tokenizers(do_lower_case=__magic_name__ )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
a = """hello world"""
if self.space_between_special_tokens:
a = """[CLS] hello world [SEP]"""
else:
a = input
a = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
a = tokenizer.decode(__magic_name__ , spaces_between_special_tokens=self.space_between_special_tokens )
self.assertIn(__magic_name__ , [output, output.lower()] )
def lowerCamelCase__ ( self :List[str] ):
'''simple docstring'''
a = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
a = [
"""bos_token""",
"""eos_token""",
"""unk_token""",
"""sep_token""",
"""pad_token""",
"""cls_token""",
"""mask_token""",
]
a = """a"""
a = ord(__magic_name__ )
for attr in attributes_list:
setattr(__magic_name__ , attr + """_id""" , __magic_name__ )
self.assertEqual(getattr(__magic_name__ , __magic_name__ ) , __magic_name__ )
self.assertEqual(getattr(__magic_name__ , attr + """_id""" ) , __magic_name__ )
setattr(__magic_name__ , attr + """_id""" , __magic_name__ )
self.assertEqual(getattr(__magic_name__ , __magic_name__ ) , __magic_name__ )
self.assertEqual(getattr(__magic_name__ , attr + """_id""" ) , __magic_name__ )
setattr(__magic_name__ , """additional_special_tokens_ids""" , [] )
self.assertListEqual(getattr(__magic_name__ , """additional_special_tokens""" ) , [] )
self.assertListEqual(getattr(__magic_name__ , """additional_special_tokens_ids""" ) , [] )
a = 0Xe_0_0_6
a = chr(__magic_name__ )
setattr(__magic_name__ , """additional_special_tokens_ids""" , [additional_special_token_id] )
self.assertListEqual(getattr(__magic_name__ , """additional_special_tokens""" ) , [additional_special_token] )
self.assertListEqual(getattr(__magic_name__ , """additional_special_tokens_ids""" ) , [additional_special_token_id] )
def lowerCamelCase__ ( self :Any ):
'''simple docstring'''
pass
def lowerCamelCase__ ( self :str ):
'''simple docstring'''
pass
def lowerCamelCase__ ( self :Optional[int] ):
'''simple docstring'''
pass
def lowerCamelCase__ ( self :List[Any] ):
'''simple docstring'''
pass
def lowerCamelCase__ ( self :List[str] ):
'''simple docstring'''
pass
def lowerCamelCase__ ( self :List[str] ):
'''simple docstring'''
pass
def lowerCamelCase__ ( self :Any ):
'''simple docstring'''
pass
def lowerCamelCase__ ( self :List[str] ):
'''simple docstring'''
pass
| 347 | 0 |
import os
from collections import deque
import torch
from torch.utils.data import Dataset
class __lowerCAmelCase ( __magic_name__ ):
def __init__( self :Any , __magic_name__ :Dict="" , __magic_name__ :Dict="train" ):
'''simple docstring'''
assert os.path.isdir(__a )
a = []
a = os.listdir(__a )
for story_filename in story_filenames_list:
if "summary" in story_filename:
continue
a = os.path.join(__a , __a )
if not os.path.isfile(__a ):
continue
self.documents.append(__a )
def __len__( self :Dict ):
'''simple docstring'''
return len(self.documents )
def __getitem__( self :str , __magic_name__ :int ):
'''simple docstring'''
a = self.documents[idx]
a = document_path.split("""/""" )[-1]
with open(__a , encoding="""utf-8""" ) as source:
a = source.read()
a , a = process_story(__a )
return document_name, story_lines, summary_lines
def __A ( __lowerCamelCase ) -> Tuple:
a = list(filter(lambda __lowerCamelCase : len(__snake_case ) != 0 , [line.strip() for line in raw_story.split("""\n""" )] ) )
# for some unknown reason some lines miss a period, add it
a = [_add_missing_period(__snake_case ) for line in nonempty_lines]
# gather article lines
a = []
a = deque(__snake_case )
while True:
try:
a = lines.popleft()
if element.startswith("""@highlight""" ):
break
story_lines.append(__snake_case )
except IndexError:
# if "@highlight" is absent from the file we pop
# all elements until there is None, raising an exception.
return story_lines, []
# gather summary lines
a = list(filter(lambda __lowerCamelCase : not t.startswith("""@highlight""" ) , __snake_case ) )
return story_lines, summary_lines
def __A ( __lowerCamelCase ) -> Union[str, Any]:
a = [""".""", """!""", """?""", """...""", """\'""", """`""", """\"""", """\u2019""", """\u2019""", """)"""]
if line.startswith("""@highlight""" ):
return line
if line[-1] in END_TOKENS:
return line
return line + "."
def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Optional[Any]:
if len(__snake_case ) > block_size:
return sequence[:block_size]
else:
sequence.extend([pad_token_id] * (block_size - len(__snake_case )) )
return sequence
def __A ( __lowerCamelCase , __lowerCamelCase ) -> List[Any]:
a = torch.ones_like(__snake_case )
a = sequence == pad_token_id
a = 0
return mask
def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Any:
a = [tokenizer.encode(__snake_case ) for line in story_lines]
a = [token for sentence in story_lines_token_ids for token in sentence]
a = [tokenizer.encode(__snake_case ) for line in summary_lines]
a = [token for sentence in summary_lines_token_ids for token in sentence]
return story_token_ids, summary_token_ids
def __A ( __lowerCamelCase , __lowerCamelCase ) -> Dict:
a = []
for sequence in batch:
a = -1
a = []
for s in sequence:
if s == separator_token_id:
sentence_num += 1
embeddings.append(sentence_num % 2 )
batch_embeddings.append(__snake_case )
return torch.tensor(__snake_case ) | 362 |
def __A ( __lowerCamelCase ) -> bool:
return number & 1 == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 347 | 0 |
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class __lowerCAmelCase ( _snake_case ):
UpperCamelCase__ = '''Speech2TextFeatureExtractor'''
UpperCamelCase__ = '''Speech2TextTokenizer'''
def __init__( self :List[str] , __magic_name__ :int , __magic_name__ :List[str] ):
'''simple docstring'''
super().__init__(UpperCamelCase__ , UpperCamelCase__ )
a = self.feature_extractor
a = False
def __call__( self :str , *__magic_name__ :int , **__magic_name__ :int ):
'''simple docstring'''
if self._in_target_context_manager:
return self.current_processor(*UpperCamelCase__ , **UpperCamelCase__ )
if "raw_speech" in kwargs:
warnings.warn("""Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.""" )
a = kwargs.pop("""raw_speech""" )
else:
a = kwargs.pop("""audio""" , UpperCamelCase__ )
a = kwargs.pop("""sampling_rate""" , UpperCamelCase__ )
a = kwargs.pop("""text""" , UpperCamelCase__ )
if len(UpperCamelCase__ ) > 0:
a = args[0]
a = args[1:]
if audio is None and text is None:
raise ValueError("""You need to specify either an `audio` or `text` input to process.""" )
if audio is not None:
a = self.feature_extractor(UpperCamelCase__ , *UpperCamelCase__ , sampling_rate=UpperCamelCase__ , **UpperCamelCase__ )
if text is not None:
a = self.tokenizer(UpperCamelCase__ , **UpperCamelCase__ )
if text is None:
return inputs
elif audio is None:
return encodings
else:
a = encodings["""input_ids"""]
return inputs
def lowerCamelCase__ ( self :Any , *__magic_name__ :Dict , **__magic_name__ :Union[str, Any] ):
'''simple docstring'''
return self.tokenizer.batch_decode(*UpperCamelCase__ , **UpperCamelCase__ )
def lowerCamelCase__ ( self :List[str] , *__magic_name__ :Optional[int] , **__magic_name__ :str ):
'''simple docstring'''
return self.tokenizer.decode(*UpperCamelCase__ , **UpperCamelCase__ )
@contextmanager
def lowerCamelCase__ ( self :str ):
'''simple docstring'''
warnings.warn(
"""`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your """
"""labels by using the argument `text` of the regular `__call__` method (either in the same call as """
"""your audio inputs, or in a separate call.""" )
a = True
a = self.tokenizer
yield
a = self.feature_extractor
a = False
| 363 |
def __A ( __lowerCamelCase ) -> int:
if not numbers:
return 0
if not isinstance(__lowerCamelCase , (list, tuple) ) or not all(
isinstance(__lowerCamelCase , __lowerCamelCase ) for number in numbers ):
raise ValueError("""numbers must be an iterable of integers""" )
a = a = a = numbers[0]
for i in range(1 , len(__lowerCamelCase ) ):
# update the maximum and minimum subarray products
a = numbers[i]
if number < 0:
a , a = min_till_now, max_till_now
a = max(__lowerCamelCase , max_till_now * number )
a = min(__lowerCamelCase , min_till_now * number )
# update the maximum product found till now
a = max(__lowerCamelCase , __lowerCamelCase )
return max_prod
| 347 | 0 |
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class __lowerCAmelCase :
@staticmethod
def lowerCamelCase__ ( *__magic_name__ :List[str] , **__magic_name__ :List[Any] ):
'''simple docstring'''
pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
UpperCamelCase__ = MODEL_FOR_OBJECT_DETECTION_MAPPING
def lowerCamelCase__ ( self :Optional[int] , __magic_name__ :List[Any] , __magic_name__ :Any , __magic_name__ :List[str] ):
'''simple docstring'''
a = ObjectDetectionPipeline(model=_A , image_processor=_A )
return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
def lowerCamelCase__ ( self :Tuple , __magic_name__ :Any , __magic_name__ :List[Any] ):
'''simple docstring'''
a = object_detector("""./tests/fixtures/tests_samples/COCO/000000039769.png""" , threshold=0.0 )
self.assertGreater(len(_A ) , 0 )
for detected_object in outputs:
self.assertEqual(
_A , {
"""score""": ANY(_A ),
"""label""": ANY(_A ),
"""box""": {"""xmin""": ANY(_A ), """ymin""": ANY(_A ), """xmax""": ANY(_A ), """ymax""": ANY(_A )},
} , )
import datasets
a = datasets.load_dataset("""hf-internal-testing/fixtures_image_utils""" , """image""" , split="""test""" )
a = [
Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ),
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
# RGBA
dataset[0]["""file"""],
# LA
dataset[1]["""file"""],
# L
dataset[2]["""file"""],
]
a = object_detector(_A , threshold=0.0 )
self.assertEqual(len(_A ) , len(_A ) )
for outputs in batch_outputs:
self.assertGreater(len(_A ) , 0 )
for detected_object in outputs:
self.assertEqual(
_A , {
"""score""": ANY(_A ),
"""label""": ANY(_A ),
"""box""": {"""xmin""": ANY(_A ), """ymin""": ANY(_A ), """xmax""": ANY(_A ), """ymax""": ANY(_A )},
} , )
@require_tf
@unittest.skip("""Object detection not implemented in TF""" )
def lowerCamelCase__ ( self :str ):
'''simple docstring'''
pass
@require_torch
def lowerCamelCase__ ( self :List[Any] ):
'''simple docstring'''
a = """hf-internal-testing/tiny-detr-mobilenetsv3"""
a = AutoModelForObjectDetection.from_pretrained(_A )
a = AutoFeatureExtractor.from_pretrained(_A )
a = ObjectDetectionPipeline(model=_A , feature_extractor=_A )
a = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" , threshold=0.0 )
self.assertEqual(
nested_simplify(_A , decimals=4 ) , [
{"""score""": 0.3376, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
{"""score""": 0.3376, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
] , )
a = object_detector(
[
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
] , threshold=0.0 , )
self.assertEqual(
nested_simplify(_A , decimals=4 ) , [
[
{"""score""": 0.3376, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
{"""score""": 0.3376, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
],
[
{"""score""": 0.3376, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
{"""score""": 0.3376, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
],
] , )
@require_torch
@slow
def lowerCamelCase__ ( self :List[str] ):
'''simple docstring'''
a = """facebook/detr-resnet-50"""
a = AutoModelForObjectDetection.from_pretrained(_A )
a = AutoFeatureExtractor.from_pretrained(_A )
a = ObjectDetectionPipeline(model=_A , feature_extractor=_A )
a = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" )
self.assertEqual(
nested_simplify(_A , decimals=4 ) , [
{"""score""": 0.9982, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.9960, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.9955, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.9988, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9987, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
] , )
a = object_detector(
[
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
] )
self.assertEqual(
nested_simplify(_A , decimals=4 ) , [
[
{"""score""": 0.9982, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.9960, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.9955, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.9988, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9987, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
],
[
{"""score""": 0.9982, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.9960, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.9955, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.9988, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9987, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
],
] , )
@require_torch
@slow
def lowerCamelCase__ ( self :int ):
'''simple docstring'''
a = """facebook/detr-resnet-50"""
a = pipeline("""object-detection""" , model=_A )
a = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" )
self.assertEqual(
nested_simplify(_A , decimals=4 ) , [
{"""score""": 0.9982, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.9960, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.9955, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.9988, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9987, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
] , )
a = object_detector(
[
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
] )
self.assertEqual(
nested_simplify(_A , decimals=4 ) , [
[
{"""score""": 0.9982, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.9960, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.9955, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.9988, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9987, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
],
[
{"""score""": 0.9982, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.9960, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.9955, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.9988, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9987, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
],
] , )
@require_torch
@slow
def lowerCamelCase__ ( self :Any ):
'''simple docstring'''
a = 0.9985
a = """facebook/detr-resnet-50"""
a = pipeline("""object-detection""" , model=_A )
a = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" , threshold=_A )
self.assertEqual(
nested_simplify(_A , decimals=4 ) , [
{"""score""": 0.9988, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9987, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
] , )
@require_torch
@require_pytesseract
@slow
def lowerCamelCase__ ( self :List[Any] ):
'''simple docstring'''
a = """Narsil/layoutlmv3-finetuned-funsd"""
a = 0.9993
a = pipeline("""object-detection""" , model=_A , threshold=_A )
a = object_detector(
"""https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png""" )
self.assertEqual(
nested_simplify(_A , decimals=4 ) , [
{"""score""": 0.9993, """label""": """I-ANSWER""", """box""": {"""xmin""": 294, """ymin""": 254, """xmax""": 343, """ymax""": 264}},
{"""score""": 0.9993, """label""": """I-ANSWER""", """box""": {"""xmin""": 294, """ymin""": 254, """xmax""": 343, """ymax""": 264}},
] , )
| 364 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__UpperCamelCase : Optional[Any] = {
"configuration_wav2vec2": ["WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Wav2Vec2Config"],
"feature_extraction_wav2vec2": ["Wav2Vec2FeatureExtractor"],
"processing_wav2vec2": ["Wav2Vec2Processor"],
"tokenization_wav2vec2": ["Wav2Vec2CTCTokenizer", "Wav2Vec2Tokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : str = [
"WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
"Wav2Vec2ForAudioFrameClassification",
"Wav2Vec2ForCTC",
"Wav2Vec2ForMaskedLM",
"Wav2Vec2ForPreTraining",
"Wav2Vec2ForSequenceClassification",
"Wav2Vec2ForXVector",
"Wav2Vec2Model",
"Wav2Vec2PreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Dict = [
"TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFWav2Vec2ForCTC",
"TFWav2Vec2Model",
"TFWav2Vec2PreTrainedModel",
"TFWav2Vec2ForSequenceClassification",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Union[str, Any] = [
"FlaxWav2Vec2ForCTC",
"FlaxWav2Vec2ForPreTraining",
"FlaxWav2Vec2Model",
"FlaxWav2Vec2PreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .processing_wavaveca import WavaVecaProcessor
from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavaveca import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
WavaVecaForAudioFrameClassification,
WavaVecaForCTC,
WavaVecaForMaskedLM,
WavaVecaForPreTraining,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
WavaVecaModel,
WavaVecaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWavaVecaForCTC,
TFWavaVecaForSequenceClassification,
TFWavaVecaModel,
TFWavaVecaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
FlaxWavaVecaForCTC,
FlaxWavaVecaForPreTraining,
FlaxWavaVecaModel,
FlaxWavaVecaPreTrainedModel,
)
else:
import sys
__UpperCamelCase : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 347 | 0 |
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : Any = logging.get_logger(__name__)
__UpperCamelCase : int = {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/config.json",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/config.json",
}
class __lowerCAmelCase ( a__ ):
UpperCamelCase__ = "xlnet"
UpperCamelCase__ = ["mems"]
UpperCamelCase__ = {
"n_token": "vocab_size", # Backward compatibility
"hidden_size": "d_model",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self :List[Any] , __magic_name__ :Any=3_2000 , __magic_name__ :Tuple=1024 , __magic_name__ :Any=24 , __magic_name__ :Union[str, Any]=16 , __magic_name__ :Optional[int]=4096 , __magic_name__ :Optional[Any]="gelu" , __magic_name__ :Any=True , __magic_name__ :Optional[Any]="bi" , __magic_name__ :Dict=0.02 , __magic_name__ :Union[str, Any]=1E-1_2 , __magic_name__ :List[Any]=0.1 , __magic_name__ :List[Any]=512 , __magic_name__ :Optional[int]=None , __magic_name__ :int=True , __magic_name__ :Optional[int]=False , __magic_name__ :Tuple=False , __magic_name__ :Union[str, Any]=-1 , __magic_name__ :int=False , __magic_name__ :Union[str, Any]="last" , __magic_name__ :int=True , __magic_name__ :List[str]="tanh" , __magic_name__ :Any=0.1 , __magic_name__ :Union[str, Any]=5 , __magic_name__ :Dict=5 , __magic_name__ :Any=5 , __magic_name__ :str=1 , __magic_name__ :Tuple=2 , **__magic_name__ :Optional[int] , ):
'''simple docstring'''
a = vocab_size
a = d_model
a = n_layer
a = n_head
if d_model % n_head != 0:
raise ValueError(F'\'d_model % n_head\' ({d_model % n_head}) should be equal to 0' )
if "d_head" in kwargs:
if kwargs["d_head"] != d_model // n_head:
raise ValueError(
F'`d_head` ({kwargs["d_head"]}) should be equal to `d_model // n_head` ({d_model // n_head})' )
a = d_model // n_head
a = ff_activation
a = d_inner
a = untie_r
a = attn_type
a = initializer_range
a = layer_norm_eps
a = dropout
a = mem_len
a = reuse_len
a = bi_data
a = clamp_len
a = same_length
a = summary_type
a = summary_use_proj
a = summary_activation
a = summary_last_dropout
a = start_n_top
a = end_n_top
a = bos_token_id
a = pad_token_id
a = eos_token_id
if "use_cache" in kwargs:
warnings.warn(
"""The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"""
""" instead.""" , SCREAMING_SNAKE_CASE_ , )
a = kwargs['use_cache']
a = use_mems_eval
a = use_mems_train
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE_ , bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
@property
def lowerCamelCase__ ( self :Optional[int] ):
'''simple docstring'''
logger.info(F'The model {self.model_type} is one of the few models that has no sequence length limit.' )
return -1
@max_position_embeddings.setter
def lowerCamelCase__ ( self :Optional[int] , __magic_name__ :Optional[int] ):
'''simple docstring'''
raise NotImplementedError(
F'The model {self.model_type} is one of the few models that has no sequence length limit.' )
| 365 |
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize("""dataset_size""" , [None, 400 * 2**20, 600 * 2**20] )
@pytest.mark.parametrize("""input_in_memory_max_size""" , ["""default""", 0, 100 * 2**20, 900 * 2**20] )
def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Any:
if input_in_memory_max_size != "default":
monkeypatch.setattr(datasets.config , """IN_MEMORY_MAX_SIZE""" , __lowerCamelCase )
a = datasets.config.IN_MEMORY_MAX_SIZE
if input_in_memory_max_size == "default":
assert in_memory_max_size == 0
else:
assert in_memory_max_size == input_in_memory_max_size
if dataset_size and in_memory_max_size:
a = dataset_size < in_memory_max_size
else:
a = False
a = is_small_dataset(__lowerCamelCase )
assert result == expected
| 347 | 0 |
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError("At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training")
# TF training parameters
__UpperCamelCase : Union[str, Any] = False
__UpperCamelCase : str = False
def __A ( __lowerCamelCase ) -> List[str]:
return TrainCommand(lowerCAmelCase__ )
class __lowerCAmelCase ( _SCREAMING_SNAKE_CASE ):
@staticmethod
def lowerCamelCase__ ( __magic_name__ :Optional[int] ):
'''simple docstring'''
a = parser.add_parser("""train""" , help="""CLI tool to train a model on a task.""" )
train_parser.add_argument(
"""--train_data""" , type=__magic_name__ , required=__magic_name__ , help="""path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.""" , )
train_parser.add_argument(
"""--column_label""" , type=__magic_name__ , default=0 , help="""Column of the dataset csv file with example labels.""" )
train_parser.add_argument(
"""--column_text""" , type=__magic_name__ , default=1 , help="""Column of the dataset csv file with example texts.""" )
train_parser.add_argument(
"""--column_id""" , type=__magic_name__ , default=2 , help="""Column of the dataset csv file with example ids.""" )
train_parser.add_argument(
"""--skip_first_row""" , action="""store_true""" , help="""Skip the first row of the csv file (headers).""" )
train_parser.add_argument("""--validation_data""" , type=__magic_name__ , default="""""" , help="""path to validation dataset.""" )
train_parser.add_argument(
"""--validation_split""" , type=__magic_name__ , default=0.1 , help="""if validation dataset is not provided, fraction of train dataset to use as validation dataset.""" , )
train_parser.add_argument("""--output""" , type=__magic_name__ , default="""./""" , help="""path to saved the trained model.""" )
train_parser.add_argument(
"""--task""" , type=__magic_name__ , default="""text_classification""" , help="""Task to train the model on.""" )
train_parser.add_argument(
"""--model""" , type=__magic_name__ , default="""bert-base-uncased""" , help="""Model\'s name or path to stored model.""" )
train_parser.add_argument("""--train_batch_size""" , type=__magic_name__ , default=32 , help="""Batch size for training.""" )
train_parser.add_argument("""--valid_batch_size""" , type=__magic_name__ , default=64 , help="""Batch size for validation.""" )
train_parser.add_argument("""--learning_rate""" , type=__magic_name__ , default=3E-5 , help="""Learning rate.""" )
train_parser.add_argument("""--adam_epsilon""" , type=__magic_name__ , default=1E-0_8 , help="""Epsilon for Adam optimizer.""" )
train_parser.set_defaults(func=__magic_name__ )
def __init__( self :Union[str, Any] , __magic_name__ :Tuple ):
'''simple docstring'''
a = logging.get_logger("""transformers-cli/training""" )
a = """tf""" if is_tf_available() else """torch"""
os.makedirs(args.output , exist_ok=__magic_name__ )
a = args.output
a = args.column_label
a = args.column_text
a = args.column_id
self.logger.info(F'Loading {args.task} pipeline for {args.model}' )
if args.task == "text_classification":
a = TextClassificationPipeline.from_pretrained(args.model )
elif args.task == "token_classification":
raise NotImplementedError
elif args.task == "question_answering":
raise NotImplementedError
self.logger.info(F'Loading dataset from {args.train_data}' )
a = Processor.create_from_csv(
args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
a = None
if args.validation_data:
self.logger.info(F'Loading validation dataset from {args.validation_data}' )
a = Processor.create_from_csv(
args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
a = args.validation_split
a = args.train_batch_size
a = args.valid_batch_size
a = args.learning_rate
a = args.adam_epsilon
def lowerCamelCase__ ( self :str ):
'''simple docstring'''
if self.framework == "tf":
return self.run_tf()
return self.run_torch()
def lowerCamelCase__ ( self :Tuple ):
'''simple docstring'''
raise NotImplementedError
def lowerCamelCase__ ( self :Union[str, Any] ):
'''simple docstring'''
self.pipeline.fit(
self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , )
# Save trained pipeline
self.pipeline.save_pretrained(self.output )
| 366 |
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def __A ( __lowerCamelCase ) -> bool:
a = int(number**0.5 )
return number == sq * sq
def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> tuple[int, int]:
a = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
a = x_den * y_den * z_den
a = gcd(__lowerCamelCase , __lowerCamelCase )
top //= hcf
bottom //= hcf
return top, bottom
def __A ( __lowerCamelCase = 35 ) -> int:
a = set()
a = 42
a = Fraction(0 )
a = 42
for x_num in range(1 , order + 1 ):
for x_den in range(x_num + 1 , order + 1 ):
for y_num in range(1 , order + 1 ):
for y_den in range(y_num + 1 , order + 1 ):
# n=1
a = x_num * y_den + x_den * y_num
a = x_den * y_den
a = gcd(__lowerCamelCase , __lowerCamelCase )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
a = add_three(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
unique_s.add(__lowerCamelCase )
# n=2
a = (
x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
)
a = x_den * x_den * y_den * y_den
if is_sq(__lowerCamelCase ) and is_sq(__lowerCamelCase ):
a = int(sqrt(__lowerCamelCase ) )
a = int(sqrt(__lowerCamelCase ) )
a = gcd(__lowerCamelCase , __lowerCamelCase )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
a = add_three(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
unique_s.add(__lowerCamelCase )
# n=-1
a = x_num * y_num
a = x_den * y_num + x_num * y_den
a = gcd(__lowerCamelCase , __lowerCamelCase )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
a = add_three(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
unique_s.add(__lowerCamelCase )
# n=2
a = x_num * x_num * y_num * y_num
a = (
x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
)
if is_sq(__lowerCamelCase ) and is_sq(__lowerCamelCase ):
a = int(sqrt(__lowerCamelCase ) )
a = int(sqrt(__lowerCamelCase ) )
a = gcd(__lowerCamelCase , __lowerCamelCase )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
a = add_three(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
unique_s.add(__lowerCamelCase )
for num, den in unique_s:
total += Fraction(__lowerCamelCase , __lowerCamelCase )
return total.denominator + total.numerator
if __name__ == "__main__":
print(F'{solution() = }')
| 347 | 0 |
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
__UpperCamelCase : Dict = logging.get_logger(__name__)
logging.set_verbosity_info()
def __A ( __lowerCamelCase , __lowerCamelCase ) -> List[Any]:
if "xprophetnet" in prophetnet_checkpoint_path:
a = XLMProphetNetForConditionalGenerationOld.from_pretrained(__lowerCamelCase )
a = XLMProphetNetForConditionalGeneration.from_pretrained(
__lowerCamelCase , output_loading_info=__lowerCamelCase )
else:
a = ProphetNetForConditionalGenerationOld.from_pretrained(__lowerCamelCase )
a = ProphetNetForConditionalGeneration.from_pretrained(
__lowerCamelCase , output_loading_info=__lowerCamelCase )
a = ['''key_proj''', '''value_proj''', '''query_proj''']
a = {
'''self_attn''': '''ngram_self_attn''',
'''cross_attn''': '''encoder_attn''',
'''cross_attn_layer_norm''': '''encoder_attn_layer_norm''',
'''feed_forward_layer_norm''': '''final_layer_norm''',
'''feed_forward''': '''''',
'''intermediate''': '''fc1''',
'''output''': '''fc2''',
'''key_proj''': '''k_proj''',
'''query_proj''': '''q_proj''',
'''value_proj''': '''v_proj''',
'''word_embeddings''': '''embed_tokens''',
'''embeddings_layer_norm''': '''emb_layer_norm''',
'''relative_pos_embeddings''': '''relative_linear''',
'''ngram_embeddings''': '''ngram_input_embed''',
'''position_embeddings''': '''embed_positions''',
}
for key in loading_info["missing_keys"]:
a = key.split(""".""" )
if attributes[0] == "lm_head":
a = prophet
a = prophet_old
else:
a = prophet.prophetnet
a = prophet_old.model
a = False
for attribute in attributes:
if attribute in mapping:
a = mapping[attribute]
if not hasattr(__lowerCamelCase , __lowerCamelCase ) and len(__lowerCamelCase ) > 0:
a = attribute
elif hasattr(__lowerCamelCase , __lowerCamelCase ):
a = attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
a = old_model.weight
logger.info(f'{attribute} is initialized.' )
a = True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
a = old_model.bias
logger.info(f'{attribute} is initialized' )
a = True
break
elif attribute in special_keys and hasattr(__lowerCamelCase , """in_proj_weight""" ):
a = old_model.in_proj_weight.shape[0] // 3
a = getattr(__lowerCamelCase , __lowerCamelCase )
param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
if attribute == "query_proj":
a = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
a = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
elif attribute == "key_proj":
a = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
a = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
elif attribute == "value_proj":
a = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
a = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
a = True
break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
a = nn.Parameter(old_model.embed_positions.weight[:512, :] )
a = True
break
if attribute.isdigit():
a = model[int(__lowerCamelCase )]
a = old_model[int(__lowerCamelCase )]
else:
a = getattr(__lowerCamelCase , __lowerCamelCase )
if old_attribute == "":
a = old_model
else:
if not hasattr(__lowerCamelCase , __lowerCamelCase ):
raise ValueError(f'{old_model} does not have {old_attribute}' )
a = getattr(__lowerCamelCase , __lowerCamelCase )
if not is_key_init:
raise ValueError(f'{key} was not correctly initialized!' )
print(f'Saving model to {pytorch_dump_folder_path}' )
prophet.save_pretrained(__lowerCamelCase )
if __name__ == "__main__":
__UpperCamelCase : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--prophetnet_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
__UpperCamelCase : Optional[int] = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
| 367 |
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class __lowerCAmelCase ( unittest.TestCase ):
def __init__( self :List[str] , __magic_name__ :List[str] , __magic_name__ :List[Any]=13 , __magic_name__ :Any=7 , __magic_name__ :Optional[int]=True , __magic_name__ :List[Any]=True , __magic_name__ :Optional[int]=True , __magic_name__ :Union[str, Any]=True , __magic_name__ :Any=99 , __magic_name__ :List[str]=32 , __magic_name__ :List[str]=5 , __magic_name__ :str=4 , __magic_name__ :str=37 , __magic_name__ :Optional[int]="gelu" , __magic_name__ :int=0.1 , __magic_name__ :Dict=0.1 , __magic_name__ :List[str]=512 , __magic_name__ :Tuple=16 , __magic_name__ :Tuple=2 , __magic_name__ :List[str]=0.02 , __magic_name__ :Any=4 , ):
'''simple docstring'''
a = parent
a = batch_size
a = seq_length
a = is_training
a = use_attention_mask
a = use_token_type_ids
a = use_labels
a = vocab_size
a = hidden_size
a = num_hidden_layers
a = num_attention_heads
a = intermediate_size
a = hidden_act
a = hidden_dropout_prob
a = attention_probs_dropout_prob
a = max_position_embeddings
a = type_vocab_size
a = type_sequence_label_size
a = initializer_range
a = num_choices
def lowerCamelCase__ ( self :Optional[int] ):
'''simple docstring'''
a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
a = None
if self.use_attention_mask:
a = random_attention_mask([self.batch_size, self.seq_length] )
a = None
if self.use_token_type_ids:
a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
a = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__magic_name__ , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def lowerCamelCase__ ( self :Optional[Any] ):
'''simple docstring'''
a = self.prepare_config_and_inputs()
a , a , a , a = config_and_inputs
a = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
@require_flax
class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ):
UpperCamelCase__ = True
UpperCamelCase__ = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowerCamelCase__ ( self :List[Any] ):
'''simple docstring'''
a = FlaxRoFormerModelTester(self )
@slow
def lowerCamelCase__ ( self :List[str] ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
a = model_class_name.from_pretrained("""junnyu/roformer_chinese_small""" , from_pt=__magic_name__ )
a = model(np.ones((1, 1) ) )
self.assertIsNotNone(__magic_name__ )
@require_flax
class __lowerCAmelCase ( unittest.TestCase ):
@slow
def lowerCamelCase__ ( self :Union[str, Any] ):
'''simple docstring'''
a = FlaxRoFormerForMaskedLM.from_pretrained("""junnyu/roformer_chinese_base""" )
a = jnp.array([[0, 1, 2, 3, 4, 5]] )
a = model(__magic_name__ )[0]
a = 5_0000
a = (1, 6, vocab_size)
self.assertEqual(output.shape , __magic_name__ )
a = jnp.array(
[[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]] )
self.assertTrue(jnp.allclose(output[:, :3, :3] , __magic_name__ , atol=1E-4 ) )
| 347 | 0 |
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
__UpperCamelCase : str = "true"
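

# The checks in this file exercise `Accelerator.gather_for_metrics`, which
# gathers tensors from every process and, unlike plain `gather`, drops the
# duplicate samples that distributed samplers append to make the dataset
# evenly divisible. Each test is run with and without `dispatch_batches` and
# `split_batches` to cover the different batch-sharding paths.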
def get_basic_setup(accelerator, num_samples=82, batch_size=16):
    """Return a baseline regression model, its accelerator-prepared copy, and a prepared dataloader."""
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dataset = RegressionDataset(length=num_samples)
    dataloader = DataLoader(dataset, batch_size=batch_size)
    model.to(accelerator.device)
    ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    return model, ddp_model, dataloader
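

# A minimal usage sketch (assumes the script runs under `accelerate launch`,
# so `Accelerator()` picks up the distributed environment):
#
#   accelerator = Accelerator()
#   model, ddp_model, dataloader = get_basic_setup(accelerator, num_samples=82)
#
# `model` stays unwrapped as a single-process reference, while `ddp_model` and
# `dataloader` are the sharded versions used by the distributed checks below.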
def get_dataloader(accelerator, use_longest=False):
    """Build a tokenized GLUE/MRPC validation dataloader with per-batch or fixed-length padding."""
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased")
    dataset = load_dataset("glue", "mrpc", split="validation")

    def tokenize_function(examples):
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        if use_longest:
            return tokenizer.pad(examples, padding="longest", return_tensors="pt")
        return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")

    return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16)
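

# Design note: `use_longest` switches between per-batch ("longest") padding
# and fixed `max_length` padding. The caller below passes `not dispatch_batches`;
# the apparent intent (an inference from the call site, not a documented
# contract) is that batches dispatched from the main process are built with a
# fixed shape, while locally built batches may pad to their own longest sample.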
def get_mrpc_setup(dispatch_batches, split_batches):
    accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches)
    dataloader = get_dataloader(accelerator, not dispatch_batches)
    model = AutoModelForSequenceClassification.from_pretrained(
        "hf-internal-testing/mrpc-bert-base-cased", return_dict=True
    )
    ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def generate_predictions(model, dataloader, accelerator):
    logits_and_targets = []
    for batch in dataloader:
        input, target = batch.values()
        with torch.no_grad():
            logit = model(input)
        logit, target = accelerator.gather_for_metrics((logit, target))
        logits_and_targets.append((logit, target))
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit)
        targs.append(targ)
    logits, targs = torch.cat(logits), torch.cat(targs)
    return logits, targs
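# gather_for_metrics should drop the duplicate samples used to pad the last
# batch, so exactly `num_samples` predictions must come back.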
def test_torch_metrics(
    accelerator: Accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16
):
    model, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
    logits, targs = generate_predictions(ddp_model, dataloader, accelerator)
    assert (
        len(logits) == num_samples
    ), f"Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits)}"
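# Computes the MRPC metric once on a single process and once distributed;
# the two results must match for gather_for_metrics to be correct.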
def test_mrpc(dispatch_batches: bool = False, split_batches: bool = False):
    metric = evaluate.load("glue", "mrpc")
    setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
    # First do baseline
    model, dataloader, device = setup["no"]
    model.to(device)
    model.eval()
    for batch in dataloader:
        batch.to(device)
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=preds, references=batch["labels"])
    baseline = metric.compute()

    # Then do distributed
    model, dataloader, device = setup["ddp"]
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        references = batch["labels"]
        preds, references = accelerator.gather_for_metrics((preds, references))
        metric.add_batch(predictions=preds, references=references)
    distributed = metric.compute()

    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]
        ), f"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"
def main():
    accelerator = Accelerator(split_batches=False, dispatch_batches=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be run on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print("**Testing gather_for_metrics**")
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`")
                test_mrpc(dispatch_batches, split_batches)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test torch metrics**")
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
            if accelerator.is_local_main_process:
                print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99")
            test_torch_metrics(accelerator, 99)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test last batch is not dropped when perfectly divisible**")
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 512)
    accelerator.state._reset_state()
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 368 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
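# Lazy import structure: each submodule maps to its public symbols, and
# _LazyModule only imports a submodule on first attribute access, keeping the
# top-level package import cheap.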
_import_structure = {
"configuration_blenderbot": [
"BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlenderbotConfig",
"BlenderbotOnnxConfig",
],
"tokenization_blenderbot": ["BlenderbotTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : List[Any] = ["BlenderbotTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blenderbot"] = [
"BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotForCausalLM",
"BlenderbotForConditionalGeneration",
"BlenderbotModel",
"BlenderbotPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blenderbot"] = [
"TFBlenderbotForConditionalGeneration",
"TFBlenderbotModel",
"TFBlenderbotPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_blenderbot"] = [
"FlaxBlenderbotForConditionalGeneration",
"FlaxBlenderbotModel",
"FlaxBlenderbotPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 347 | 0 |
"""simple docstring"""
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
__UpperCamelCase : int = logging.getLogger()
def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f
def get_results(output_dir):
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results
def is_cuda_and_apex_available():
    is_using_cuda = torch.cuda.is_available() and torch_device == "cuda"
    return is_using_cuda and is_apex_available()
__UpperCamelCase : int = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
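# Each test below launches the corresponding example script through
# `accelerate launch` and then checks the metrics it wrote to all_results.json.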
class ExamplesTestsNoTrainer(TestCasePlus):
    @classmethod
    def setUpClass(cls):
        # Write Accelerate config, will pick up on CPU, GPU, and multi-GPU
        cls.tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls.tmpdir, "default_config.yml")
        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass(cls):
        shutil.rmtree(cls.tmpdir)
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
    def test_run_glue_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --seed=42\n --checkpointing_steps epoch\n --with_tracking\n '.split()

        if is_cuda_and_apex_available():
            testargs.append("--fp16")

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.75)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "glue_no_trainer")))
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
    def test_run_clm_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --block_size 128\n --per_device_train_batch_size 5\n --per_device_eval_batch_size 5\n --num_train_epochs 2\n --output_dir {tmp_dir}\n --checkpointing_steps epoch\n --with_tracking\n '.split()

        if torch.cuda.device_count() > 1:
            # Skipping because there are not enough batches to train the model + would need a drop_last to work.
            return

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertLess(result["perplexity"], 100)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "clm_no_trainer")))
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
    def test_run_mlm_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --num_train_epochs=1\n --checkpointing_steps epoch\n --with_tracking\n '.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertLess(result["perplexity"], 42)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "mlm_no_trainer")))
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
    def test_run_ner_no_trainer(self):
        epochs = 7 if get_gpu_count() > 1 else 2
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n --checkpointing_steps epoch\n --with_tracking\n '.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.75)
        self.assertLess(result["train_loss"], 0.5)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "ner_no_trainer")))
@unittest.skip(reason="""Fix me @muellerzr""" )
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
    def test_run_squad_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --seed=42\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n '.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        # Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
        self.assertGreaterEqual(result["eval_f1"], 28)
        self.assertGreaterEqual(result["eval_exact"], 28)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "qa_no_trainer")))
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
    def test_run_swag_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/swag/sample.json\n --validation_file tests/fixtures/tests_samples/swag/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=20\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --with_tracking\n '.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.8)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "swag_no_trainer")))
@slow
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
    def test_run_summarization_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n '.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_rouge1"], 10)
        self.assertGreaterEqual(result["eval_rouge2"], 2)
        self.assertGreaterEqual(result["eval_rougeL"], 7)
        self.assertGreaterEqual(result["eval_rougeLsum"], 7)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "summarization_no_trainer")))
@slow
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
    def test_run_translation_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py\n --model_name_or_path sshleifer/student_marian_en_ro_6_1\n --source_lang en\n --target_lang ro\n --train_file tests/fixtures/tests_samples/wmt16/sample.json\n --validation_file tests/fixtures/tests_samples/wmt16/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --num_beams=6\n --learning_rate=3e-3\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --source_lang en_XX\n --target_lang ro_RO\n --checkpointing_steps epoch\n --with_tracking\n '.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_bleu"], 30)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "translation_no_trainer")))
@slow
    def test_run_semantic_segmentation_no_trainer(self):
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n {self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py\n --dataset_name huggingface/semantic-segmentation-test-sample\n --output_dir {tmp_dir}\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n '.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_overall_accuracy"], 0.10)
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
    def test_run_image_classification_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py\n --model_name_or_path google/vit-base-patch16-224-in21k\n --dataset_name hf-internal-testing/cats_vs_dogs_sample\n --learning_rate 1e-4\n --per_device_train_batch_size 2\n --per_device_eval_batch_size 1\n --max_train_steps 2\n --train_val_split 0.1\n --seed 42\n --output_dir {tmp_dir}\n --with_tracking\n --checkpointing_steps 1\n '.split()

        if is_cuda_and_apex_available():
            testargs.append("--fp16")

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        # The base model scores a 25%
        self.assertGreaterEqual(result["eval_accuracy"], 0.6)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "step_1")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "image_classification_no_trainer")))
| 369 |
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class IPNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (IPNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {"num_train_timesteps": 1000}
        config.update(**kwargs)
        return config
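    # Saves the scheduler config, reloads it, and verifies that step() gives
    # identical outputs before and after the save/load round trip.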
    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def test_from_save_pretrained(self):
        pass
    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample
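    # step() must preserve the sample shape across consecutive timesteps.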
    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)
    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps, time_step=None)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=None)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 2540529) < 10
| 347 | 0 |
import sys
import turtle
def get_mid(pa: tuple[float, float], pb: tuple[float, float]) -> tuple[float, float]:
    return (pa[0] + pb[0]) / 2, (pa[1] + pb[1]) / 2
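# Quick sanity check (not in the original script):
# get_mid((0, 0), (2, 2)) -> (1.0, 1.0)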
def triangle(vertexa, vertexb, vertexc, depth) -> None:
    # Draw the current triangle, then recurse into the three corner triangles.
    my_pen.up()
    my_pen.goto(vertexa[0], vertexa[1])
    my_pen.down()
    my_pen.goto(vertexb[0], vertexb[1])
    my_pen.goto(vertexc[0], vertexc[1])
    my_pen.goto(vertexa[0], vertexa[1])

    if depth == 0:
        return

    triangle(vertexa, get_mid(vertexa, vertexb), get_mid(vertexa, vertexc), depth - 1)
    triangle(vertexb, get_mid(vertexa, vertexb), get_mid(vertexb, vertexc), depth - 1)
    triangle(vertexc, get_mid(vertexc, vertexb), get_mid(vertexa, vertexc), depth - 1)
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
"Correct format for using this script: "
"python fractals.py <int:depth_for_fractal>"
)
    my_pen = turtle.Turtle()
my_pen.ht()
my_pen.speed(5)
my_pen.pencolor("red")
    vertices = [(-175, -125), (0, 175), (175, -125)]  # vertices of triangle
triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
| 370 |
__UpperCamelCase : Dict = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
def main() -> None:
    message = input("Enter message: ")
    key = input("Enter key [alphanumeric]: ")
    mode = input("Encrypt/Decrypt [e/d]: ")

    if mode.lower().startswith("e"):
        mode = "encrypt"
        translated = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        mode = "decrypt"
        translated = decrypt_message(key, message)

    print(f"\n{mode.title()}ed message:")
    print(translated)
def encrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "encrypt")


def decrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "decrypt")
def translate_message(key: str, message: str, mode: str) -> str:
    translated = []
    key_index = 0
    key = key.upper()

    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            # Shift letters by the current key letter; leave other symbols as-is.
            if mode == "encrypt":
                num += LETTERS.find(key[key_index])
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index])
            num %= len(LETTERS)

            if symbol.isupper():
                translated.append(LETTERS[num])
            elif symbol.islower():
                translated.append(LETTERS[num].lower())

            key_index += 1
            if key_index == len(key):
                key_index = 0
        else:
            translated.append(symbol)
    return "".join(translated)
if __name__ == "__main__":
main()
| 347 | 0 |