code
stringlengths
86
54.5k
code_codestyle
int64
0
371
style_context
stringlengths
87
49.2k
style_context_codestyle
int64
0
349
label
int64
0
1
from maths.prime_check import is_prime def __A ( __lowerCamelCase ) -> int: if not isinstance(__lowerCamelCase , __lowerCamelCase ): a = f'Input value of [number={number}] must be an integer' raise TypeError(__lowerCamelCase ) if is_prime(__lowerCamelCase ) and is_prime(number + 2 ): return number + 2 else: return -1 if __name__ == "__main__": import doctest doctest.testmod()
347
import tempfile import torch from diffusers import IPNDMScheduler from .test_schedulers import SchedulerCommonTest class __lowerCAmelCase ( __magic_name__ ): UpperCamelCase__ = (IPNDMScheduler,) UpperCamelCase__ = (('''num_inference_steps''', 50),) def lowerCamelCase__ ( self :Any , **__magic_name__ :Optional[Any] ): '''simple docstring''' a = {"""num_train_timesteps""": 1000} config.update(**__magic_name__ ) return config def lowerCamelCase__ ( self :Optional[int] , __magic_name__ :Tuple=0 , **__magic_name__ :Optional[int] ): '''simple docstring''' a = dict(self.forward_default_kwargs ) a = kwargs.pop("""num_inference_steps""" , __magic_name__ ) a = self.dummy_sample a = 0.1 * sample a = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: a = self.get_scheduler_config(**__magic_name__ ) a = scheduler_class(**__magic_name__ ) scheduler.set_timesteps(__magic_name__ ) # copy over dummy past residuals a = dummy_past_residuals[:] if time_step is None: a = scheduler.timesteps[len(scheduler.timesteps ) // 2] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__magic_name__ ) a = scheduler_class.from_pretrained(__magic_name__ ) new_scheduler.set_timesteps(__magic_name__ ) # copy over dummy past residuals a = dummy_past_residuals[:] a = scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample a = new_scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" a = scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample a = new_scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def lowerCamelCase__ ( self :Union[str, Any] ): '''simple docstring''' pass def lowerCamelCase__ ( self :List[Any] , __magic_name__ :List[Any]=0 , **__magic_name__ :Any ): '''simple docstring''' a = dict(self.forward_default_kwargs ) a = kwargs.pop("""num_inference_steps""" , __magic_name__ ) a = self.dummy_sample a = 0.1 * sample a = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: a = self.get_scheduler_config() a = scheduler_class(**__magic_name__ ) scheduler.set_timesteps(__magic_name__ ) # copy over dummy past residuals (must be after setting timesteps) a = dummy_past_residuals[:] if time_step is None: a = scheduler.timesteps[len(scheduler.timesteps ) // 2] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__magic_name__ ) a = scheduler_class.from_pretrained(__magic_name__ ) # copy over dummy past residuals new_scheduler.set_timesteps(__magic_name__ ) # copy over dummy past residual (must be after setting timesteps) a = dummy_past_residuals[:] a = scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample a = new_scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" a = scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample a = new_scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def lowerCamelCase__ ( self :Optional[Any] , **__magic_name__ :Optional[int] ): '''simple docstring''' a = self.scheduler_classes[0] a = self.get_scheduler_config(**__magic_name__ ) a = scheduler_class(**__magic_name__ ) a = 10 a = self.dummy_model() a = self.dummy_sample_deter scheduler.set_timesteps(__magic_name__ ) for i, t in enumerate(scheduler.timesteps ): a = model(__magic_name__ , __magic_name__ ) a = scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ ).prev_sample for i, t in enumerate(scheduler.timesteps ): a = model(__magic_name__ , __magic_name__ ) a = scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ ).prev_sample return sample def lowerCamelCase__ ( self :str ): '''simple docstring''' a = dict(self.forward_default_kwargs ) a = kwargs.pop("""num_inference_steps""" , __magic_name__ ) for scheduler_class in self.scheduler_classes: a = self.get_scheduler_config() a = scheduler_class(**__magic_name__ ) a = self.dummy_sample a = 0.1 * sample if num_inference_steps is not None and hasattr(__magic_name__ , """set_timesteps""" ): scheduler.set_timesteps(__magic_name__ ) elif num_inference_steps is not None and not hasattr(__magic_name__ , """set_timesteps""" ): a = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) a = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] a = dummy_past_residuals[:] a = scheduler.timesteps[5] a = scheduler.timesteps[6] a = scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample a = scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) a = scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample a = scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def lowerCamelCase__ ( self :List[Any] ): '''simple docstring''' for timesteps in [100, 1000]: self.check_over_configs(num_train_timesteps=__magic_name__ , time_step=__magic_name__ ) def lowerCamelCase__ ( self :Dict ): '''simple docstring''' for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ): self.check_over_forward(num_inference_steps=__magic_name__ , time_step=__magic_name__ ) def lowerCamelCase__ ( self :Tuple ): '''simple docstring''' a = self.full_loop() a = torch.mean(torch.abs(__magic_name__ ) ) assert abs(result_mean.item() - 254_0529 ) < 10
347
1
__UpperCamelCase : int = "Alexander Joslin" import operator as op from .stack import Stack def __A ( __lowerCamelCase ) -> int: a = {"""*""": op.mul, """/""": op.truediv, """+""": op.add, """-""": op.sub} a = Stack() a = Stack() for i in equation: if i.isdigit(): # RULE 1 operand_stack.push(int(__lowerCamelCase ) ) elif i in operators: # RULE 2 operator_stack.push(__lowerCamelCase ) elif i == ")": # RULE 4 a = operator_stack.peek() operator_stack.pop() a = operand_stack.peek() operand_stack.pop() a = operand_stack.peek() operand_stack.pop() a = operators[opr](__lowerCamelCase , __lowerCamelCase ) operand_stack.push(__lowerCamelCase ) # RULE 5 return operand_stack.peek() if __name__ == "__main__": __UpperCamelCase : Tuple = "(5 + ((4 * 2) * (2 + 3)))" # answer = 45 print(F'{equation} = {dijkstras_two_stack_algorithm(equation)}')
347
__UpperCamelCase : Dict = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" def __A ( ) -> None: a = input("""Enter message: """ ) a = input("""Enter key [alphanumeric]: """ ) a = input("""Encrypt/Decrypt [e/d]: """ ) if mode.lower().startswith("""e""" ): a = """encrypt""" a = encrypt_message(__lowerCamelCase , __lowerCamelCase ) elif mode.lower().startswith("""d""" ): a = """decrypt""" a = decrypt_message(__lowerCamelCase , __lowerCamelCase ) print(f'\n{mode.title()}ed message:' ) print(__lowerCamelCase ) def __A ( __lowerCamelCase , __lowerCamelCase ) -> str: return translate_message(__lowerCamelCase , __lowerCamelCase , """encrypt""" ) def __A ( __lowerCamelCase , __lowerCamelCase ) -> str: return translate_message(__lowerCamelCase , __lowerCamelCase , """decrypt""" ) def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> str: a = [] a = 0 a = key.upper() for symbol in message: a = LETTERS.find(symbol.upper() ) if num != -1: if mode == "encrypt": num += LETTERS.find(key[key_index] ) elif mode == "decrypt": num -= LETTERS.find(key[key_index] ) num %= len(__lowerCamelCase ) if symbol.isupper(): translated.append(LETTERS[num] ) elif symbol.islower(): translated.append(LETTERS[num].lower() ) key_index += 1 if key_index == len(__lowerCamelCase ): a = 0 else: translated.append(__lowerCamelCase ) return "".join(__lowerCamelCase ) if __name__ == "__main__": main()
347
1
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __UpperCamelCase : Optional[Any] = logging.get_logger(__name__) __UpperCamelCase : Optional[int] = { "hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json", # See all YOLOS models at https://huggingface.co/models?filter=yolos } class __lowerCAmelCase ( __magic_name__ ): UpperCamelCase__ = '''yolos''' def __init__( self :Optional[int] , __magic_name__ :Tuple=768 , __magic_name__ :Dict=12 , __magic_name__ :List[Any]=12 , __magic_name__ :str=3072 , __magic_name__ :Optional[int]="gelu" , __magic_name__ :Optional[Any]=0.0 , __magic_name__ :List[Any]=0.0 , __magic_name__ :Dict=0.02 , __magic_name__ :Optional[Any]=1E-1_2 , __magic_name__ :Optional[int]=[512, 864] , __magic_name__ :List[Any]=16 , __magic_name__ :Any=3 , __magic_name__ :List[str]=True , __magic_name__ :Union[str, Any]=100 , __magic_name__ :Dict=True , __magic_name__ :List[str]=False , __magic_name__ :List[str]=1 , __magic_name__ :str=5 , __magic_name__ :int=2 , __magic_name__ :str=5 , __magic_name__ :int=2 , __magic_name__ :int=0.1 , **__magic_name__ :int , ): '''simple docstring''' super().__init__(**__magic_name__ ) a = hidden_size a = num_hidden_layers a = num_attention_heads a = intermediate_size a = hidden_act a = hidden_dropout_prob a = attention_probs_dropout_prob a = initializer_range a = layer_norm_eps a = image_size a = patch_size a = num_channels a = qkv_bias a = num_detection_tokens a = use_mid_position_embeddings a = auxiliary_loss # Hungarian matcher a = class_cost a = bbox_cost a = giou_cost # Loss coefficients a = bbox_loss_coefficient a = giou_loss_coefficient a = eos_coefficient class __lowerCAmelCase ( __magic_name__ ): UpperCamelCase__ = version.parse('''1.11''' ) @property def lowerCamelCase__ ( self :Optional[Any] ): '''simple docstring''' return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) @property def lowerCamelCase__ ( self :int ): '''simple docstring''' return 1E-4 @property def lowerCamelCase__ ( self :int ): '''simple docstring''' return 12
347
import unittest import numpy as np from transformers import RobertaConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_flax_available(): from transformers.models.roberta.modeling_flax_roberta import ( FlaxRobertaForCausalLM, FlaxRobertaForMaskedLM, FlaxRobertaForMultipleChoice, FlaxRobertaForQuestionAnswering, FlaxRobertaForSequenceClassification, FlaxRobertaForTokenClassification, FlaxRobertaModel, ) class __lowerCAmelCase ( unittest.TestCase ): def __init__( self :Optional[int] , __magic_name__ :Tuple , __magic_name__ :Tuple=13 , __magic_name__ :List[Any]=7 , __magic_name__ :Optional[Any]=True , __magic_name__ :List[Any]=True , __magic_name__ :Union[str, Any]=True , __magic_name__ :List[str]=True , __magic_name__ :str=99 , __magic_name__ :Optional[Any]=32 , __magic_name__ :Union[str, Any]=5 , __magic_name__ :Any=4 , __magic_name__ :int=37 , __magic_name__ :Tuple="gelu" , __magic_name__ :List[str]=0.1 , __magic_name__ :Dict=0.1 , __magic_name__ :Tuple=512 , __magic_name__ :Dict=16 , __magic_name__ :Optional[int]=2 , __magic_name__ :Optional[int]=0.02 , __magic_name__ :Optional[Any]=4 , ): '''simple docstring''' a = parent a = batch_size a = seq_length a = is_training a = use_attention_mask a = use_token_type_ids a = use_labels a = vocab_size a = hidden_size a = num_hidden_layers a = num_attention_heads a = intermediate_size a = hidden_act a = hidden_dropout_prob a = attention_probs_dropout_prob a = max_position_embeddings a = type_vocab_size a = type_sequence_label_size a = initializer_range a = num_choices def lowerCamelCase__ ( self :int ): '''simple docstring''' a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) a = None if self.use_attention_mask: a = random_attention_mask([self.batch_size, self.seq_length] ) a = None if self.use_token_type_ids: a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) a = RobertaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__magic_name__ , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def lowerCamelCase__ ( self :List[Any] ): '''simple docstring''' a = self.prepare_config_and_inputs() a , a , a , a = config_and_inputs a = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask} return config, inputs_dict def lowerCamelCase__ ( self :Dict ): '''simple docstring''' a = self.prepare_config_and_inputs() a , a , a , a = config_and_inputs a = True a = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) a = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, encoder_hidden_states, encoder_attention_mask, ) @require_flax class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ): UpperCamelCase__ = True UpperCamelCase__ = ( ( FlaxRobertaModel, FlaxRobertaForCausalLM, FlaxRobertaForMaskedLM, FlaxRobertaForSequenceClassification, FlaxRobertaForTokenClassification, FlaxRobertaForMultipleChoice, FlaxRobertaForQuestionAnswering, ) if is_flax_available() else () ) def lowerCamelCase__ ( self :Dict ): '''simple docstring''' a = FlaxRobertaModelTester(self ) @slow def lowerCamelCase__ ( self :Optional[Any] ): '''simple docstring''' for model_class_name in self.all_model_classes: a = model_class_name.from_pretrained("""roberta-base""" , from_pt=__magic_name__ ) a = model(np.ones((1, 1) ) ) self.assertIsNotNone(__magic_name__ )
347
1
from . import ( albert, align, altclip, audio_spectrogram_transformer, auto, autoformer, bark, bart, barthez, bartpho, beit, bert, bert_generation, bert_japanese, bertweet, big_bird, bigbird_pegasus, biogpt, bit, blenderbot, blenderbot_small, blip, blip_a, bloom, bridgetower, byta, camembert, canine, chinese_clip, clap, clip, clipseg, codegen, conditional_detr, convbert, convnext, convnextva, cpm, cpmant, ctrl, cvt, dataavec, deberta, deberta_va, decision_transformer, deformable_detr, deit, deprecated, deta, detr, dialogpt, dinat, distilbert, dit, donut, dpr, dpt, efficientformer, efficientnet, electra, encodec, encoder_decoder, ernie, ernie_m, esm, falcon, flaubert, flava, fnet, focalnet, fsmt, funnel, git, glpn, gpta, gpt_bigcode, gpt_neo, gpt_neox, gpt_neox_japanese, gpt_swa, gptj, gptsan_japanese, graphormer, groupvit, herbert, hubert, ibert, imagegpt, informer, instructblip, jukebox, layoutlm, layoutlmva, layoutlmva, layoutxlm, led, levit, lilt, llama, longformer, longta, luke, lxmert, mam_aaa, marian, markuplm, maskaformer, maskformer, mbart, mbartaa, mega, megatron_bert, megatron_gpta, mgp_str, mluke, mobilebert, mobilenet_va, mobilenet_va, mobilevit, mobilevitva, mpnet, mra, mta, musicgen, mvp, nat, nezha, nllb, nllb_moe, nystromformer, oneformer, open_llama, openai, opt, owlvit, pegasus, pegasus_x, perceiver, phobert, pixastruct, plbart, poolformer, prophetnet, qdqbert, rag, realm, reformer, regnet, rembert, resnet, roberta, roberta_prelayernorm, roc_bert, roformer, rwkv, sam, segformer, sew, sew_d, speech_encoder_decoder, speech_to_text, speech_to_text_a, speechta, splinter, squeezebert, swiftformer, swin, swinasr, swinva, switch_transformers, ta, table_transformer, tapas, time_series_transformer, timesformer, timm_backbone, transfo_xl, trocr, tvlt, umta, unispeech, unispeech_sat, upernet, videomae, vilt, vision_encoder_decoder, vision_text_dual_encoder, visual_bert, vit, vit_hybrid, vit_mae, vit_msn, vivit, wavaveca, wavaveca_conformer, wavaveca_phoneme, wavaveca_with_lm, wavlm, whisper, x_clip, xglm, xlm, xlm_prophetnet, xlm_roberta, xlm_roberta_xl, xlnet, xmod, yolos, yoso, )
347
import io import itertools import json from dataclasses import dataclass from typing import Optional import pyarrow as pa import pyarrow.json as paj import datasets from datasets.table import table_cast from datasets.utils.file_utils import readline __UpperCamelCase : Any = datasets.utils.logging.get_logger(__name__) @dataclass class __lowerCAmelCase ( datasets.BuilderConfig ): UpperCamelCase__ = None UpperCamelCase__ = "utf-8" UpperCamelCase__ = None UpperCamelCase__ = None UpperCamelCase__ = True # deprecated UpperCamelCase__ = None # deprecated UpperCamelCase__ = 10 << 20 # 10MB UpperCamelCase__ = None class __lowerCAmelCase ( datasets.ArrowBasedBuilder ): UpperCamelCase__ = JsonConfig def lowerCamelCase__ ( self :str ): '''simple docstring''' if self.config.block_size is not None: logger.warning("""The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead""" ) a = self.config.block_size if self.config.use_threads is not True: logger.warning( """The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore.""" ) if self.config.newlines_in_values is not None: raise ValueError("""The JSON loader parameter `newlines_in_values` is no longer supported""" ) return datasets.DatasetInfo(features=self.config.features ) def lowerCamelCase__ ( self :Tuple , __magic_name__ :str ): '''simple docstring''' if not self.config.data_files: raise ValueError(F'At least one data file must be specified, but got data_files={self.config.data_files}' ) a = dl_manager.download_and_extract(self.config.data_files ) if isinstance(__magic_name__ , (str, list, tuple) ): a = data_files if isinstance(__magic_name__ , __magic_name__ ): a = [files] a = [dl_manager.iter_files(__magic_name__ ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files} )] a = [] for split_name, files in data_files.items(): if isinstance(__magic_name__ , __magic_name__ ): a = [files] a = [dl_manager.iter_files(__magic_name__ ) for file in files] splits.append(datasets.SplitGenerator(name=__magic_name__ , gen_kwargs={"""files""": files} ) ) return splits def lowerCamelCase__ ( self :List[str] , __magic_name__ :pa.Table ): '''simple docstring''' if self.config.features is not None: # adding missing columns for column_name in set(self.config.features ) - set(pa_table.column_names ): a = self.config.features.arrow_schema.field(__magic_name__ ).type a = pa_table.append_column(__magic_name__ , pa.array([None] * len(__magic_name__ ) , type=__magic_name__ ) ) # more expensive cast to support nested structures with keys in a different order # allows str <-> int/float or str to Audio for example a = table_cast(__magic_name__ , self.config.features.arrow_schema ) return pa_table def lowerCamelCase__ ( self :Optional[int] , __magic_name__ :Union[str, Any] ): '''simple docstring''' for file_idx, file in enumerate(itertools.chain.from_iterable(__magic_name__ ) ): # If the file is one json object and if we need to look at the list of items in one specific field if self.config.field is not None: with open(__magic_name__ , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f: a = json.load(__magic_name__ ) # We keep only the field we are interested in a = dataset[self.config.field] # We accept two format: a list of dicts or a dict of lists if isinstance(__magic_name__ , (list, tuple) ): a = set().union(*[row.keys() for row in dataset] ) a = {col: [row.get(__magic_name__ ) for row in dataset] for col in keys} else: a = dataset a = pa.Table.from_pydict(__magic_name__ ) yield file_idx, self._cast_table(__magic_name__ ) # If the file has one json object per line else: with open(__magic_name__ , """rb""" ) as f: a = 0 # Use block_size equal to the chunk size divided by 32 to leverage multithreading # Set a default minimum value of 16kB if the chunk size is really small a = max(self.config.chunksize // 32 , 16 << 10 ) a = ( self.config.encoding_errors if self.config.encoding_errors is not None else """strict""" ) while True: a = f.read(self.config.chunksize ) if not batch: break # Finish current line try: batch += f.readline() except (AttributeError, io.UnsupportedOperation): batch += readline(__magic_name__ ) # PyArrow only accepts utf-8 encoded bytes if self.config.encoding != "utf-8": a = batch.decode(self.config.encoding , errors=__magic_name__ ).encode("""utf-8""" ) try: while True: try: a = paj.read_json( io.BytesIO(__magic_name__ ) , read_options=paj.ReadOptions(block_size=__magic_name__ ) ) break except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e: if ( isinstance(__magic_name__ , pa.ArrowInvalid ) and "straddling" not in str(__magic_name__ ) or block_size > len(__magic_name__ ) ): raise else: # Increase the block size in case it was too small. # The block size will be reset for the next file. logger.debug( F'Batch of {len(__magic_name__ )} bytes couldn\'t be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.' ) block_size *= 2 except pa.ArrowInvalid as e: try: with open( __magic_name__ , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f: a = json.load(__magic_name__ ) except json.JSONDecodeError: logger.error(F'Failed to read file \'{file}\' with error {type(__magic_name__ )}: {e}' ) raise e # If possible, parse the file as a list of json objects and exit the loop if isinstance(__magic_name__ , __magic_name__ ): # list is the only sequence type supported in JSON try: a = set().union(*[row.keys() for row in dataset] ) a = {col: [row.get(__magic_name__ ) for row in dataset] for col in keys} a = pa.Table.from_pydict(__magic_name__ ) except (pa.ArrowInvalid, AttributeError) as e: logger.error(F'Failed to read file \'{file}\' with error {type(__magic_name__ )}: {e}' ) raise ValueError(F'Not able to read records in the JSON file at {file}.' ) from None yield file_idx, self._cast_table(__magic_name__ ) break else: logger.error(F'Failed to read file \'{file}\' with error {type(__magic_name__ )}: {e}' ) raise ValueError( F'Not able to read records in the JSON file at {file}. ' F'You should probably indicate the field of the JSON file containing your records. ' F'This JSON file contain the following fields: {str(list(dataset.keys() ) )}. ' F'Select the correct one and provide it as `field=\'XXX\'` to the dataset loading method. ' ) from None # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(__magic_name__ ) batch_idx += 1
347
1
import unittest from accelerate import debug_launcher from accelerate.test_utils import require_cpu, test_ops, test_script @require_cpu class __lowerCAmelCase ( unittest.TestCase ): def lowerCamelCase__ ( self :List[Any] ): '''simple docstring''' debug_launcher(test_script.main ) def lowerCamelCase__ ( self :Tuple ): '''simple docstring''' debug_launcher(test_ops.main )
347
import warnings from typing import Dict, List, Optional, Tuple from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging __UpperCamelCase : Dict = logging.get_logger(__name__) class __lowerCAmelCase ( __magic_name__ ): UpperCamelCase__ = ['''input_ids''', '''attention_mask'''] def __init__( self :List[str] , __magic_name__ :int="</s>" , __magic_name__ :List[Any]="<unk>" , __magic_name__ :Optional[Any]="<pad>" , __magic_name__ :Optional[int]=125 , __magic_name__ :List[str]=None , **__magic_name__ :List[str] , ): '''simple docstring''' if extra_ids > 0 and additional_special_tokens is None: a = [F'<extra_id_{i}>' for i in range(__magic_name__ )] elif extra_ids > 0 and additional_special_tokens is not None: # Check that we have the right number of extra_id special tokens a = len(set(filter(lambda __magic_name__ : bool("""extra_id""" in str(__magic_name__ ) ) , __magic_name__ ) ) ) if extra_tokens != extra_ids: raise ValueError( F'Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are' """ provided to ByT5Tokenizer. In this case the additional_special_tokens must include the""" """ extra_ids tokens""" ) a = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else pad_token a = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else eos_token a = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else unk_token super().__init__( eos_token=__magic_name__ , unk_token=__magic_name__ , pad_token=__magic_name__ , extra_ids=__magic_name__ , additional_special_tokens=__magic_name__ , **__magic_name__ , ) a = extra_ids a = 2**8 # utf is 8 bits # define special tokens dict a = { self.pad_token: 0, self.eos_token: 1, self.unk_token: 2, } a = len(self.special_tokens_encoder ) a = len(__magic_name__ ) for i, token in enumerate(__magic_name__ ): a = self.vocab_size + i - n a = {v: k for k, v in self.special_tokens_encoder.items()} @property def lowerCamelCase__ ( self :List[Any] ): '''simple docstring''' return self._utf_vocab_size + self._num_special_tokens + self._extra_ids def lowerCamelCase__ ( self :Any , __magic_name__ :List[int] , __magic_name__ :Optional[List[int]] = None , __magic_name__ :bool = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__magic_name__ , token_ids_a=__magic_name__ , already_has_special_tokens=__magic_name__ ) # normal case: some special tokens if token_ids_a is None: return ([0] * len(__magic_name__ )) + [1] return ([0] * len(__magic_name__ )) + [1] + ([0] * len(__magic_name__ )) + [1] def lowerCamelCase__ ( self :str , __magic_name__ :List[int] ): '''simple docstring''' if len(__magic_name__ ) > 0 and token_ids[-1] == self.eos_token_id: warnings.warn( F'This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated' """ eos tokens being added.""" ) return token_ids else: return token_ids + [self.eos_token_id] def lowerCamelCase__ ( self :Union[str, Any] , __magic_name__ :List[int] , __magic_name__ :Optional[List[int]] = None ): '''simple docstring''' a = [self.eos_token_id] if token_ids_a is None: return len(token_ids_a + eos ) * [0] return len(token_ids_a + eos + token_ids_a + eos ) * [0] def lowerCamelCase__ ( self :Union[str, Any] , __magic_name__ :List[int] , __magic_name__ :Optional[List[int]] = None ): '''simple docstring''' a = self._add_eos_if_not_present(__magic_name__ ) if token_ids_a is None: return token_ids_a else: a = self._add_eos_if_not_present(__magic_name__ ) return token_ids_a + token_ids_a def lowerCamelCase__ ( self :List[str] , __magic_name__ :str ): '''simple docstring''' a = [chr(__magic_name__ ) for i in text.encode("""utf-8""" )] return tokens def lowerCamelCase__ ( self :Tuple , __magic_name__ :str ): '''simple docstring''' if token in self.special_tokens_encoder: a = self.special_tokens_encoder[token] elif token in self.added_tokens_encoder: a = self.added_tokens_encoder[token] elif len(__magic_name__ ) != 1: a = self.unk_token_id else: a = ord(__magic_name__ ) + self._num_special_tokens return token_id def lowerCamelCase__ ( self :List[str] , __magic_name__ :Dict ): '''simple docstring''' if index in self.special_tokens_decoder: a = self.special_tokens_decoder[index] else: a = chr(index - self._num_special_tokens ) return token def lowerCamelCase__ ( self :Tuple , __magic_name__ :Optional[int] ): '''simple docstring''' a = b"""""" for token in tokens: if token in self.special_tokens_decoder: a = self.special_tokens_decoder[token].encode("""utf-8""" ) elif token in self.added_tokens_decoder: a = self.special_tokens_decoder[token].encode("""utf-8""" ) elif token in self.special_tokens_encoder: a = token.encode("""utf-8""" ) elif token in self.added_tokens_encoder: a = token.encode("""utf-8""" ) else: a = bytes([ord(__magic_name__ )] ) bstring += tok_string a = bstring.decode("""utf-8""" , errors="""ignore""" ) return string def lowerCamelCase__ ( self :Optional[Any] , __magic_name__ :str , __magic_name__ :Optional[str] = None ): '''simple docstring''' return ()
347
1
def __A ( __lowerCamelCase ) -> "list[int]": if upper_limit < 0: raise ValueError("""Limit for the Catalan sequence must be ≥ 0""" ) a = [0] * (upper_limit + 1) # Base case: C(0) = C(1) = 1 a = 1 if upper_limit > 0: a = 1 # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i for i in range(2 , upper_limit + 1 ): for j in range(__lowerCamelCase ): catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1] return catalan_list if __name__ == "__main__": print("\n********* Catalan Numbers Using Dynamic Programming ************\n") print("\n*** Enter -1 at any time to quit ***") print("\nEnter the upper limit (≥ 0) for the Catalan number sequence: ", end="") try: while True: __UpperCamelCase : Union[str, Any] = int(input().strip()) if N < 0: print("\n********* Goodbye!! ************") break else: print(F'The Catalan numbers from 0 through {N} are:') print(catalan_numbers(N)) print("Try another upper limit for the sequence: ", end="") except (NameError, ValueError): print("\n********* Invalid input, goodbye! ************\n") import doctest doctest.testmod()
347
import copy import unittest from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_MULTIPLE_CHOICE_MAPPING, MODEL_FOR_QUESTION_ANSWERING_MAPPING, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, LayoutLMvaConfig, LayoutLMvaForQuestionAnswering, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaModel, ) from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class __lowerCAmelCase : def __init__( self :Optional[int] , __magic_name__ :str , __magic_name__ :int=2 , __magic_name__ :List[str]=3 , __magic_name__ :Optional[int]=4 , __magic_name__ :str=2 , __magic_name__ :Any=7 , __magic_name__ :Optional[Any]=True , __magic_name__ :List[Any]=True , __magic_name__ :Optional[int]=True , __magic_name__ :Dict=True , __magic_name__ :List[Any]=99 , __magic_name__ :Dict=36 , __magic_name__ :Optional[Any]=3 , __magic_name__ :str=4 , __magic_name__ :Optional[Any]=37 , __magic_name__ :Dict="gelu" , __magic_name__ :Any=0.1 , __magic_name__ :Union[str, Any]=0.1 , __magic_name__ :Dict=512 , __magic_name__ :str=16 , __magic_name__ :List[Any]=2 , __magic_name__ :Tuple=0.02 , __magic_name__ :Any=6 , __magic_name__ :Optional[int]=6 , __magic_name__ :Tuple=3 , __magic_name__ :str=4 , __magic_name__ :List[str]=None , __magic_name__ :str=1000 , ): '''simple docstring''' a = parent a = batch_size a = num_channels a = image_size a = patch_size a = text_seq_length a = is_training a = use_input_mask a = use_token_type_ids a = use_labels a = vocab_size a = hidden_size a = num_hidden_layers a = num_attention_heads a = intermediate_size a = hidden_act a = hidden_dropout_prob a = attention_probs_dropout_prob a = max_position_embeddings a = type_vocab_size a = type_sequence_label_size a = initializer_range a = coordinate_size a = shape_size a = num_labels a = num_choices a = scope a = range_bbox # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token) a = text_seq_length a = (image_size // patch_size) ** 2 + 1 a = self.text_seq_length + self.image_seq_length def lowerCamelCase__ ( self :List[str] ): '''simple docstring''' a = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size ) a = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox ) # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: a = bbox[i, j, 3] a = bbox[i, j, 1] a = t if bbox[i, j, 2] < bbox[i, j, 0]: a = bbox[i, j, 2] a = bbox[i, j, 0] a = t a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) a = None if self.use_input_mask: a = random_attention_mask([self.batch_size, self.text_seq_length] ) a = None if self.use_token_type_ids: a = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size ) a = None a = None if self.use_labels: a = ids_tensor([self.batch_size] , self.type_sequence_label_size ) a = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels ) a = LayoutLMvaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , ) return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels def lowerCamelCase__ ( self :int , __magic_name__ :Union[str, Any] , __magic_name__ :List[str] , __magic_name__ :List[str] , __magic_name__ :int , __magic_name__ :Tuple , __magic_name__ :int , __magic_name__ :Tuple , __magic_name__ :int ): '''simple docstring''' a = LayoutLMvaModel(config=__magic_name__ ) model.to(__magic_name__ ) model.eval() # text + image a = model(__magic_name__ , pixel_values=__magic_name__ ) a = model( __magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ ) a = model(__magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , token_type_ids=__magic_name__ ) a = model(__magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) # text only a = model(__magic_name__ ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) ) # image only a = model(pixel_values=__magic_name__ ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) ) def lowerCamelCase__ ( self :Optional[Any] , __magic_name__ :List[Any] , __magic_name__ :Optional[int] , __magic_name__ :Tuple , __magic_name__ :Union[str, Any] , __magic_name__ :int , __magic_name__ :Optional[int] , __magic_name__ :List[str] , __magic_name__ :List[str] ): '''simple docstring''' a = self.num_labels a = LayoutLMvaForSequenceClassification(__magic_name__ ) model.to(__magic_name__ ) model.eval() a = model( __magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , labels=__magic_name__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase__ ( self :Tuple , __magic_name__ :Dict , __magic_name__ :Optional[Any] , __magic_name__ :Optional[Any] , __magic_name__ :Dict , __magic_name__ :Dict , __magic_name__ :int , __magic_name__ :List[str] , __magic_name__ :Tuple ): '''simple docstring''' a = self.num_labels a = LayoutLMvaForTokenClassification(config=__magic_name__ ) model.to(__magic_name__ ) model.eval() a = model( __magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , labels=__magic_name__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) ) def lowerCamelCase__ ( self :Optional[int] , __magic_name__ :Tuple , __magic_name__ :str , __magic_name__ :int , __magic_name__ :Optional[int] , __magic_name__ :str , __magic_name__ :List[str] , __magic_name__ :Optional[int] , __magic_name__ :Optional[Any] ): '''simple docstring''' a = LayoutLMvaForQuestionAnswering(config=__magic_name__ ) model.to(__magic_name__ ) model.eval() a = model( __magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , start_positions=__magic_name__ , end_positions=__magic_name__ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCamelCase__ ( self :Union[str, Any] ): '''simple docstring''' a = self.prepare_config_and_inputs() ( ( a ) , ( a ) , ( a ) , ( a ) , ( a ) , ( a ) , ( a ) , ( a ) , ) = config_and_inputs a = { """input_ids""": input_ids, """bbox""": bbox, """pixel_values""": pixel_values, """token_type_ids""": token_type_ids, """attention_mask""": input_mask, } return config, inputs_dict @require_torch class __lowerCAmelCase ( __magic_name__ , __magic_name__ , unittest.TestCase ): UpperCamelCase__ = False UpperCamelCase__ = False UpperCamelCase__ = False UpperCamelCase__ = ( ( LayoutLMvaModel, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaForQuestionAnswering, ) if is_torch_available() else () ) UpperCamelCase__ = ( {'''document-question-answering''': LayoutLMvaForQuestionAnswering, '''feature-extraction''': LayoutLMvaModel} if is_torch_available() else {} ) def lowerCamelCase__ ( self :List[str] , __magic_name__ :Union[str, Any] , __magic_name__ :List[str] , __magic_name__ :Optional[Any] , __magic_name__ :List[str] , __magic_name__ :List[Any] ): '''simple docstring''' return True def lowerCamelCase__ ( self :int ): '''simple docstring''' a = LayoutLMvaModelTester(self ) a = ConfigTester(self , config_class=__magic_name__ , hidden_size=37 ) def lowerCamelCase__ ( self :Optional[Any] , __magic_name__ :Dict , __magic_name__ :Dict , __magic_name__ :Any=False ): '''simple docstring''' a = copy.deepcopy(__magic_name__ ) if model_class in get_values(__magic_name__ ): a = { k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous() if isinstance(__magic_name__ , torch.Tensor ) and v.ndim > 1 else v for k, v in inputs_dict.items() } if return_labels: if model_class in get_values(__magic_name__ ): a = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=__magic_name__ ) elif model_class in get_values(__magic_name__ ): a = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__magic_name__ ) a = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__magic_name__ ) elif model_class in [ *get_values(__magic_name__ ), ]: a = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__magic_name__ ) elif model_class in [ *get_values(__magic_name__ ), ]: a = torch.zeros( (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=__magic_name__ , ) return inputs_dict def lowerCamelCase__ ( self :str ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCamelCase__ ( self :Union[str, Any] ): '''simple docstring''' a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__magic_name__ ) def lowerCamelCase__ ( self :Tuple ): '''simple docstring''' a = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: a = type self.model_tester.create_and_check_model(*__magic_name__ ) def lowerCamelCase__ ( self :Tuple ): '''simple docstring''' a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*__magic_name__ ) def lowerCamelCase__ ( self :Union[str, Any] ): '''simple docstring''' a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__magic_name__ ) def lowerCamelCase__ ( self :int ): '''simple docstring''' a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__magic_name__ ) @slow def lowerCamelCase__ ( self :int ): '''simple docstring''' for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a = LayoutLMvaModel.from_pretrained(__magic_name__ ) self.assertIsNotNone(__magic_name__ ) def __A ( ) -> str: a = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch class __lowerCAmelCase ( unittest.TestCase ): @cached_property def lowerCamelCase__ ( self :Optional[Any] ): '''simple docstring''' return LayoutLMvaImageProcessor(apply_ocr=__magic_name__ ) if is_vision_available() else None @slow def lowerCamelCase__ ( self :Optional[Any] ): '''simple docstring''' a = LayoutLMvaModel.from_pretrained("""microsoft/layoutlmv3-base""" ).to(__magic_name__ ) a = self.default_image_processor a = prepare_img() a = image_processor(images=__magic_name__ , return_tensors="""pt""" ).pixel_values.to(__magic_name__ ) a = torch.tensor([[1, 2]] ) a = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 ) # forward pass a = model( input_ids=input_ids.to(__magic_name__ ) , bbox=bbox.to(__magic_name__ ) , pixel_values=pixel_values.to(__magic_name__ ) , ) # verify the logits a = torch.Size((1, 199, 768) ) self.assertEqual(outputs.last_hidden_state.shape , __magic_name__ ) a = torch.tensor( [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] ).to(__magic_name__ ) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , __magic_name__ , atol=1E-4 ) )
347
1
import os import sys import unittest __UpperCamelCase : List[str] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, "utils")) import check_dummies # noqa: E402 from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402 # Align TRANSFORMERS_PATH in check_dummies with the current path __UpperCamelCase : Optional[Any] = os.path.join(git_repo_path, "src", "transformers") __UpperCamelCase : Tuple = "\n{0} = None\n" __UpperCamelCase : int = "\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n" __UpperCamelCase : str = "\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n" class __lowerCAmelCase ( unittest.TestCase ): def lowerCamelCase__ ( self :Optional[Any] ): '''simple docstring''' a = find_backend(""" _import_structure[\"models.albert\"].append(\"AlbertTokenizerFast\")""" ) self.assertIsNone(__magic_name__ ) a = find_backend(""" if not is_tokenizers_available():""" ) self.assertEqual(__magic_name__ , """tokenizers""" ) a = find_backend(""" if not is_tensorflow_text_available():""" ) self.assertEqual(__magic_name__ , """tensorflow_text""" ) a = find_backend(""" if not (is_sentencepiece_available() and is_tokenizers_available()):""" ) self.assertEqual(__magic_name__ , """sentencepiece_and_tokenizers""" ) a = find_backend( """ if not (is_sentencepiece_available() and is_tensorflow_text_available()):""" ) self.assertEqual(__magic_name__ , """sentencepiece_and_tensorflow_text""" ) a = find_backend( """ if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):""" ) self.assertEqual(__magic_name__ , """sentencepiece_and_tokenizers_and_vision""" ) def lowerCamelCase__ ( self :Dict ): '''simple docstring''' a = read_init() # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects self.assertIn("""torch""" , __magic_name__ ) self.assertIn("""tensorflow_text""" , __magic_name__ ) self.assertIn("""sentencepiece_and_tokenizers""" , __magic_name__ ) # Likewise, we can't assert on the exact content of a key self.assertIn("""BertModel""" , objects["""torch"""] ) self.assertIn("""TFBertModel""" , objects["""tf"""] ) self.assertIn("""FlaxBertModel""" , objects["""flax"""] ) self.assertIn("""BertModel""" , objects["""torch"""] ) self.assertIn("""TFBertTokenizer""" , objects["""tensorflow_text"""] ) self.assertIn("""convert_slow_tokenizer""" , objects["""sentencepiece_and_tokenizers"""] ) def lowerCamelCase__ ( self :Dict ): '''simple docstring''' a = create_dummy_object("""CONSTANT""" , """'torch'""" ) self.assertEqual(__magic_name__ , """\nCONSTANT = None\n""" ) a = create_dummy_object("""function""" , """'torch'""" ) self.assertEqual( __magic_name__ , """\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n""" ) a = """ class FakeClass(metaclass=DummyObject): _backends = 'torch' def __init__(self, *args, **kwargs): requires_backends(self, 'torch') """ a = create_dummy_object("""FakeClass""" , """'torch'""" ) self.assertEqual(__magic_name__ , __magic_name__ ) def lowerCamelCase__ ( self :Dict ): '''simple docstring''' a = """# This file is autogenerated by the command `make fix-copies`, do not edit. from ..utils import DummyObject, requires_backends CONSTANT = None def function(*args, **kwargs): requires_backends(function, [\"torch\"]) class FakeClass(metaclass=DummyObject): _backends = [\"torch\"] def __init__(self, *args, **kwargs): requires_backends(self, [\"torch\"]) """ a = create_dummy_files({"""torch""": ["""CONSTANT""", """function""", """FakeClass"""]} ) self.assertEqual(dummy_files["""torch"""] , __magic_name__ )
347
from copy import deepcopy class __lowerCAmelCase : def __init__( self :Union[str, Any] , __magic_name__ :list[int] | None = None , __magic_name__ :int | None = None ): '''simple docstring''' if arr is None and size is not None: a = size a = [0] * size elif arr is not None: self.init(__magic_name__ ) else: raise ValueError("""Either arr or size must be specified""" ) def lowerCamelCase__ ( self :Dict , __magic_name__ :list[int] ): '''simple docstring''' a = len(__magic_name__ ) a = deepcopy(__magic_name__ ) for i in range(1 , self.size ): a = self.next_(__magic_name__ ) if j < self.size: self.tree[j] += self.tree[i] def lowerCamelCase__ ( self :Tuple ): '''simple docstring''' a = self.tree[:] for i in range(self.size - 1 , 0 , -1 ): a = self.next_(__magic_name__ ) if j < self.size: arr[j] -= arr[i] return arr @staticmethod def lowerCamelCase__ ( __magic_name__ :int ): '''simple docstring''' return index + (index & (-index)) @staticmethod def lowerCamelCase__ ( __magic_name__ :int ): '''simple docstring''' return index - (index & (-index)) def lowerCamelCase__ ( self :Any , __magic_name__ :int , __magic_name__ :int ): '''simple docstring''' if index == 0: self.tree[0] += value return while index < self.size: self.tree[index] += value a = self.next_(__magic_name__ ) def lowerCamelCase__ ( self :Optional[Any] , __magic_name__ :int , __magic_name__ :int ): '''simple docstring''' self.add(__magic_name__ , value - self.get(__magic_name__ ) ) def lowerCamelCase__ ( self :int , __magic_name__ :int ): '''simple docstring''' if right == 0: return 0 a = self.tree[0] right -= 1 # make right inclusive while right > 0: result += self.tree[right] a = self.prev(__magic_name__ ) return result def lowerCamelCase__ ( self :int , __magic_name__ :int , __magic_name__ :int ): '''simple docstring''' return self.prefix(__magic_name__ ) - self.prefix(__magic_name__ ) def lowerCamelCase__ ( self :Tuple , __magic_name__ :int ): '''simple docstring''' return self.query(__magic_name__ , index + 1 ) def lowerCamelCase__ ( self :Dict , __magic_name__ :int ): '''simple docstring''' value -= self.tree[0] if value < 0: return -1 a = 1 # Largest power of 2 <= size while j * 2 < self.size: j *= 2 a = 0 while j > 0: if i + j < self.size and self.tree[i + j] <= value: value -= self.tree[i + j] i += j j //= 2 return i if __name__ == "__main__": import doctest doctest.testmod()
347
1
import unittest from transformers import SPIECE_UNDERLINE from transformers.models.speechta import SpeechTaTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.tokenization_utils import AddedToken from ...test_tokenization_common import TokenizerTesterMixin __UpperCamelCase : int = get_tests_dir("fixtures/test_sentencepiece_bpe_char.model") @require_sentencepiece @require_tokenizers class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ): UpperCamelCase__ = SpeechTaTokenizer UpperCamelCase__ = False UpperCamelCase__ = True def lowerCamelCase__ ( self :Tuple ): '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing a = SpeechTaTokenizer(__magic_name__ ) a = AddedToken("""<mask>""" , lstrip=__magic_name__ , rstrip=__magic_name__ ) a = mask_token tokenizer.add_special_tokens({"""mask_token""": mask_token} ) tokenizer.add_tokens(["""<ctc_blank>"""] ) tokenizer.save_pretrained(self.tmpdirname ) def lowerCamelCase__ ( self :Optional[int] , __magic_name__ :Any ): '''simple docstring''' a = """this is a test""" a = """this is a test""" return input_text, output_text def lowerCamelCase__ ( self :Optional[int] , __magic_name__ :Tuple , __magic_name__ :Optional[Any]=False , __magic_name__ :Any=20 , __magic_name__ :int=5 ): '''simple docstring''' a , a = self.get_input_output_texts(__magic_name__ ) a = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) a = tokenizer.decode(__magic_name__ , clean_up_tokenization_spaces=__magic_name__ ) return text, ids def lowerCamelCase__ ( self :str ): '''simple docstring''' a = """<pad>""" a = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__magic_name__ ) , __magic_name__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__magic_name__ ) , __magic_name__ ) def lowerCamelCase__ ( self :Tuple ): '''simple docstring''' a = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """<s>""" ) self.assertEqual(vocab_keys[1] , """<pad>""" ) self.assertEqual(vocab_keys[-4] , """œ""" ) self.assertEqual(vocab_keys[-2] , """<mask>""" ) self.assertEqual(vocab_keys[-1] , """<ctc_blank>""" ) self.assertEqual(len(__magic_name__ ) , 81 ) def lowerCamelCase__ ( self :Optional[int] ): '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 79 ) def lowerCamelCase__ ( self :List[str] ): '''simple docstring''' a = self.get_tokenizers(do_lower_case=__magic_name__ ) for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): a = tokenizer.vocab_size a = len(__magic_name__ ) self.assertNotEqual(__magic_name__ , 0 ) # We usually have added tokens from the start in tests because our vocab fixtures are # smaller than the original vocabs - let's not assert this # self.assertEqual(vocab_size, all_size) a = ["""aaaaa bbbbbb""", """cccccccccdddddddd"""] a = tokenizer.add_tokens(__magic_name__ ) a = tokenizer.vocab_size a = len(__magic_name__ ) self.assertNotEqual(__magic_name__ , 0 ) self.assertEqual(__magic_name__ , __magic_name__ ) self.assertEqual(__magic_name__ , len(__magic_name__ ) ) self.assertEqual(__magic_name__ , all_size + len(__magic_name__ ) ) a = tokenizer.encode("""aaaaa bbbbbb low cccccccccdddddddd l""" , add_special_tokens=__magic_name__ ) self.assertGreaterEqual(len(__magic_name__ ) , 4 ) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) a = {"""eos_token""": """>>>>|||<||<<|<<""", """pad_token""": """<<<<<|||>|>>>>|>"""} a = tokenizer.add_special_tokens(__magic_name__ ) a = tokenizer.vocab_size a = len(__magic_name__ ) self.assertNotEqual(__magic_name__ , 0 ) self.assertEqual(__magic_name__ , __magic_name__ ) self.assertEqual(__magic_name__ , len(__magic_name__ ) ) self.assertEqual(__magic_name__ , all_size_a + len(__magic_name__ ) ) a = tokenizer.encode( """>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l""" , add_special_tokens=__magic_name__ ) self.assertGreaterEqual(len(__magic_name__ ) , 6 ) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[0] , tokens[1] ) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] , tokens[-4] ) self.assertEqual(tokens[0] , tokenizer.eos_token_id ) self.assertEqual(tokens[-3] , tokenizer.pad_token_id ) def lowerCamelCase__ ( self :str ): '''simple docstring''' pass def lowerCamelCase__ ( self :Any ): '''simple docstring''' pass def lowerCamelCase__ ( self :str ): '''simple docstring''' a = self.get_tokenizer() a = tokenizer.tokenize("""This is a test""" ) # fmt: off self.assertListEqual(__magic_name__ , [SPIECE_UNDERLINE, """T""", """h""", """i""", """s""", SPIECE_UNDERLINE, """i""", """s""", SPIECE_UNDERLINE, """a""", SPIECE_UNDERLINE, """t""", """e""", """s""", """t"""] ) # fmt: on self.assertListEqual( tokenizer.convert_tokens_to_ids(__magic_name__ ) , [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] , ) a = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( __magic_name__ , [SPIECE_UNDERLINE, """I""", SPIECE_UNDERLINE, """w""", """a""", """s""", SPIECE_UNDERLINE, """b""", """o""", """r""", """n""", SPIECE_UNDERLINE, """i""", """n""", SPIECE_UNDERLINE, """92000""", """,""", SPIECE_UNDERLINE, """a""", """n""", """d""", SPIECE_UNDERLINE, """t""", """h""", """i""", """s""", SPIECE_UNDERLINE, """i""", """s""", SPIECE_UNDERLINE, """f""", """a""", """l""", """s""", """é""", """."""] ) a = tokenizer.convert_tokens_to_ids(__magic_name__ ) # fmt: off self.assertListEqual(__magic_name__ , [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] ) # fmt: on a = tokenizer.convert_ids_to_tokens(__magic_name__ ) self.assertListEqual( __magic_name__ , [SPIECE_UNDERLINE, """I""", SPIECE_UNDERLINE, """w""", """a""", """s""", SPIECE_UNDERLINE, """b""", """o""", """r""", """n""", SPIECE_UNDERLINE, """i""", """n""", SPIECE_UNDERLINE, """<unk>""", """,""", SPIECE_UNDERLINE, """a""", """n""", """d""", SPIECE_UNDERLINE, """t""", """h""", """i""", """s""", SPIECE_UNDERLINE, """i""", """s""", SPIECE_UNDERLINE, """f""", """a""", """l""", """s""", """é""", """."""] ) @slow def lowerCamelCase__ ( self :Any ): '''simple docstring''' a = [ """Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides """ """general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural """ """Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained """ """models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.""", """BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly """ """conditioning on both left and right context in all layers.""", """The quick brown fox jumps over the lazy dog.""", ] # fmt: off a = { """input_ids""": [ [4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2], [4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], ], """attention_mask""": [ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], ] } # fmt: on self.tokenizer_integration_test_util( expected_encoding=__magic_name__ , model_name="""microsoft/speecht5_asr""" , revision="""c5ef64c71905caeccde0e4462ef3f9077224c524""" , sequences=__magic_name__ , )
347
from __future__ import annotations from typing import Generic, TypeVar __UpperCamelCase : Union[str, Any] = TypeVar("T") class __lowerCAmelCase ( Generic[T] ): def __init__( self :Tuple , __magic_name__ :T ): '''simple docstring''' a = data a = self a = 0 class __lowerCAmelCase ( Generic[T] ): def __init__( self :Tuple ): '''simple docstring''' a = {} def lowerCamelCase__ ( self :Optional[int] , __magic_name__ :T ): '''simple docstring''' a = DisjointSetTreeNode(__magic_name__ ) def lowerCamelCase__ ( self :List[Any] , __magic_name__ :T ): '''simple docstring''' a = self.map[data] if elem_ref != elem_ref.parent: a = self.find_set(elem_ref.parent.data ) return elem_ref.parent def lowerCamelCase__ ( self :List[Any] , __magic_name__ :DisjointSetTreeNode[T] , __magic_name__ :DisjointSetTreeNode[T] ): '''simple docstring''' if nodea.rank > nodea.rank: a = nodea else: a = nodea if nodea.rank == nodea.rank: nodea.rank += 1 def lowerCamelCase__ ( self :Optional[int] , __magic_name__ :T , __magic_name__ :T ): '''simple docstring''' self.link(self.find_set(__magic_name__ ) , self.find_set(__magic_name__ ) ) class __lowerCAmelCase ( Generic[T] ): def __init__( self :Union[str, Any] ): '''simple docstring''' a = {} def lowerCamelCase__ ( self :Union[str, Any] , __magic_name__ :T ): '''simple docstring''' if node not in self.connections: a = {} def lowerCamelCase__ ( self :Any , __magic_name__ :T , __magic_name__ :T , __magic_name__ :int ): '''simple docstring''' self.add_node(__magic_name__ ) self.add_node(__magic_name__ ) a = weight a = weight def lowerCamelCase__ ( self :int ): '''simple docstring''' a = [] a = set() for start in self.connections: for end in self.connections[start]: if (start, end) not in seen: seen.add((end, start) ) edges.append((start, end, self.connections[start][end]) ) edges.sort(key=lambda __magic_name__ : x[2] ) # creating the disjoint set a = DisjointSetTree[T]() for node in self.connections: disjoint_set.make_set(__magic_name__ ) # MST generation a = 0 a = 0 a = GraphUndirectedWeighted[T]() while num_edges < len(self.connections ) - 1: a , a , a = edges[index] index += 1 a = disjoint_set.find_set(__magic_name__ ) a = disjoint_set.find_set(__magic_name__ ) if parent_u != parent_v: num_edges += 1 graph.add_edge(__magic_name__ , __magic_name__ , __magic_name__ ) disjoint_set.union(__magic_name__ , __magic_name__ ) return graph
347
1
import argparse import json from collections import OrderedDict from functools import partial from pathlib import Path import timm import torch from huggingface_hub import hf_hub_download from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor from transformers.utils import logging logging.set_verbosity_info() __UpperCamelCase : Union[str, Any] = logging.get_logger() def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = True ) -> int: print(f'Converting {name}...' ) with torch.no_grad(): if hidden_sizes == 128: if name[-1] == "S": a = timm.create_model("""levit_128s""" , pretrained=__lowerCamelCase ) else: a = timm.create_model("""levit_128""" , pretrained=__lowerCamelCase ) if hidden_sizes == 192: a = timm.create_model("""levit_192""" , pretrained=__lowerCamelCase ) if hidden_sizes == 256: a = timm.create_model("""levit_256""" , pretrained=__lowerCamelCase ) if hidden_sizes == 384: a = timm.create_model("""levit_384""" , pretrained=__lowerCamelCase ) from_model.eval() a = LevitForImageClassificationWithTeacher(__lowerCamelCase ).eval() a = OrderedDict() a = from_model.state_dict() a = list(from_model.state_dict().keys() ) a = list(our_model.state_dict().keys() ) print(len(__lowerCamelCase ) , len(__lowerCamelCase ) ) for i in range(len(__lowerCamelCase ) ): a = weights[og_keys[i]] our_model.load_state_dict(__lowerCamelCase ) a = torch.randn((2, 3, 224, 224) ) a = from_model(__lowerCamelCase ) a = our_model(__lowerCamelCase ).logits assert torch.allclose(__lowerCamelCase , __lowerCamelCase ), "The model logits don't match the original one." a = name print(__lowerCamelCase ) if push_to_hub: our_model.save_pretrained(save_directory / checkpoint_name ) a = LevitImageProcessor() image_processor.save_pretrained(save_directory / checkpoint_name ) print(f'Pushed {checkpoint_name}' ) def __A ( __lowerCamelCase , __lowerCamelCase = None , __lowerCamelCase = True ) -> Tuple: a = """imagenet-1k-id2label.json""" a = 1000 a = (1, num_labels) a = """huggingface/label-files""" a = num_labels a = json.load(open(hf_hub_download(__lowerCamelCase , __lowerCamelCase , repo_type="""dataset""" ) , """r""" ) ) a = {int(__lowerCamelCase ): v for k, v in idalabel.items()} a = idalabel a = {v: k for k, v in idalabel.items()} a = partial(__lowerCamelCase , num_labels=__lowerCamelCase , idalabel=__lowerCamelCase , labelaid=__lowerCamelCase ) a = { """levit-128S""": 128, """levit-128""": 128, """levit-192""": 192, """levit-256""": 256, """levit-384""": 384, } a = { """levit-128S""": ImageNetPreTrainedConfig( hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ), """levit-128""": ImageNetPreTrainedConfig( hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ), """levit-192""": ImageNetPreTrainedConfig( hidden_sizes=[192, 288, 384] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ), """levit-256""": ImageNetPreTrainedConfig( hidden_sizes=[256, 384, 512] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ), """levit-384""": ImageNetPreTrainedConfig( hidden_sizes=[384, 512, 768] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ), } if model_name: convert_weight_and_push( names_to_hidden_sizes[model_name] , __lowerCamelCase , names_to_config[model_name] , __lowerCamelCase , __lowerCamelCase ) else: for model_name, config in names_to_config.items(): convert_weight_and_push(names_to_hidden_sizes[model_name] , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) return config, expected_shape if __name__ == "__main__": __UpperCamelCase : List[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_name", default=None, type=str, help="The name of the model you wish to convert, it must be one of the supported Levit* architecture,", ) parser.add_argument( "--pytorch_dump_folder_path", default="levit-dump-folder/", type=Path, required=False, help="Path to the output PyTorch model directory.", ) parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub") parser.add_argument( "--no-push_to_hub", dest="push_to_hub", action="store_false", help="Do not push model and image processor to the hub", ) __UpperCamelCase : Optional[Any] = parser.parse_args() __UpperCamelCase : Path = args.pytorch_dump_folder_path pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True) convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
347
import shutil import tempfile import unittest import numpy as np import pytest from transformers.testing_utils import require_vision from transformers.utils import is_vision_available if is_vision_available(): from PIL import Image from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast @require_vision class __lowerCAmelCase ( unittest.TestCase ): def lowerCamelCase__ ( self :Any ): '''simple docstring''' a = tempfile.mkdtemp() a = BlipImageProcessor() a = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-BertModel""" ) a = BlipProcessor(__magic_name__ , __magic_name__ ) processor.save_pretrained(self.tmpdirname ) def lowerCamelCase__ ( self :List[Any] , **__magic_name__ :Union[str, Any] ): '''simple docstring''' return AutoProcessor.from_pretrained(self.tmpdirname , **__magic_name__ ).tokenizer def lowerCamelCase__ ( self :str , **__magic_name__ :List[str] ): '''simple docstring''' return AutoProcessor.from_pretrained(self.tmpdirname , **__magic_name__ ).image_processor def lowerCamelCase__ ( self :int ): '''simple docstring''' shutil.rmtree(self.tmpdirname ) def lowerCamelCase__ ( self :Any ): '''simple docstring''' a = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] a = [Image.fromarray(np.moveaxis(__magic_name__ , 0 , -1 ) ) for x in image_inputs] return image_inputs def lowerCamelCase__ ( self :Optional[int] ): '''simple docstring''' a = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) a = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" ) a = self.get_image_processor(do_normalize=__magic_name__ , padding_value=1.0 ) a = BlipProcessor.from_pretrained( self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=__magic_name__ , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , __magic_name__ ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , __magic_name__ ) def lowerCamelCase__ ( self :Any ): '''simple docstring''' a = self.get_image_processor() a = self.get_tokenizer() a = BlipProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ ) a = self.prepare_image_inputs() a = image_processor(__magic_name__ , return_tensors="""np""" ) a = processor(images=__magic_name__ , return_tensors="""np""" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) def lowerCamelCase__ ( self :Any ): '''simple docstring''' a = self.get_image_processor() a = self.get_tokenizer() a = BlipProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ ) a = """lower newer""" a = processor(text=__magic_name__ ) a = tokenizer(__magic_name__ , return_token_type_ids=__magic_name__ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def lowerCamelCase__ ( self :List[Any] ): '''simple docstring''' a = self.get_image_processor() a = self.get_tokenizer() a = BlipProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ ) a = """lower newer""" a = self.prepare_image_inputs() a = processor(text=__magic_name__ , images=__magic_name__ ) self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """input_ids""", """attention_mask"""] ) # test if it raises when no input is passed with pytest.raises(__magic_name__ ): processor() def lowerCamelCase__ ( self :List[str] ): '''simple docstring''' a = self.get_image_processor() a = self.get_tokenizer() a = BlipProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ ) a = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] a = processor.batch_decode(__magic_name__ ) a = tokenizer.batch_decode(__magic_name__ ) self.assertListEqual(__magic_name__ , __magic_name__ ) def lowerCamelCase__ ( self :Optional[int] ): '''simple docstring''' a = self.get_image_processor() a = self.get_tokenizer() a = BlipProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ ) a = """lower newer""" a = self.prepare_image_inputs() a = processor(text=__magic_name__ , images=__magic_name__ ) # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask'] self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """input_ids""", """attention_mask"""] )
347
1
def __A ( __lowerCamelCase = 200_0000 ) -> int: a = [0 for i in range(n + 1 )] a = 1 a = 1 for i in range(2 , int(n**0.5 ) + 1 ): if primality_list[i] == 0: for j in range(i * i , n + 1 , __lowerCamelCase ): a = 1 a = 0 for i in range(__lowerCamelCase ): if primality_list[i] == 0: sum_of_primes += i return sum_of_primes if __name__ == "__main__": print(F'{solution() = }')
347
from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices __UpperCamelCase : Optional[Any] = logging.get_logger(__name__) __UpperCamelCase : int = { "shi-labs/nat-mini-in1k-224": "https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json", # See all Nat models at https://huggingface.co/models?filter=nat } class __lowerCAmelCase ( __magic_name__ , __magic_name__ ): UpperCamelCase__ = '''nat''' UpperCamelCase__ = { '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers''', } def __init__( self :Any , __magic_name__ :int=4 , __magic_name__ :Dict=3 , __magic_name__ :List[str]=64 , __magic_name__ :Optional[int]=[3, 4, 6, 5] , __magic_name__ :int=[2, 4, 8, 16] , __magic_name__ :str=7 , __magic_name__ :Tuple=3.0 , __magic_name__ :Dict=True , __magic_name__ :List[Any]=0.0 , __magic_name__ :List[Any]=0.0 , __magic_name__ :List[Any]=0.1 , __magic_name__ :Optional[Any]="gelu" , __magic_name__ :Optional[Any]=0.02 , __magic_name__ :Tuple=1E-5 , __magic_name__ :Union[str, Any]=0.0 , __magic_name__ :int=None , __magic_name__ :Any=None , **__magic_name__ :Dict , ): '''simple docstring''' super().__init__(**__magic_name__ ) a = patch_size a = num_channels a = embed_dim a = depths a = len(__magic_name__ ) a = num_heads a = kernel_size a = mlp_ratio a = qkv_bias a = hidden_dropout_prob a = attention_probs_dropout_prob a = drop_path_rate a = hidden_act a = layer_norm_eps a = initializer_range # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model a = int(embed_dim * 2 ** (len(__magic_name__ ) - 1) ) a = layer_scale_init_value a = ["""stem"""] + [F'stage{idx}' for idx in range(1 , len(__magic_name__ ) + 1 )] a , a = get_aligned_output_features_output_indices( out_features=__magic_name__ , out_indices=__magic_name__ , stage_names=self.stage_names )
347
1
import json import os import tempfile from transformers.testing_utils import check_json_file_has_correct_format class __lowerCAmelCase : UpperCamelCase__ = None def lowerCamelCase__ ( self :int ): '''simple docstring''' a = self.feature_extraction_class(**self.feat_extract_dict ) a = json.loads(feat_extract.to_json_string() ) for key, value in self.feat_extract_dict.items(): self.assertEqual(obj[key] , __magic_name__ ) def lowerCamelCase__ ( self :Optional[Any] ): '''simple docstring''' a = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: a = os.path.join(__magic_name__ , """feat_extract.json""" ) feat_extract_first.to_json_file(__magic_name__ ) a = self.feature_extraction_class.from_json_file(__magic_name__ ) self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() ) def lowerCamelCase__ ( self :Tuple ): '''simple docstring''' a = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: a = feat_extract_first.save_pretrained(__magic_name__ )[0] check_json_file_has_correct_format(__magic_name__ ) a = self.feature_extraction_class.from_pretrained(__magic_name__ ) self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() ) def lowerCamelCase__ ( self :List[Any] ): '''simple docstring''' a = self.feature_extraction_class() self.assertIsNotNone(__magic_name__ )
347
import argparse import json import os import tensorstore as ts import torch from flax import serialization from flax.traverse_util import flatten_dict, unflatten_dict from tensorflow.io import gfile from transformers.modeling_utils import dtype_byte_size from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import ( rename_keys, ) from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME from transformers.utils.hub import convert_file_size_to_int def __A ( __lowerCamelCase , __lowerCamelCase ) -> Optional[int]: if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3: # expert layer a = flax_key_tuple[:-1] + ("""weight""",) a = torch.permute(__lowerCamelCase , (0, 2, 1) ) elif flax_key_tuple[-1] == "kernel" and ".".join(__lowerCamelCase ): # linear layer a = flax_key_tuple[:-1] + ("""weight""",) a = flax_tensor.T elif flax_key_tuple[-1] in ["scale", "embedding"]: a = flax_key_tuple[:-1] + ("""weight""",) return flax_key_tuple, flax_tensor def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> List[Any]: if "metadata" in layer: a = layer.split("""metadata""" ) a = """""".join(split_layer[0] )[:-1] a = [tuple(("""metadata""" + split_layer[1]).split("""/""" ) )] elif "kvstore" in layer: a = layer.split("""kvstore""" ) a = """""".join(split_layer[0] )[:-1] a = [tuple(("""kvstore""" + split_layer[1]).split("""/""" ) )] else: a = layer.split("""/""" ) a = """/""".join(split_layer[:-1] ) a = (split_layer[-1],) if "kvstore/path" in layer: a = f'{switch_checkpoint_path}/{checkpoint_info[layer]}' elif "kvstore/driver" in layer: a = """file""" else: a = checkpoint_info[layer] return curr_real_layer_name, split_layer, content def __A ( __lowerCamelCase , __lowerCamelCase ) -> Union[str, Any]: a = rename_keys(__lowerCamelCase ) a = {} for k, v in current_block.items(): a = v a = new_current_block torch.save(__lowerCamelCase , __lowerCamelCase ) def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = WEIGHTS_NAME ) -> List[str]: a = convert_file_size_to_int(__lowerCamelCase ) a = [] a = {} a = 0 a = 0 os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase ) with gfile.GFile(switch_checkpoint_path + """/checkpoint""" , """rb""" ) as fp: a = serialization.msgpack_restore(fp.read() )["""optimizer"""]["""target"""] a = flatten_dict(__lowerCamelCase , sep="""/""" ) a = {} for layer in checkpoint_info.keys(): a , a , a = get_key_and_tensorstore_dict( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) if curr_real_layer_name in all_layers: a = content else: a = {split_layer[-1]: content} for key in all_layers.keys(): # open tensorstore file a = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result() a = torch.tensor(__lowerCamelCase ) a = raw_weights.numel() * dtype_byte_size(raw_weights.dtype ) # use the renaming pattern from the small conversion scripts a , a = rename_base_flax_keys(tuple(key.split("""/""" ) ) , __lowerCamelCase ) a = """/""".join(__lowerCamelCase ) # If this weight is going to tip up over the maximal size, we split. if current_block_size + weight_size > max_shard_size: a = os.path.join( __lowerCamelCase , weights_name.replace(""".bin""" , f'-{len(__lowerCamelCase )+1:05d}-of-???.bin' ) ) rename_and_save_block(__lowerCamelCase , __lowerCamelCase ) sharded_state_dicts.append(current_block.keys() ) del current_block a = {} a = 0 a = raw_weights.to(getattr(__lowerCamelCase , __lowerCamelCase ) ) current_block_size += weight_size total_size += weight_size # Add the last block a = os.path.join(__lowerCamelCase , weights_name.replace(""".bin""" , f'-{len(__lowerCamelCase )+1:05d}-of-???.bin' ) ) rename_and_save_block(__lowerCamelCase , __lowerCamelCase ) sharded_state_dicts.append(current_block.keys() ) # If we only have one shard, we return it if len(__lowerCamelCase ) == 1: return {weights_name: sharded_state_dicts[0]}, None # Otherwise, let's build the index a = {} a = {} for idx, shard in enumerate(__lowerCamelCase ): a = weights_name.replace( """.bin""" , f'-{idx+1:05d}-of-{len(__lowerCamelCase ):05d}.bin' ) # len(sharded_state_dicts):05d} a = os.path.join(__lowerCamelCase , weights_name.replace(""".bin""" , f'-{idx+1:05d}-of-???.bin' ) ) os.rename(__lowerCamelCase , os.path.join(__lowerCamelCase , __lowerCamelCase ) ) a = shard for key in shard: a = shard_file # Add the metadata a = {"""total_size""": total_size} a = {"""metadata""": metadata, """weight_map""": weight_map} with open(os.path.join(__lowerCamelCase , __lowerCamelCase ) , """w""" , encoding="""utf-8""" ) as f: a = json.dumps(__lowerCamelCase , indent=2 , sort_keys=__lowerCamelCase ) + """\n""" f.write(__lowerCamelCase ) return metadata, index if __name__ == "__main__": __UpperCamelCase : Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--switch_t5x_checkpoint_path", default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600", type=str, required=False, help="Path to a directory containing a folder per layer. Follows the original Google format.", ) parser.add_argument("--max_shard_size", default="10GB", required=False, help="Max shard size") parser.add_argument("--dtype", default="bfloat16", type=str, required=False, help="dtype of the saved model") parser.add_argument( "--pytorch_dump_folder_path", default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted", type=str, required=False, help="Path to the output pytorch model.", ) __UpperCamelCase : Any = parser.parse_args() shard_on_the_fly( args.switch_tax_checkpoint_path, args.pytorch_dump_folder_path, args.max_shard_size, args.dtype, ) def __A ( ) -> Tuple: from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer a = SwitchTransformersConfig.from_pretrained("""google/switch-base-8""" ) config.save_pretrained("""/home/arthur_huggingface_co/transformers/switch_converted""" ) a = SwitchTransformersForConditionalGeneration.from_pretrained( """/home/arthur_huggingface_co/transformers/switch_converted""" , device_map="""auto""" ) a = TaTokenizer.from_pretrained("""t5-small""" ) a = """A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.""" a = tokenizer(__lowerCamelCase , return_tensors="""pt""" ).input_ids a = model.generate(__lowerCamelCase , decoder_start_token_id=0 ) print(tokenizer.decode(out[0] ) )
347
1
import inspect import unittest import numpy as np from tests.test_modeling_common import floats_tensor from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel if is_vision_available(): from transformers import MaskFormerImageProcessor if is_vision_available(): from PIL import Image class __lowerCAmelCase : def __init__( self :Any , __magic_name__ :Union[str, Any] , __magic_name__ :List[str]=2 , __magic_name__ :Dict=True , __magic_name__ :Optional[int]=False , __magic_name__ :Any=10 , __magic_name__ :int=3 , __magic_name__ :Optional[Any]=32 * 4 , __magic_name__ :int=32 * 6 , __magic_name__ :int=4 , __magic_name__ :Union[str, Any]=32 , ): '''simple docstring''' a = parent a = batch_size a = is_training a = use_auxiliary_loss a = num_queries a = num_channels a = min_size a = max_size a = num_labels a = mask_feature_size def lowerCamelCase__ ( self :List[str] ): '''simple docstring''' a = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to( __magic_name__ ) a = torch.ones([self.batch_size, self.min_size, self.max_size] , device=__magic_name__ ) a = ( torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=__magic_name__ ) > 0.5 ).float() a = (torch.rand((self.batch_size, self.num_labels) , device=__magic_name__ ) > 0.5).long() a = self.get_config() return config, pixel_values, pixel_mask, mask_labels, class_labels def lowerCamelCase__ ( self :str ): '''simple docstring''' return MaskFormerConfig.from_backbone_and_decoder_configs( backbone_config=SwinConfig( depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig( decoder_ffn_dim=128 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , ) def lowerCamelCase__ ( self :List[str] ): '''simple docstring''' a , a , a , a , a = self.prepare_config_and_inputs() a = {"""pixel_values""": pixel_values, """pixel_mask""": pixel_mask} return config, inputs_dict def lowerCamelCase__ ( self :str , __magic_name__ :Optional[Any] , __magic_name__ :Optional[Any] ): '''simple docstring''' a = output.encoder_hidden_states a = output.pixel_decoder_hidden_states a = output.transformer_decoder_hidden_states self.parent.assertTrue(len(__magic_name__ ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(__magic_name__ ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(__magic_name__ ) , config.decoder_config.decoder_layers ) def lowerCamelCase__ ( self :Tuple , __magic_name__ :Union[str, Any] , __magic_name__ :int , __magic_name__ :List[str] , __magic_name__ :List[Any]=False ): '''simple docstring''' with torch.no_grad(): a = MaskFormerModel(config=__magic_name__ ) model.to(__magic_name__ ) model.eval() a = model(pixel_values=__magic_name__ , pixel_mask=__magic_name__ ) a = model(__magic_name__ , output_hidden_states=__magic_name__ ) # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the # encoder and pixel decoder self.parent.assertEqual( output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , ) # let's ensure the other two hidden state exists self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(output.encoder_last_hidden_state is not None ) if output_hidden_states: self.check_output_hidden_state(__magic_name__ , __magic_name__ ) def lowerCamelCase__ ( self :List[Any] , __magic_name__ :List[Any] , __magic_name__ :Optional[Any] , __magic_name__ :str , __magic_name__ :Dict , __magic_name__ :Optional[Any] ): '''simple docstring''' a = MaskFormerForInstanceSegmentation(config=__magic_name__ ) model.to(__magic_name__ ) model.eval() def comm_check_on_output(__magic_name__ :List[Any] ): # let's still check that all the required stuff is there self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.encoder_last_hidden_state is not None ) # okay, now we need to check the logits shape # due to the encoder compression, masks have a //4 spatial size self.parent.assertEqual( result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , ) # + 1 for null class self.parent.assertEqual( result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) ) with torch.no_grad(): a = model(pixel_values=__magic_name__ , pixel_mask=__magic_name__ ) a = model(__magic_name__ ) comm_check_on_output(__magic_name__ ) a = model( pixel_values=__magic_name__ , pixel_mask=__magic_name__ , mask_labels=__magic_name__ , class_labels=__magic_name__ ) comm_check_on_output(__magic_name__ ) self.parent.assertTrue(result.loss is not None ) self.parent.assertEqual(result.loss.shape , torch.Size([1] ) ) @require_torch class __lowerCAmelCase ( __magic_name__ , __magic_name__ , unittest.TestCase ): UpperCamelCase__ = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else () UpperCamelCase__ = ( {'''feature-extraction''': MaskFormerModel, '''image-segmentation''': MaskFormerForInstanceSegmentation} if is_torch_available() else {} ) UpperCamelCase__ = False UpperCamelCase__ = False UpperCamelCase__ = False UpperCamelCase__ = False def lowerCamelCase__ ( self :Union[str, Any] ): '''simple docstring''' a = MaskFormerModelTester(self ) a = ConfigTester(self , config_class=__magic_name__ , has_text_modality=__magic_name__ ) def lowerCamelCase__ ( self :str ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCamelCase__ ( self :Dict ): '''simple docstring''' a , a = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(__magic_name__ , **__magic_name__ , output_hidden_states=__magic_name__ ) def lowerCamelCase__ ( self :List[Any] ): '''simple docstring''' a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*__magic_name__ ) @unittest.skip(reason="""MaskFormer does not use inputs_embeds""" ) def lowerCamelCase__ ( self :Dict ): '''simple docstring''' pass @unittest.skip(reason="""MaskFormer does not have a get_input_embeddings method""" ) def lowerCamelCase__ ( self :str ): '''simple docstring''' pass @unittest.skip(reason="""MaskFormer is not a generative model""" ) def lowerCamelCase__ ( self :Dict ): '''simple docstring''' pass @unittest.skip(reason="""MaskFormer does not use token embeddings""" ) def lowerCamelCase__ ( self :Tuple ): '''simple docstring''' pass @require_torch_multi_gpu @unittest.skip( reason="""MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" ) def lowerCamelCase__ ( self :List[Any] ): '''simple docstring''' pass @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def lowerCamelCase__ ( self :Optional[int] ): '''simple docstring''' pass def lowerCamelCase__ ( self :int ): '''simple docstring''' a , a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a = model_class(__magic_name__ ) a = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic a = [*signature.parameters.keys()] a = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , __magic_name__ ) @slow def lowerCamelCase__ ( self :Dict ): '''simple docstring''' for model_name in ["facebook/maskformer-swin-small-coco"]: a = MaskFormerModel.from_pretrained(__magic_name__ ) self.assertIsNotNone(__magic_name__ ) def lowerCamelCase__ ( self :Optional[int] ): '''simple docstring''' a = (self.model_tester.min_size,) * 2 a = { """pixel_values""": torch.randn((2, 3, *size) , device=__magic_name__ ), """mask_labels""": torch.randn((2, 10, *size) , device=__magic_name__ ), """class_labels""": torch.zeros(2 , 10 , device=__magic_name__ ).long(), } a = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(__magic_name__ ) a = model(**__magic_name__ ) self.assertTrue(outputs.loss is not None ) def lowerCamelCase__ ( self :Any ): '''simple docstring''' a , a = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(__magic_name__ , **__magic_name__ , output_hidden_states=__magic_name__ ) def lowerCamelCase__ ( self :List[Any] ): '''simple docstring''' a , a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a = model_class(__magic_name__ ).to(__magic_name__ ) a = model(**__magic_name__ , output_attentions=__magic_name__ ) self.assertTrue(outputs.attentions is not None ) def lowerCamelCase__ ( self :int ): '''simple docstring''' if not self.model_tester.is_training: return # only MaskFormerForInstanceSegmentation has the loss a = self.all_model_classes[1] a , a , a , a , a = self.model_tester.prepare_config_and_inputs() a = model_class(__magic_name__ ) model.to(__magic_name__ ) model.train() a = model(__magic_name__ , mask_labels=__magic_name__ , class_labels=__magic_name__ ).loss loss.backward() def lowerCamelCase__ ( self :List[str] ): '''simple docstring''' a = self.all_model_classes[1] a , a , a , a , a = self.model_tester.prepare_config_and_inputs() a = True a = True a = model_class(__magic_name__ ) model.to(__magic_name__ ) model.train() a = model(__magic_name__ , mask_labels=__magic_name__ , class_labels=__magic_name__ ) a = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() a = outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't a = outputs.transformer_decoder_hidden_states[0] transformer_decoder_hidden_states.retain_grad() a = outputs.attentions[0] attentions.retain_grad() outputs.loss.backward(retain_graph=__magic_name__ ) self.assertIsNotNone(encoder_hidden_states.grad ) self.assertIsNotNone(pixel_decoder_hidden_states.grad ) self.assertIsNotNone(transformer_decoder_hidden_states.grad ) self.assertIsNotNone(attentions.grad ) __UpperCamelCase : Union[str, Any] = 1E-4 def __A ( ) -> str: a = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_vision @slow class __lowerCAmelCase ( unittest.TestCase ): @cached_property def lowerCamelCase__ ( self :int ): '''simple docstring''' return ( MaskFormerImageProcessor.from_pretrained("""facebook/maskformer-swin-small-coco""" ) if is_vision_available() else None ) def lowerCamelCase__ ( self :List[Any] ): '''simple docstring''' a = MaskFormerModel.from_pretrained("""facebook/maskformer-swin-small-coco""" ).to(__magic_name__ ) a = self.default_image_processor a = prepare_img() a = image_processor(__magic_name__ , return_tensors="""pt""" ).to(__magic_name__ ) a = inputs["""pixel_values"""].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(__magic_name__ , (1, 3, 800, 1088) ) with torch.no_grad(): a = model(**__magic_name__ ) a = torch.tensor( [[-0.0482, 0.9228, 0.4951], [-0.2547, 0.8017, 0.8527], [-0.0069, 0.3385, -0.0089]] ).to(__magic_name__ ) self.assertTrue( torch.allclose( outputs.encoder_last_hidden_state[0, 0, :3, :3] , __magic_name__ , atol=__magic_name__ ) ) a = torch.tensor( [[-0.8422, -0.8434, -0.9718], [-1.0144, -0.5565, -0.4195], [-1.0038, -0.4484, -0.1961]] ).to(__magic_name__ ) self.assertTrue( torch.allclose( outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , __magic_name__ , atol=__magic_name__ ) ) a = torch.tensor( [[0.2852, -0.0159, 0.9735], [0.6254, 0.1858, 0.8529], [-0.0680, -0.4116, 1.8413]] ).to(__magic_name__ ) self.assertTrue( torch.allclose( outputs.transformer_decoder_last_hidden_state[0, :3, :3] , __magic_name__ , atol=__magic_name__ ) ) def lowerCamelCase__ ( self :List[str] ): '''simple docstring''' a = ( MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-swin-small-coco""" ) .to(__magic_name__ ) .eval() ) a = self.default_image_processor a = prepare_img() a = image_processor(__magic_name__ , return_tensors="""pt""" ).to(__magic_name__ ) a = inputs["""pixel_values"""].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(__magic_name__ , (1, 3, 800, 1088) ) with torch.no_grad(): a = model(**__magic_name__ ) # masks_queries_logits a = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) a = [ [-1.3737124, -1.7724937, -1.9364233], [-1.5977281, -1.9867939, -2.1523695], [-1.5795398, -1.9269832, -2.093942], ] a = torch.tensor(__magic_name__ ).to(__magic_name__ ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __magic_name__ , atol=__magic_name__ ) ) # class_queries_logits a = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) a = torch.tensor( [ [1.6_5_1_2E0_0, -5.2_5_7_2E0_0, -3.3_5_1_9E0_0], [3.6_1_6_9E-0_2, -5.9_0_2_5E0_0, -2.9_3_1_3E0_0], [1.0_7_6_6E-0_4, -7.7_6_3_0E0_0, -5.1_2_6_3E0_0], ] ).to(__magic_name__ ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __magic_name__ , atol=__magic_name__ ) ) def lowerCamelCase__ ( self :Optional[Any] ): '''simple docstring''' a = ( MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-resnet101-coco-stuff""" ) .to(__magic_name__ ) .eval() ) a = self.default_image_processor a = prepare_img() a = image_processor(__magic_name__ , return_tensors="""pt""" ).to(__magic_name__ ) a = inputs["""pixel_values"""].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(__magic_name__ , (1, 3, 800, 1088) ) with torch.no_grad(): a = model(**__magic_name__ ) # masks_queries_logits a = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) a = [[-0.9046, -2.6366, -4.6062], [-3.4179, -5.7890, -8.8057], [-4.9179, -7.6560, -10.7711]] a = torch.tensor(__magic_name__ ).to(__magic_name__ ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __magic_name__ , atol=__magic_name__ ) ) # class_queries_logits a = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) a = torch.tensor( [[4.7188, -3.2585, -2.8857], [6.6871, -2.9181, -1.2487], [7.2449, -2.2764, -2.1874]] ).to(__magic_name__ ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __magic_name__ , atol=__magic_name__ ) ) def lowerCamelCase__ ( self :int ): '''simple docstring''' a = ( MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-swin-small-coco""" ) .to(__magic_name__ ) .eval() ) a = self.default_image_processor a = image_processor( [np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors="""pt""" , ) a = inputs["""pixel_values"""].to(__magic_name__ ) a = [el.to(__magic_name__ ) for el in inputs["""mask_labels"""]] a = [el.to(__magic_name__ ) for el in inputs["""class_labels"""]] with torch.no_grad(): a = model(**__magic_name__ ) self.assertTrue(outputs.loss is not None )
347
import glob import os import random from string import ascii_lowercase, digits import cva import numpy as np # Parrameters __UpperCamelCase : Union[str, Any] = (720, 1_280) # Height, Width __UpperCamelCase : Any = (0.4, 0.6) # if height or width lower than this scale, drop it. __UpperCamelCase : str = 1 / 100 __UpperCamelCase : Optional[int] = "" __UpperCamelCase : List[Any] = "" __UpperCamelCase : Union[str, Any] = "" __UpperCamelCase : Tuple = 250 def __A ( ) -> None: a , a = get_dataset(__lowerCamelCase , __lowerCamelCase ) for index in range(__lowerCamelCase ): a = random.sample(range(len(__lowerCamelCase ) ) , 4 ) a , a , a = update_image_and_anno( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , filter_scale=__lowerCamelCase , ) # Get random string code: '7b7ad245cdff75241935e4dd860f3bad' a = random_chars(32 ) a = path.split(os.sep )[-1].rsplit(""".""" , 1 )[0] a = f'{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}' cva.imwrite(f'{file_root}.jpg' , __lowerCamelCase , [cva.IMWRITE_JPEG_QUALITY, 85] ) print(f'Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}' ) a = [] for anno in new_annos: a = anno[3] - anno[1] a = anno[4] - anno[2] a = anno[1] + width / 2 a = anno[2] + height / 2 a = f'{anno[0]} {x_center} {y_center} {width} {height}' annos_list.append(__lowerCamelCase ) with open(f'{file_root}.txt' , """w""" ) as outfile: outfile.write("""\n""".join(line for line in annos_list ) ) def __A ( __lowerCamelCase , __lowerCamelCase ) -> tuple[list, list]: a = [] a = [] for label_file in glob.glob(os.path.join(__lowerCamelCase , """*.txt""" ) ): a = label_file.split(os.sep )[-1].rsplit(""".""" , 1 )[0] with open(__lowerCamelCase ) as in_file: a = in_file.readlines() a = os.path.join(__lowerCamelCase , f'{label_name}.jpg' ) a = [] for obj_list in obj_lists: a = obj_list.rstrip("""\n""" ).split(""" """ ) a = float(obj[1] ) - float(obj[3] ) / 2 a = float(obj[2] ) - float(obj[4] ) / 2 a = float(obj[1] ) + float(obj[3] ) / 2 a = float(obj[2] ) + float(obj[4] ) / 2 boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] ) if not boxes: continue img_paths.append(__lowerCamelCase ) labels.append(__lowerCamelCase ) return img_paths, labels def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = 0.0 , ) -> tuple[list, list, str]: a = np.zeros([output_size[0], output_size[1], 3] , dtype=np.uinta ) a = scale_range[0] + random.random() * (scale_range[1] - scale_range[0]) a = scale_range[0] + random.random() * (scale_range[1] - scale_range[0]) a = int(scale_x * output_size[1] ) a = int(scale_y * output_size[0] ) a = [] a = [] for i, index in enumerate(__lowerCamelCase ): a = all_img_list[index] path_list.append(__lowerCamelCase ) a = all_annos[index] a = cva.imread(__lowerCamelCase ) if i == 0: # top-left a = cva.resize(__lowerCamelCase , (divid_point_x, divid_point_y) ) a = img for bbox in img_annos: a = bbox[1] * scale_x a = bbox[2] * scale_y a = bbox[3] * scale_x a = bbox[4] * scale_y new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) elif i == 1: # top-right a = cva.resize(__lowerCamelCase , (output_size[1] - divid_point_x, divid_point_y) ) a = img for bbox in img_annos: a = scale_x + bbox[1] * (1 - scale_x) a = bbox[2] * scale_y a = scale_x + bbox[3] * (1 - scale_x) a = bbox[4] * scale_y new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) elif i == 2: # bottom-left a = cva.resize(__lowerCamelCase , (divid_point_x, output_size[0] - divid_point_y) ) a = img for bbox in img_annos: a = bbox[1] * scale_x a = scale_y + bbox[2] * (1 - scale_y) a = bbox[3] * scale_x a = scale_y + bbox[4] * (1 - scale_y) new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) else: # bottom-right a = cva.resize( __lowerCamelCase , (output_size[1] - divid_point_x, output_size[0] - divid_point_y) ) a = img for bbox in img_annos: a = scale_x + bbox[1] * (1 - scale_x) a = scale_y + bbox[2] * (1 - scale_y) a = scale_x + bbox[3] * (1 - scale_x) a = scale_y + bbox[4] * (1 - scale_y) new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) # Remove bounding box small than scale of filter if filter_scale > 0: a = [ anno for anno in new_anno if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2]) ] return output_img, new_anno, path_list[0] def __A ( __lowerCamelCase ) -> str: assert number_char > 1, "The number of character should greater than 1" a = ascii_lowercase + digits return "".join(random.choice(__lowerCamelCase ) for _ in range(__lowerCamelCase ) ) if __name__ == "__main__": main() print("DONE ✅")
347
1
def __A ( __lowerCamelCase ) -> bool: if num < 0: return False a = num a = 0 while num > 0: a = rev_num * 10 + (num % 10) num //= 10 return num_copy == rev_num if __name__ == "__main__": import doctest doctest.testmod()
347
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available __UpperCamelCase : Optional[Any] = { "configuration_mobilenet_v2": [ "MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileNetV2Config", "MobileNetV2OnnxConfig", ], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : int = ["MobileNetV2FeatureExtractor"] __UpperCamelCase : Tuple = ["MobileNetV2ImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : int = [ "MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST", "MobileNetV2ForImageClassification", "MobileNetV2ForSemanticSegmentation", "MobileNetV2Model", "MobileNetV2PreTrainedModel", "load_tf_weights_in_mobilenet_v2", ] if TYPE_CHECKING: from .configuration_mobilenet_va import ( MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileNetVaConfig, MobileNetVaOnnxConfig, ) try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_mobilenet_va import MobileNetVaFeatureExtractor from .image_processing_mobilenet_va import MobileNetVaImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mobilenet_va import ( MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel, MobileNetVaPreTrainedModel, load_tf_weights_in_mobilenet_va, ) else: import sys __UpperCamelCase : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
347
1
import logging import os from typing import Dict, List, Optional, Union import torch import torch.nn as nn from accelerate.utils.imports import ( is_abit_bnb_available, is_abit_bnb_available, is_bnb_available, ) from ..big_modeling import dispatch_model, init_empty_weights from .dataclasses import BnbQuantizationConfig from .modeling import ( find_tied_parameters, get_balanced_memory, infer_auto_device_map, load_checkpoint_in_model, offload_weight, set_module_tensor_to_device, ) if is_bnb_available(): import bitsandbytes as bnb from copy import deepcopy __UpperCamelCase : Union[str, Any] = logging.getLogger(__name__) def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = None , __lowerCamelCase = None , __lowerCamelCase = None , __lowerCamelCase = None , __lowerCamelCase = None , __lowerCamelCase = False , ) -> Optional[int]: a = bnb_quantization_config.load_in_abit a = bnb_quantization_config.load_in_abit if load_in_abit and not is_abit_bnb_available(): raise ImportError( """You have a version of `bitsandbytes` that is not compatible with 8bit quantization,""" """ make sure you have the latest version of `bitsandbytes` installed.""" ) if load_in_abit and not is_abit_bnb_available(): raise ValueError( """You have a version of `bitsandbytes` that is not compatible with 4bit quantization,""" """make sure you have the latest version of `bitsandbytes` installed.""" ) a = [] # custom device map if isinstance(__lowerCamelCase , __lowerCamelCase ) and len(device_map.keys() ) > 1: a = [key for key, value in device_map.items() if value in ["""disk""", """cpu"""]] # We keep some modules such as the lm_head in their original dtype for numerical stability reasons if bnb_quantization_config.skip_modules is None: a = get_keys_to_not_convert(__lowerCamelCase ) # add cpu modules to skip modules only for 4-bit modules if load_in_abit: bnb_quantization_config.skip_modules.extend(__lowerCamelCase ) a = bnb_quantization_config.skip_modules # We add the modules we want to keep in full precision if bnb_quantization_config.keep_in_fpaa_modules is None: a = [] a = bnb_quantization_config.keep_in_fpaa_modules modules_to_not_convert.extend(__lowerCamelCase ) # compatibility with peft a = load_in_abit a = load_in_abit a = get_parameter_device(__lowerCamelCase ) if model_device.type != "meta": # quantization of an already loaded model logger.warning( """It is not recommended to quantize a loaded model. """ """The model should be instantiated under the `init_empty_weights` context manager.""" ) a = replace_with_bnb_layers(__lowerCamelCase , __lowerCamelCase , modules_to_not_convert=__lowerCamelCase ) # convert param to the right dtype a = bnb_quantization_config.torch_dtype for name, param in model.state_dict().items(): if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ): param.to(torch.floataa ) if param.dtype != torch.floataa: a = name.replace(""".weight""" , """""" ).replace(""".bias""" , """""" ) a = getattr(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) if param is not None: param.to(torch.floataa ) elif torch.is_floating_point(__lowerCamelCase ): param.to(__lowerCamelCase ) if model_device.type == "cuda": # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda model.cuda(torch.cuda.current_device() ) torch.cuda.empty_cache() elif torch.cuda.is_available(): model.to(torch.cuda.current_device() ) else: raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" ) logger.info( f'The model device type is {model_device.type}. However, cuda is needed for quantization.' """We move the model to cuda.""" ) return model elif weights_location is None: raise RuntimeError( f'`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} ' ) else: with init_empty_weights(): a = replace_with_bnb_layers( __lowerCamelCase , __lowerCamelCase , modules_to_not_convert=__lowerCamelCase ) a = get_quantized_model_device_map( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , max_memory=__lowerCamelCase , no_split_module_classes=__lowerCamelCase , ) if offload_state_dict is None and device_map is not None and "disk" in device_map.values(): a = True a = any(x in list(device_map.values() ) for x in ["""cpu""", """disk"""] ) load_checkpoint_in_model( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , dtype=bnb_quantization_config.torch_dtype , offload_folder=__lowerCamelCase , offload_state_dict=__lowerCamelCase , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , ) return dispatch_model(__lowerCamelCase , device_map=__lowerCamelCase , offload_dir=__lowerCamelCase ) def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=None ) -> Tuple: if device_map is None: if torch.cuda.is_available(): a = {"""""": torch.cuda.current_device()} else: raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" ) logger.info("""The device_map was not initialized.""" """Setting device_map to `{'':torch.cuda.current_device()}`.""" ) if isinstance(__lowerCamelCase , __lowerCamelCase ): if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]: raise ValueError( """If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or """ """'sequential'.""" ) a = {} special_dtypes.update( { name: bnb_quantization_config.torch_dtype for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.skip_modules ) } ) special_dtypes.update( { name: torch.floataa for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules ) } ) a = {} a = special_dtypes a = no_split_module_classes a = bnb_quantization_config.target_dtype # get max_memory for each device. if device_map != "sequential": a = get_balanced_memory( __lowerCamelCase , low_zero=(device_map == """balanced_low_0""") , max_memory=__lowerCamelCase , **__lowerCamelCase , ) a = max_memory a = infer_auto_device_map(__lowerCamelCase , **__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ): # check if don't have any quantized module on the cpu a = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules a = { key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert } for device in ["cpu", "disk"]: if device in device_map_without_some_modules.values(): if bnb_quantization_config.load_in_abit: raise ValueError( """ Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit the quantized model. If you want to dispatch the model on the CPU or the disk while keeping these modules in `torch_dtype`, you need to pass a custom `device_map` to `load_and_quantize_model`. Check https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk for more details. """ ) else: logger.info( """Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit""" ) del device_map_without_some_modules return device_map def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=None , __lowerCamelCase=None ) -> Tuple: if modules_to_not_convert is None: a = [] a , a = _replace_with_bnb_layers( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) if not has_been_replaced: logger.warning( """You are loading your model in 8bit or 4bit but no linear modules were found in your model.""" """ this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.""" """ Please double check your model architecture, or submit an issue on github if you think this is""" """ a bug.""" ) return model def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=None , __lowerCamelCase=None , ) -> int: a = False for name, module in model.named_children(): if current_key_name is None: a = [] current_key_name.append(__lowerCamelCase ) if isinstance(__lowerCamelCase , nn.Linear ) and name not in modules_to_not_convert: # Check if the current key is not in the `modules_to_not_convert` a = """.""".join(__lowerCamelCase ) a = True for key in modules_to_not_convert: if ( (key in current_key_name_str) and (key + "." in current_key_name_str) ) or key == current_key_name_str: a = False break if proceed: # Load bnb module with empty weight and replace ``nn.Linear` module if bnb_quantization_config.load_in_abit: a = bnb.nn.LinearabitLt( module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=__lowerCamelCase , threshold=bnb_quantization_config.llm_inta_threshold , ) elif bnb_quantization_config.load_in_abit: a = bnb.nn.Linearabit( module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , ) else: raise ValueError("""load_in_8bit and load_in_4bit can't be both False""" ) a = module.weight.data if module.bias is not None: a = module.bias.data bnb_module.requires_grad_(__lowerCamelCase ) setattr(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) a = True if len(list(module.children() ) ) > 0: a , a = _replace_with_bnb_layers( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) a = has_been_replaced | _has_been_replaced # Remove the last key for recursion current_key_name.pop(-1 ) return model, has_been_replaced def __A ( __lowerCamelCase ) -> Optional[int]: # Create a copy of the model with init_empty_weights(): a = deepcopy(__lowerCamelCase ) # this has 0 cost since it is done inside `init_empty_weights` context manager` a = find_tied_parameters(__lowerCamelCase ) # For compatibility with Accelerate < 0.18 if isinstance(__lowerCamelCase , __lowerCamelCase ): a = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() ) else: a = sum(__lowerCamelCase , [] ) a = len(__lowerCamelCase ) > 0 # Check if it is a base model a = False if hasattr(__lowerCamelCase , """base_model_prefix""" ): a = not hasattr(__lowerCamelCase , model.base_model_prefix ) # Ignore this for base models (BertModel, GPT2Model, etc.) if (not has_tied_params) and is_base_model: return [] # otherwise they have an attached head a = list(model.named_children() ) a = [list_modules[-1][0]] # add last module together with tied weights a = set(__lowerCamelCase ) - set(__lowerCamelCase ) a = list(set(__lowerCamelCase ) ) + list(__lowerCamelCase ) # remove ".weight" from the keys a = [""".weight""", """.bias"""] a = [] for name in list_untouched: for name_to_remove in names_to_remove: if name_to_remove in name: a = name.replace(__lowerCamelCase , """""" ) filtered_module_names.append(__lowerCamelCase ) return filtered_module_names def __A ( __lowerCamelCase ) -> str: for m in model.modules(): if isinstance(__lowerCamelCase , bnb.nn.Linearabit ): return True return False def __A ( __lowerCamelCase ) -> int: return next(parameter.parameters() ).device def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Optional[int]: # if it is not quantized, we quantize and offload the quantized weights and the SCB stats if fpaa_statistics is None: set_module_tensor_to_device(__lowerCamelCase , __lowerCamelCase , 0 , dtype=__lowerCamelCase , value=__lowerCamelCase ) a = param_name a = model if "." in tensor_name: a = tensor_name.split(""".""" ) for split in splits[:-1]: a = getattr(__lowerCamelCase , __lowerCamelCase ) if new_module is None: raise ValueError(f'{module} has no attribute {split}.' ) a = new_module a = splits[-1] # offload weights a = False offload_weight(module._parameters[tensor_name] , __lowerCamelCase , __lowerCamelCase , index=__lowerCamelCase ) if hasattr(module._parameters[tensor_name] , """SCB""" ): offload_weight( module._parameters[tensor_name].SCB , param_name.replace("""weight""" , """SCB""" ) , __lowerCamelCase , index=__lowerCamelCase , ) else: offload_weight(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , index=__lowerCamelCase ) offload_weight(__lowerCamelCase , param_name.replace("""weight""" , """SCB""" ) , __lowerCamelCase , index=__lowerCamelCase ) set_module_tensor_to_device(__lowerCamelCase , __lowerCamelCase , """meta""" , dtype=__lowerCamelCase , value=torch.empty(*param.size() ) )
347
def __A ( __lowerCamelCase ) -> bool: if num < 0: return False a = num a = 0 while num > 0: a = rev_num * 10 + (num % 10) num //= 10 return num_copy == rev_num if __name__ == "__main__": import doctest doctest.testmod()
347
1
import unittest from transformers import EsmConfig, is_torch_available from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers.models.esm.modeling_esmfold import EsmForProteinFolding class __lowerCAmelCase : def __init__( self :Optional[int] , __magic_name__ :List[Any] , __magic_name__ :Optional[int]=13 , __magic_name__ :Union[str, Any]=7 , __magic_name__ :str=False , __magic_name__ :Optional[Any]=True , __magic_name__ :Optional[Any]=False , __magic_name__ :Dict=False , __magic_name__ :List[Any]=19 , __magic_name__ :List[str]=32 , __magic_name__ :Any=5 , __magic_name__ :str=4 , __magic_name__ :Union[str, Any]=37 , __magic_name__ :List[Any]="gelu" , __magic_name__ :List[str]=0.1 , __magic_name__ :str=0.1 , __magic_name__ :Union[str, Any]=512 , __magic_name__ :Tuple=16 , __magic_name__ :Optional[Any]=2 , __magic_name__ :List[str]=0.02 , __magic_name__ :List[Any]=3 , __magic_name__ :Tuple=4 , __magic_name__ :Any=None , ): '''simple docstring''' a = parent a = batch_size a = seq_length a = is_training a = use_input_mask a = use_token_type_ids a = use_labels a = vocab_size a = hidden_size a = num_hidden_layers a = num_attention_heads a = intermediate_size a = hidden_act a = hidden_dropout_prob a = attention_probs_dropout_prob a = max_position_embeddings a = type_vocab_size a = type_sequence_label_size a = initializer_range a = num_labels a = num_choices a = scope def lowerCamelCase__ ( self :str ): '''simple docstring''' a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) a = None if self.use_input_mask: a = random_attention_mask([self.batch_size, self.seq_length] ) a = None a = None a = None if self.use_labels: a = ids_tensor([self.batch_size] , self.type_sequence_label_size ) a = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) a = ids_tensor([self.batch_size] , self.num_choices ) a = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCamelCase__ ( self :int ): '''simple docstring''' a = EsmConfig( vocab_size=33 , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , is_folding_model=__magic_name__ , esmfold_config={"""trunk""": {"""num_blocks""": 2}, """fp16_esm""": False} , ) return config def lowerCamelCase__ ( self :int , __magic_name__ :List[Any] , __magic_name__ :List[str] , __magic_name__ :Optional[Any] , __magic_name__ :Optional[int] , __magic_name__ :Union[str, Any] , __magic_name__ :str ): '''simple docstring''' a = EsmForProteinFolding(config=__magic_name__ ).float() model.to(__magic_name__ ) model.eval() a = model(__magic_name__ , attention_mask=__magic_name__ ) a = model(__magic_name__ ) a = model(__magic_name__ ) self.parent.assertEqual(result.positions.shape , (8, self.batch_size, self.seq_length, 14, 3) ) self.parent.assertEqual(result.angles.shape , (8, self.batch_size, self.seq_length, 7, 2) ) def lowerCamelCase__ ( self :List[str] ): '''simple docstring''' a = self.prepare_config_and_inputs() ( ( a ) , ( a ) , ( a ) , ( a ) , ( a ) , ( a ) , ) = config_and_inputs a = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class __lowerCAmelCase ( __magic_name__ , __magic_name__ , unittest.TestCase ): UpperCamelCase__ = False UpperCamelCase__ = (EsmForProteinFolding,) if is_torch_available() else () UpperCamelCase__ = () UpperCamelCase__ = {} if is_torch_available() else {} UpperCamelCase__ = False def lowerCamelCase__ ( self :Any ): '''simple docstring''' a = EsmFoldModelTester(self ) a = ConfigTester(self , config_class=__magic_name__ , hidden_size=37 ) def lowerCamelCase__ ( self :Any ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCamelCase__ ( self :Any ): '''simple docstring''' a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__magic_name__ ) @unittest.skip("""Does not support attention outputs""" ) def lowerCamelCase__ ( self :Tuple ): '''simple docstring''' pass @unittest.skip def lowerCamelCase__ ( self :Optional[int] ): '''simple docstring''' pass @unittest.skip("""Esm does not support embedding resizing""" ) def lowerCamelCase__ ( self :List[Any] ): '''simple docstring''' pass @unittest.skip("""Esm does not support embedding resizing""" ) def lowerCamelCase__ ( self :Tuple ): '''simple docstring''' pass @unittest.skip("""ESMFold does not support passing input embeds!""" ) def lowerCamelCase__ ( self :List[Any] ): '''simple docstring''' pass @unittest.skip("""ESMFold does not support head pruning.""" ) def lowerCamelCase__ ( self :int ): '''simple docstring''' pass @unittest.skip("""ESMFold does not support head pruning.""" ) def lowerCamelCase__ ( self :List[str] ): '''simple docstring''' pass @unittest.skip("""ESMFold does not support head pruning.""" ) def lowerCamelCase__ ( self :Dict ): '''simple docstring''' pass @unittest.skip("""ESMFold does not support head pruning.""" ) def lowerCamelCase__ ( self :Any ): '''simple docstring''' pass @unittest.skip("""ESMFold does not support head pruning.""" ) def lowerCamelCase__ ( self :Any ): '''simple docstring''' pass @unittest.skip("""ESMFold does not output hidden states in the normal way.""" ) def lowerCamelCase__ ( self :int ): '''simple docstring''' pass @unittest.skip("""ESMfold does not output hidden states in the normal way.""" ) def lowerCamelCase__ ( self :List[str] ): '''simple docstring''' pass @unittest.skip("""ESMFold only has one output format.""" ) def lowerCamelCase__ ( self :List[str] ): '''simple docstring''' pass @unittest.skip("""This test doesn't work for ESMFold and doesn't test core functionality""" ) def lowerCamelCase__ ( self :Optional[int] ): '''simple docstring''' pass @unittest.skip("""ESMFold does not support input chunking.""" ) def lowerCamelCase__ ( self :List[str] ): '''simple docstring''' pass @unittest.skip("""ESMFold doesn't respect you and it certainly doesn't respect your initialization arguments.""" ) def lowerCamelCase__ ( self :List[str] ): '''simple docstring''' pass @unittest.skip("""ESMFold doesn't support torchscript compilation.""" ) def lowerCamelCase__ ( self :Optional[int] ): '''simple docstring''' pass @unittest.skip("""ESMFold doesn't support torchscript compilation.""" ) def lowerCamelCase__ ( self :Optional[Any] ): '''simple docstring''' pass @unittest.skip("""ESMFold doesn't support torchscript compilation.""" ) def lowerCamelCase__ ( self :Optional[int] ): '''simple docstring''' pass @unittest.skip("""ESMFold doesn't support data parallel.""" ) def lowerCamelCase__ ( self :Any ): '''simple docstring''' pass @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def lowerCamelCase__ ( self :List[str] ): '''simple docstring''' pass @require_torch class __lowerCAmelCase ( __magic_name__ ): @slow def lowerCamelCase__ ( self :int ): '''simple docstring''' a = EsmForProteinFolding.from_pretrained("""facebook/esmfold_v1""" ).float() model.eval() a = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] ) a = model(__magic_name__ )["""positions"""] a = torch.tensor([2.5828, 0.7993, -10.9334] , dtype=torch.floataa ) self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0] , __magic_name__ , atol=1E-4 ) )
347
import json import os import shutil import tempfile import unittest from transformers import BatchEncoding, CanineTokenizer from transformers.testing_utils import require_tokenizers, require_torch from transformers.tokenization_utils import AddedToken from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ): UpperCamelCase__ = CanineTokenizer UpperCamelCase__ = False def lowerCamelCase__ ( self :Tuple ): '''simple docstring''' super().setUp() a = CanineTokenizer() tokenizer.save_pretrained(self.tmpdirname ) @cached_property def lowerCamelCase__ ( self :Dict ): '''simple docstring''' return CanineTokenizer.from_pretrained("""google/canine-s""" ) def lowerCamelCase__ ( self :Tuple , **__magic_name__ :Dict ): '''simple docstring''' a = self.tokenizer_class.from_pretrained(self.tmpdirname , **__magic_name__ ) a = 1024 return tokenizer @require_torch def lowerCamelCase__ ( self :int ): '''simple docstring''' a = self.canine_tokenizer a = ["""Life is like a box of chocolates.""", """You never know what you're gonna get."""] # fmt: off a = [5_7344, 76, 105, 102, 101, 32, 105, 115, 32, 108, 105, 107, 101, 32, 97, 32, 98, 111, 120, 32, 111, 102, 32, 99, 104, 111, 99, 111, 108, 97, 116, 101, 115, 46, 5_7345, 0, 0, 0, 0] # fmt: on a = tokenizer(__magic_name__ , padding=__magic_name__ , return_tensors="""pt""" ) self.assertIsInstance(__magic_name__ , __magic_name__ ) a = list(batch.input_ids.numpy()[0] ) self.assertListEqual(__magic_name__ , __magic_name__ ) self.assertEqual((2, 39) , batch.input_ids.shape ) self.assertEqual((2, 39) , batch.attention_mask.shape ) @require_torch def lowerCamelCase__ ( self :Any ): '''simple docstring''' a = self.canine_tokenizer a = ["""Once there was a man.""", """He wrote a test in HuggingFace Tranformers."""] a = tokenizer(__magic_name__ , padding=__magic_name__ , return_tensors="""pt""" ) # check if input_ids, attention_mask and token_type_ids are returned self.assertIn("""input_ids""" , __magic_name__ ) self.assertIn("""attention_mask""" , __magic_name__ ) self.assertIn("""token_type_ids""" , __magic_name__ ) @require_torch def lowerCamelCase__ ( self :Any ): '''simple docstring''' a = self.canine_tokenizer a = [ """What's the weater?""", """It's about 25 degrees.""", ] a = tokenizer( text_target=__magic_name__ , max_length=32 , padding="""max_length""" , truncation=__magic_name__ , return_tensors="""pt""" ) self.assertEqual(32 , targets["""input_ids"""].shape[1] ) def lowerCamelCase__ ( self :Union[str, Any] ): '''simple docstring''' a = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): self.assertNotEqual(tokenizer.model_max_length , 42 ) # Now let's start the test a = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): # Isolate this from the other tests because we save additional tokens/etc a = tempfile.mkdtemp() a = """ He is very happy, UNwant\u00E9d,running""" a = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) tokenizer.save_pretrained(__magic_name__ ) a = tokenizer.__class__.from_pretrained(__magic_name__ ) a = after_tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) self.assertListEqual(__magic_name__ , __magic_name__ ) shutil.rmtree(__magic_name__ ) a = self.get_tokenizers(model_max_length=42 ) for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): # Isolate this from the other tests because we save additional tokens/etc a = tempfile.mkdtemp() a = """ He is very happy, UNwant\u00E9d,running""" a = tokenizer.additional_special_tokens # We can add a new special token for Canine as follows: a = chr(0Xe_0_0_7 ) additional_special_tokens.append(__magic_name__ ) tokenizer.add_special_tokens({"""additional_special_tokens""": additional_special_tokens} ) a = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) tokenizer.save_pretrained(__magic_name__ ) a = tokenizer.__class__.from_pretrained(__magic_name__ ) a = after_tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) self.assertListEqual(__magic_name__ , __magic_name__ ) self.assertIn(__magic_name__ , after_tokenizer.additional_special_tokens ) self.assertEqual(after_tokenizer.model_max_length , 42 ) a = tokenizer.__class__.from_pretrained(__magic_name__ , model_max_length=43 ) self.assertEqual(tokenizer.model_max_length , 43 ) shutil.rmtree(__magic_name__ ) def lowerCamelCase__ ( self :List[Any] ): '''simple docstring''' a = self.get_tokenizers(do_lower_case=__magic_name__ ) for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): a , a = self.get_clean_sequence(__magic_name__ ) # a special token for Canine can be defined as follows: a = 0Xe_0_0_5 a = chr(__magic_name__ ) tokenizer.add_special_tokens({"""cls_token""": special_token} ) a = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) self.assertEqual(len(__magic_name__ ) , 1 ) a = tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=__magic_name__ ) a = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) a = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) a = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) self.assertEqual(__magic_name__ , input_encoded + special_token_id ) a = tokenizer.decode(__magic_name__ , skip_special_tokens=__magic_name__ ) self.assertTrue(special_token not in decoded ) def lowerCamelCase__ ( self :int ): '''simple docstring''' a = self.get_tokenizers(do_lower_case=__magic_name__ ) for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): a = chr(0Xe_0_0_5 ) a = chr(0Xe_0_0_6 ) # `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py) tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=__magic_name__ ) # `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`, # which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py) tokenizer.add_special_tokens({"""additional_special_tokens""": [SPECIAL_TOKEN_2]} ) a = tokenizer.tokenize(__magic_name__ ) a = tokenizer.tokenize(__magic_name__ ) self.assertEqual(len(__magic_name__ ) , 1 ) self.assertEqual(len(__magic_name__ ) , 1 ) self.assertEqual(token_a[0] , __magic_name__ ) self.assertEqual(token_a[0] , __magic_name__ ) @require_tokenizers def lowerCamelCase__ ( self :Dict ): '''simple docstring''' a = self.get_tokenizers(do_lower_case=__magic_name__ ) for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): # a special token for Canine can be defined as follows: a = 0Xe_0_0_6 a = chr(__magic_name__ ) a = AddedToken(__magic_name__ , lstrip=__magic_name__ ) tokenizer.add_special_tokens({"""additional_special_tokens""": [new_token]} ) with tempfile.TemporaryDirectory() as tmp_dir_name: tokenizer.save_pretrained(__magic_name__ ) tokenizer.from_pretrained(__magic_name__ ) def lowerCamelCase__ ( self :int ): '''simple docstring''' a = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(__magic_name__ ) with open(os.path.join(__magic_name__ , """special_tokens_map.json""" ) , encoding="""utf-8""" ) as json_file: a = json.load(__magic_name__ ) with open(os.path.join(__magic_name__ , """tokenizer_config.json""" ) , encoding="""utf-8""" ) as json_file: a = json.load(__magic_name__ ) # a special token for Canine can be defined as follows: a = 0Xe_0_0_6 a = chr(__magic_name__ ) a = [new_token_a] a = [new_token_a] with open(os.path.join(__magic_name__ , """special_tokens_map.json""" ) , """w""" , encoding="""utf-8""" ) as outfile: json.dump(__magic_name__ , __magic_name__ ) with open(os.path.join(__magic_name__ , """tokenizer_config.json""" ) , """w""" , encoding="""utf-8""" ) as outfile: json.dump(__magic_name__ , __magic_name__ ) # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files a = tokenizer_class.from_pretrained(__magic_name__ , extra_ids=0 ) self.assertIn(__magic_name__ , tokenizer_without_change_in_init.additional_special_tokens ) # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab self.assertEqual( [new_token_a] , tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , ) a = 0Xe_0_0_7 a = chr(__magic_name__ ) # Now we test that we can change the value of additional_special_tokens in the from_pretrained a = [AddedToken(__magic_name__ , lstrip=__magic_name__ )] a = tokenizer_class.from_pretrained( __magic_name__ , additional_special_tokens=__magic_name__ , extra_ids=0 ) self.assertIn(__magic_name__ , tokenizer.additional_special_tokens ) # self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab self.assertEqual( [new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) ) @require_tokenizers def lowerCamelCase__ ( self :Union[str, Any] ): '''simple docstring''' a = self.get_tokenizers(do_lower_case=__magic_name__ ) for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): a = """hello world""" if self.space_between_special_tokens: a = """[CLS] hello world [SEP]""" else: a = input a = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) a = tokenizer.decode(__magic_name__ , spaces_between_special_tokens=self.space_between_special_tokens ) self.assertIn(__magic_name__ , [output, output.lower()] ) def lowerCamelCase__ ( self :List[str] ): '''simple docstring''' a = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): a = [ """bos_token""", """eos_token""", """unk_token""", """sep_token""", """pad_token""", """cls_token""", """mask_token""", ] a = """a""" a = ord(__magic_name__ ) for attr in attributes_list: setattr(__magic_name__ , attr + """_id""" , __magic_name__ ) self.assertEqual(getattr(__magic_name__ , __magic_name__ ) , __magic_name__ ) self.assertEqual(getattr(__magic_name__ , attr + """_id""" ) , __magic_name__ ) setattr(__magic_name__ , attr + """_id""" , __magic_name__ ) self.assertEqual(getattr(__magic_name__ , __magic_name__ ) , __magic_name__ ) self.assertEqual(getattr(__magic_name__ , attr + """_id""" ) , __magic_name__ ) setattr(__magic_name__ , """additional_special_tokens_ids""" , [] ) self.assertListEqual(getattr(__magic_name__ , """additional_special_tokens""" ) , [] ) self.assertListEqual(getattr(__magic_name__ , """additional_special_tokens_ids""" ) , [] ) a = 0Xe_0_0_6 a = chr(__magic_name__ ) setattr(__magic_name__ , """additional_special_tokens_ids""" , [additional_special_token_id] ) self.assertListEqual(getattr(__magic_name__ , """additional_special_tokens""" ) , [additional_special_token] ) self.assertListEqual(getattr(__magic_name__ , """additional_special_tokens_ids""" ) , [additional_special_token_id] ) def lowerCamelCase__ ( self :Any ): '''simple docstring''' pass def lowerCamelCase__ ( self :str ): '''simple docstring''' pass def lowerCamelCase__ ( self :Optional[int] ): '''simple docstring''' pass def lowerCamelCase__ ( self :List[Any] ): '''simple docstring''' pass def lowerCamelCase__ ( self :List[str] ): '''simple docstring''' pass def lowerCamelCase__ ( self :List[str] ): '''simple docstring''' pass def lowerCamelCase__ ( self :Any ): '''simple docstring''' pass def lowerCamelCase__ ( self :List[str] ): '''simple docstring''' pass
347
1
from dataclasses import dataclass, field from typing import Tuple from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends from .benchmark_args_utils import BenchmarkArguments if is_torch_available(): import torch if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm __UpperCamelCase : int = logging.get_logger(__name__) @dataclass class __lowerCAmelCase ( __magic_name__ ): UpperCamelCase__ = [ '''no_inference''', '''no_cuda''', '''no_tpu''', '''no_speed''', '''no_memory''', '''no_env_print''', '''no_multi_process''', ] def __init__( self :str , **__magic_name__ :Dict ): '''simple docstring''' for deprecated_arg in self.deprecated_args: if deprecated_arg in kwargs: a = deprecated_arg[3:] setattr(self , __magic_name__ , not kwargs.pop(__magic_name__ ) ) logger.warning( F'{deprecated_arg} is depreciated. Please use --no_{positive_arg} or' F' {positive_arg}={kwargs[positive_arg]}' ) a = kwargs.pop("""torchscript""" , self.torchscript ) a = kwargs.pop("""torch_xla_tpu_print_metrics""" , self.torch_xla_tpu_print_metrics ) a = kwargs.pop("""fp16_opt_level""" , self.fpaa_opt_level ) super().__init__(**__magic_name__ ) UpperCamelCase__ = field(default=__magic_name__ , metadata={'''help''': '''Trace the models using torchscript'''} ) UpperCamelCase__ = field(default=__magic_name__ , metadata={'''help''': '''Print Xla/PyTorch tpu metrics'''} ) UpperCamelCase__ = field( default='''O1''' , metadata={ '''help''': ( '''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\']. ''' '''See details at https://nvidia.github.io/apex/amp.html''' ) } , ) @cached_property def lowerCamelCase__ ( self :Dict ): '''simple docstring''' requires_backends(self , ["""torch"""] ) logger.info("""PyTorch: setting up devices""" ) if not self.cuda: a = torch.device("""cpu""" ) a = 0 elif is_torch_tpu_available(): a = xm.xla_device() a = 0 else: a = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" ) a = torch.cuda.device_count() return device, n_gpu @property def lowerCamelCase__ ( self :str ): '''simple docstring''' return is_torch_tpu_available() and self.tpu @property def lowerCamelCase__ ( self :List[Any] ): '''simple docstring''' requires_backends(self , ["""torch"""] ) # TODO(PVP): currently only single GPU is supported return torch.cuda.current_device() @property def lowerCamelCase__ ( self :List[Any] ): '''simple docstring''' requires_backends(self , ["""torch"""] ) return self._setup_devices[0] @property def lowerCamelCase__ ( self :Tuple ): '''simple docstring''' requires_backends(self , ["""torch"""] ) return self._setup_devices[1] @property def lowerCamelCase__ ( self :List[str] ): '''simple docstring''' return self.n_gpu > 0
347
def __A ( __lowerCamelCase ) -> bool: return number & 1 == 0 if __name__ == "__main__": import doctest doctest.testmod()
347
1
def __A ( __lowerCamelCase ) -> int: if not numbers: return 0 if not isinstance(__lowerCamelCase , (list, tuple) ) or not all( isinstance(__lowerCamelCase , __lowerCamelCase ) for number in numbers ): raise ValueError("""numbers must be an iterable of integers""" ) a = a = a = numbers[0] for i in range(1 , len(__lowerCamelCase ) ): # update the maximum and minimum subarray products a = numbers[i] if number < 0: a , a = min_till_now, max_till_now a = max(__lowerCamelCase , max_till_now * number ) a = min(__lowerCamelCase , min_till_now * number ) # update the maximum product found till now a = max(__lowerCamelCase , __lowerCamelCase ) return max_prod
347
def __A ( __lowerCamelCase ) -> int: if not numbers: return 0 if not isinstance(__lowerCamelCase , (list, tuple) ) or not all( isinstance(__lowerCamelCase , __lowerCamelCase ) for number in numbers ): raise ValueError("""numbers must be an iterable of integers""" ) a = a = a = numbers[0] for i in range(1 , len(__lowerCamelCase ) ): # update the maximum and minimum subarray products a = numbers[i] if number < 0: a , a = min_till_now, max_till_now a = max(__lowerCamelCase , max_till_now * number ) a = min(__lowerCamelCase , min_till_now * number ) # update the maximum product found till now a = max(__lowerCamelCase , __lowerCamelCase ) return max_prod
347
1
import json import os import unittest from transformers.models.blenderbot_small.tokenization_blenderbot_small import ( VOCAB_FILES_NAMES, BlenderbotSmallTokenizer, ) from ...test_tokenization_common import TokenizerTesterMixin class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ): UpperCamelCase__ = BlenderbotSmallTokenizer UpperCamelCase__ = False def lowerCamelCase__ ( self :List[Any] ): '''simple docstring''' super().setUp() a = ["""__start__""", """adapt""", """act""", """ap@@""", """te""", """__end__""", """__unk__"""] a = dict(zip(__magic_name__ , range(len(__magic_name__ ) ) ) ) a = ["""#version: 0.2""", """a p""", """t e</w>""", """ap t</w>""", """a d""", """ad apt</w>""", """a c""", """ac t</w>""", """"""] a = {"""unk_token""": """__unk__""", """bos_token""": """__start__""", """eos_token""": """__end__"""} a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp: fp.write(json.dumps(__magic_name__ ) + """\n""" ) with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp: fp.write("""\n""".join(__magic_name__ ) ) def lowerCamelCase__ ( self :Tuple , **__magic_name__ :Optional[Any] ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **__magic_name__ ) def lowerCamelCase__ ( self :int , __magic_name__ :Dict ): '''simple docstring''' a = """adapt act apte""" a = """adapt act apte""" return input_text, output_text def lowerCamelCase__ ( self :Optional[int] ): '''simple docstring''' a = BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) a = """adapt act apte""" a = ["""adapt""", """act""", """ap@@""", """te"""] a = tokenizer.tokenize(__magic_name__ ) self.assertListEqual(__magic_name__ , __magic_name__ ) a = [tokenizer.bos_token] + tokens + [tokenizer.eos_token] a = [0, 1, 2, 3, 4, 5] self.assertListEqual(tokenizer.convert_tokens_to_ids(__magic_name__ ) , __magic_name__ ) def lowerCamelCase__ ( self :str ): '''simple docstring''' a = BlenderbotSmallTokenizer.from_pretrained("""facebook/blenderbot-90M""" ) assert tok("""sam""" ).input_ids == [1384] a = """I am a small frog.""" a = tok([src_text] , padding=__magic_name__ , truncation=__magic_name__ )["""input_ids"""] a = tok.batch_decode(__magic_name__ , skip_special_tokens=__magic_name__ , clean_up_tokenization_spaces=__magic_name__ )[0] assert src_text != decoded # I wish it did! assert decoded == "i am a small frog ." def lowerCamelCase__ ( self :Union[str, Any] ): '''simple docstring''' a = BlenderbotSmallTokenizer.from_pretrained("""facebook/blenderbot-90M""" ) a = """I am a small frog .""" a = """.""" a = tok(__magic_name__ )["""input_ids"""] a = tok(__magic_name__ )["""input_ids"""] assert encoded[-1] == encoded_dot[0]
347
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) __UpperCamelCase : Optional[Any] = { "configuration_wav2vec2": ["WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Wav2Vec2Config"], "feature_extraction_wav2vec2": ["Wav2Vec2FeatureExtractor"], "processing_wav2vec2": ["Wav2Vec2Processor"], "tokenization_wav2vec2": ["Wav2Vec2CTCTokenizer", "Wav2Vec2Tokenizer"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : str = [ "WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST", "Wav2Vec2ForAudioFrameClassification", "Wav2Vec2ForCTC", "Wav2Vec2ForMaskedLM", "Wav2Vec2ForPreTraining", "Wav2Vec2ForSequenceClassification", "Wav2Vec2ForXVector", "Wav2Vec2Model", "Wav2Vec2PreTrainedModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : Dict = [ "TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST", "TFWav2Vec2ForCTC", "TFWav2Vec2Model", "TFWav2Vec2PreTrainedModel", "TFWav2Vec2ForSequenceClassification", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : Union[str, Any] = [ "FlaxWav2Vec2ForCTC", "FlaxWav2Vec2ForPreTraining", "FlaxWav2Vec2Model", "FlaxWav2Vec2PreTrainedModel", ] if TYPE_CHECKING: from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig from .feature_extraction_wavaveca import WavaVecaFeatureExtractor from .processing_wavaveca import WavaVecaProcessor from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_wavaveca import ( WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, WavaVecaForAudioFrameClassification, WavaVecaForCTC, WavaVecaForMaskedLM, WavaVecaForPreTraining, WavaVecaForSequenceClassification, WavaVecaForXVector, WavaVecaModel, WavaVecaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_wavaveca import ( TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, TFWavaVecaForCTC, TFWavaVecaForSequenceClassification, TFWavaVecaModel, TFWavaVecaPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_wavaveca import ( FlaxWavaVecaForCTC, FlaxWavaVecaForPreTraining, FlaxWavaVecaModel, FlaxWavaVecaPreTrainedModel, ) else: import sys __UpperCamelCase : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
347
1
from maths.prime_factors import prime_factors def __A ( __lowerCamelCase ) -> int: if not isinstance(__lowerCamelCase , __lowerCamelCase ): a = f'Input value of [number={number}] must be an integer' raise TypeError(__lowerCamelCase ) if number < 1: raise ValueError("""Input must be a positive integer""" ) return -1 if len(prime_factors(__lowerCamelCase ) ) % 2 else 1 if __name__ == "__main__": import doctest doctest.testmod()
347
import pytest import datasets.config from datasets.utils.info_utils import is_small_dataset @pytest.mark.parametrize("""dataset_size""" , [None, 400 * 2**20, 600 * 2**20] ) @pytest.mark.parametrize("""input_in_memory_max_size""" , ["""default""", 0, 100 * 2**20, 900 * 2**20] ) def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Any: if input_in_memory_max_size != "default": monkeypatch.setattr(datasets.config , """IN_MEMORY_MAX_SIZE""" , __lowerCamelCase ) a = datasets.config.IN_MEMORY_MAX_SIZE if input_in_memory_max_size == "default": assert in_memory_max_size == 0 else: assert in_memory_max_size == input_in_memory_max_size if dataset_size and in_memory_max_size: a = dataset_size < in_memory_max_size else: a = False a = is_small_dataset(__lowerCamelCase ) assert result == expected
347
1
from __future__ import annotations import os import tempfile import unittest from transformers import ConvBertConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFConvBertForMaskedLM, TFConvBertForMultipleChoice, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertModel, ) class __lowerCAmelCase : def __init__( self :int , __magic_name__ :Optional[Any] , __magic_name__ :Dict=13 , __magic_name__ :Any=7 , __magic_name__ :Optional[int]=True , __magic_name__ :Optional[Any]=True , __magic_name__ :int=True , __magic_name__ :int=True , __magic_name__ :List[Any]=99 , __magic_name__ :List[str]=32 , __magic_name__ :Any=2 , __magic_name__ :int=4 , __magic_name__ :List[str]=37 , __magic_name__ :Tuple="gelu" , __magic_name__ :Union[str, Any]=0.1 , __magic_name__ :Any=0.1 , __magic_name__ :Optional[int]=512 , __magic_name__ :Dict=16 , __magic_name__ :Tuple=2 , __magic_name__ :Optional[int]=0.02 , __magic_name__ :Tuple=3 , __magic_name__ :Any=4 , __magic_name__ :str=None , ): '''simple docstring''' a = parent a = 13 a = 7 a = True a = True a = True a = True a = 99 a = 384 a = 2 a = 4 a = 37 a = """gelu""" a = 0.1 a = 0.1 a = 512 a = 16 a = 2 a = 0.02 a = 3 a = 4 a = 128 a = 2 a = 9 a = 1 a = None def lowerCamelCase__ ( self :str ): '''simple docstring''' a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) a = None if self.use_input_mask: a = random_attention_mask([self.batch_size, self.seq_length] ) a = None if self.use_token_type_ids: a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) a = None a = None a = None if self.use_labels: a = ids_tensor([self.batch_size] , self.type_sequence_label_size ) a = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) a = ids_tensor([self.batch_size] , self.num_choices ) a = ConvBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=__magic_name__ , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCamelCase__ ( self :Union[str, Any] , __magic_name__ :Any , __magic_name__ :str , __magic_name__ :List[Any] , __magic_name__ :Optional[Any] , __magic_name__ :Tuple , __magic_name__ :List[Any] , __magic_name__ :List[Any] ): '''simple docstring''' a = TFConvBertModel(config=__magic_name__ ) a = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} a = [input_ids, input_mask] a = model(__magic_name__ ) a = model(__magic_name__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCamelCase__ ( self :Dict , __magic_name__ :Union[str, Any] , __magic_name__ :Tuple , __magic_name__ :Dict , __magic_name__ :List[str] , __magic_name__ :Tuple , __magic_name__ :str , __magic_name__ :Dict ): '''simple docstring''' a = TFConvBertForMaskedLM(config=__magic_name__ ) a = { """input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids, } a = model(__magic_name__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCamelCase__ ( self :Tuple , __magic_name__ :str , __magic_name__ :Any , __magic_name__ :Any , __magic_name__ :Tuple , __magic_name__ :Dict , __magic_name__ :Optional[int] , __magic_name__ :Dict ): '''simple docstring''' a = self.num_labels a = TFConvBertForSequenceClassification(config=__magic_name__ ) a = { """input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids, } a = model(__magic_name__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase__ ( self :Optional[int] , __magic_name__ :Any , __magic_name__ :int , __magic_name__ :Optional[Any] , __magic_name__ :Dict , __magic_name__ :int , __magic_name__ :List[str] , __magic_name__ :int ): '''simple docstring''' a = self.num_choices a = TFConvBertForMultipleChoice(config=__magic_name__ ) a = tf.tile(tf.expand_dims(__magic_name__ , 1 ) , (1, self.num_choices, 1) ) a = tf.tile(tf.expand_dims(__magic_name__ , 1 ) , (1, self.num_choices, 1) ) a = tf.tile(tf.expand_dims(__magic_name__ , 1 ) , (1, self.num_choices, 1) ) a = { """input_ids""": multiple_choice_inputs_ids, """attention_mask""": multiple_choice_input_mask, """token_type_ids""": multiple_choice_token_type_ids, } a = model(__magic_name__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCamelCase__ ( self :int , __magic_name__ :str , __magic_name__ :Any , __magic_name__ :Optional[int] , __magic_name__ :Any , __magic_name__ :Any , __magic_name__ :List[Any] , __magic_name__ :Optional[Any] ): '''simple docstring''' a = self.num_labels a = TFConvBertForTokenClassification(config=__magic_name__ ) a = { """input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids, } a = model(__magic_name__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCamelCase__ ( self :List[str] , __magic_name__ :List[Any] , __magic_name__ :int , __magic_name__ :Union[str, Any] , __magic_name__ :List[str] , __magic_name__ :List[str] , __magic_name__ :Dict , __magic_name__ :Any ): '''simple docstring''' a = TFConvBertForQuestionAnswering(config=__magic_name__ ) a = { """input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids, } a = model(__magic_name__ ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCamelCase__ ( self :Optional[int] ): '''simple docstring''' a = self.prepare_config_and_inputs() ( ( a ) , ( a ) , ( a ) , ( a ) , ( a ) , ( a ) , ( a ) , ) = config_and_inputs a = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_tf class __lowerCAmelCase ( __magic_name__ , __magic_name__ , unittest.TestCase ): UpperCamelCase__ = ( ( TFConvBertModel, TFConvBertForMaskedLM, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertForMultipleChoice, ) if is_tf_available() else () ) UpperCamelCase__ = ( { '''feature-extraction''': TFConvBertModel, '''fill-mask''': TFConvBertForMaskedLM, '''question-answering''': TFConvBertForQuestionAnswering, '''text-classification''': TFConvBertForSequenceClassification, '''token-classification''': TFConvBertForTokenClassification, '''zero-shot''': TFConvBertForSequenceClassification, } if is_tf_available() else {} ) UpperCamelCase__ = False UpperCamelCase__ = False UpperCamelCase__ = False def lowerCamelCase__ ( self :List[str] ): '''simple docstring''' a = TFConvBertModelTester(self ) a = ConfigTester(self , config_class=__magic_name__ , hidden_size=37 ) def lowerCamelCase__ ( self :List[Any] ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCamelCase__ ( self :Union[str, Any] ): '''simple docstring''' a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__magic_name__ ) def lowerCamelCase__ ( self :int ): '''simple docstring''' a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__magic_name__ ) def lowerCamelCase__ ( self :Dict ): '''simple docstring''' a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*__magic_name__ ) def lowerCamelCase__ ( self :Union[str, Any] ): '''simple docstring''' a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__magic_name__ ) def lowerCamelCase__ ( self :Optional[Any] ): '''simple docstring''' a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*__magic_name__ ) def lowerCamelCase__ ( self :Optional[int] ): '''simple docstring''' a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__magic_name__ ) @slow def lowerCamelCase__ ( self :Any ): '''simple docstring''' a , a = self.model_tester.prepare_config_and_inputs_for_common() a = True a = True if hasattr(__magic_name__ , """use_cache""" ): a = True a = getattr(self.model_tester , """encoder_seq_length""" , self.model_tester.seq_length ) a = getattr(self.model_tester , """key_length""" , __magic_name__ ) for model_class in self.all_model_classes: a = self._prepare_for_class(__magic_name__ , __magic_name__ ) a = model_class(__magic_name__ ) a = len(model(__magic_name__ ) ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(__magic_name__ , saved_model=__magic_name__ ) a = os.path.join(__magic_name__ , """saved_model""" , """1""" ) a = tf.keras.models.load_model(__magic_name__ ) a = model(__magic_name__ ) if self.is_encoder_decoder: a = outputs["""encoder_hidden_states"""] a = outputs["""encoder_attentions"""] else: a = outputs["""hidden_states"""] a = outputs["""attentions"""] self.assertEqual(len(__magic_name__ ) , __magic_name__ ) a = getattr( self.model_tester , """expected_num_hidden_layers""" , self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(__magic_name__ ) , __magic_name__ ) self.assertListEqual( list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , ) self.assertEqual(len(__magic_name__ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , ) @slow def lowerCamelCase__ ( self :Union[str, Any] ): '''simple docstring''' a = TFConvBertModel.from_pretrained("""YituTech/conv-bert-base""" ) self.assertIsNotNone(__magic_name__ ) def lowerCamelCase__ ( self :Union[str, Any] ): '''simple docstring''' a , a = self.model_tester.prepare_config_and_inputs_for_common() a = True a = getattr(self.model_tester , """decoder_seq_length""" , self.model_tester.seq_length ) a = getattr(self.model_tester , """encoder_seq_length""" , self.model_tester.seq_length ) a = getattr(self.model_tester , """key_length""" , __magic_name__ ) a = getattr(self.model_tester , """key_length""" , __magic_name__ ) def check_decoder_attentions_output(__magic_name__ :Dict ): a = len(__magic_name__ ) self.assertEqual(out_len % 2 , 0 ) a = outputs.decoder_attentions self.assertEqual(len(__magic_name__ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , ) def check_encoder_attentions_output(__magic_name__ :Tuple ): a = [ t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions) ] self.assertEqual(len(__magic_name__ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , ) for model_class in self.all_model_classes: a = True a = False a = model_class(__magic_name__ ) a = model(self._prepare_for_class(__magic_name__ , __magic_name__ ) ) a = len(__magic_name__ ) self.assertEqual(config.output_hidden_states , __magic_name__ ) check_encoder_attentions_output(__magic_name__ ) if self.is_encoder_decoder: a = model_class(__magic_name__ ) a = model(self._prepare_for_class(__magic_name__ , __magic_name__ ) ) self.assertEqual(config.output_hidden_states , __magic_name__ ) check_decoder_attentions_output(__magic_name__ ) # Check that output attentions can also be changed via the config del inputs_dict["output_attentions"] a = True a = model_class(__magic_name__ ) a = model(self._prepare_for_class(__magic_name__ , __magic_name__ ) ) self.assertEqual(config.output_hidden_states , __magic_name__ ) check_encoder_attentions_output(__magic_name__ ) # Check attention is always last and order is fine a = True a = True a = model_class(__magic_name__ ) a = model(self._prepare_for_class(__magic_name__ , __magic_name__ ) ) self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(__magic_name__ ) ) self.assertEqual(model.config.output_hidden_states , __magic_name__ ) check_encoder_attentions_output(__magic_name__ ) @require_tf class __lowerCAmelCase ( unittest.TestCase ): @slow def lowerCamelCase__ ( self :Optional[Any] ): '''simple docstring''' a = TFConvBertModel.from_pretrained("""YituTech/conv-bert-base""" ) a = tf.constant([[0, 1, 2, 3, 4, 5]] ) a = model(__magic_name__ )[0] a = [1, 6, 768] self.assertEqual(output.shape , __magic_name__ ) a = tf.constant( [ [ [-0.03475493, -0.4686034, -0.30638832], [0.22637248, -0.26988646, -0.7423424], [0.10324868, -0.45013508, -0.58280784], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , __magic_name__ , atol=1E-4 )
347
from __future__ import annotations from fractions import Fraction from math import gcd, sqrt def __A ( __lowerCamelCase ) -> bool: a = int(number**0.5 ) return number == sq * sq def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> tuple[int, int]: a = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den a = x_den * y_den * z_den a = gcd(__lowerCamelCase , __lowerCamelCase ) top //= hcf bottom //= hcf return top, bottom def __A ( __lowerCamelCase = 35 ) -> int: a = set() a = 42 a = Fraction(0 ) a = 42 for x_num in range(1 , order + 1 ): for x_den in range(x_num + 1 , order + 1 ): for y_num in range(1 , order + 1 ): for y_den in range(y_num + 1 , order + 1 ): # n=1 a = x_num * y_den + x_den * y_num a = x_den * y_den a = gcd(__lowerCamelCase , __lowerCamelCase ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: a = add_three( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) unique_s.add(__lowerCamelCase ) # n=2 a = ( x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num ) a = x_den * x_den * y_den * y_den if is_sq(__lowerCamelCase ) and is_sq(__lowerCamelCase ): a = int(sqrt(__lowerCamelCase ) ) a = int(sqrt(__lowerCamelCase ) ) a = gcd(__lowerCamelCase , __lowerCamelCase ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: a = add_three( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) unique_s.add(__lowerCamelCase ) # n=-1 a = x_num * y_num a = x_den * y_num + x_num * y_den a = gcd(__lowerCamelCase , __lowerCamelCase ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: a = add_three( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) unique_s.add(__lowerCamelCase ) # n=2 a = x_num * x_num * y_num * y_num a = ( x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den ) if is_sq(__lowerCamelCase ) and is_sq(__lowerCamelCase ): a = int(sqrt(__lowerCamelCase ) ) a = int(sqrt(__lowerCamelCase ) ) a = gcd(__lowerCamelCase , __lowerCamelCase ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: a = add_three( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) unique_s.add(__lowerCamelCase ) for num, den in unique_s: total += Fraction(__lowerCamelCase , __lowerCamelCase ) return total.denominator + total.numerator if __name__ == "__main__": print(F'{solution() = }')
347
1
from __future__ import annotations import string from itertools import cycle, product from pathlib import Path __UpperCamelCase : str = ( string.ascii_letters + string.digits + string.punctuation + string.whitespace ) __UpperCamelCase : list[int] = [ord(letter) for letter in string.ascii_lowercase] __UpperCamelCase : set[int] = {ord(char) for char in VALID_CHARS} __UpperCamelCase : list[str] = ["the", "be", "to", "of", "and", "in", "that", "have"] def __A ( __lowerCamelCase , __lowerCamelCase ) -> str | None: a = "" a = 42 a = 42 a = 42 for keychar, cipherchar in zip(cycle(__lowerCamelCase ) , __lowerCamelCase ): a = cipherchar ^ keychar if decodedchar not in VALID_INTS: return None decoded += chr(__lowerCamelCase ) return decoded def __A ( __lowerCamelCase ) -> list[str]: a = [] for key in product(__lowerCamelCase , repeat=3 ): a = try_key(__lowerCamelCase , __lowerCamelCase ) if encoded is not None: possibles.append(__lowerCamelCase ) return possibles def __A ( __lowerCamelCase , __lowerCamelCase ) -> list[str]: return [possible for possible in possibles if common_word in possible.lower()] def __A ( __lowerCamelCase = "p059_cipher.txt" ) -> int: a = 42 a = 42 a = 42 a = 42 a = Path(__lowerCamelCase ).parent.joinpath(__lowerCamelCase ).read_text(encoding="""utf-8""" ) a = [int(__lowerCamelCase ) for number in data.strip().split(""",""" )] a = filter_valid_chars(__lowerCamelCase ) for common_word in COMMON_WORDS: a = filter_common_word(__lowerCamelCase , __lowerCamelCase ) if len(__lowerCamelCase ) == 1: break a = possibles[0] return sum(ord(__lowerCamelCase ) for char in decoded_text ) if __name__ == "__main__": print(F'{solution() = }')
347
import unittest import numpy as np from transformers import RoFormerConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.roformer.modeling_flax_roformer import ( FlaxRoFormerForMaskedLM, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerModel, ) class __lowerCAmelCase ( unittest.TestCase ): def __init__( self :List[str] , __magic_name__ :List[str] , __magic_name__ :List[Any]=13 , __magic_name__ :Any=7 , __magic_name__ :Optional[int]=True , __magic_name__ :List[Any]=True , __magic_name__ :Optional[int]=True , __magic_name__ :Union[str, Any]=True , __magic_name__ :Any=99 , __magic_name__ :List[str]=32 , __magic_name__ :List[str]=5 , __magic_name__ :str=4 , __magic_name__ :str=37 , __magic_name__ :Optional[int]="gelu" , __magic_name__ :int=0.1 , __magic_name__ :Dict=0.1 , __magic_name__ :List[str]=512 , __magic_name__ :Tuple=16 , __magic_name__ :Tuple=2 , __magic_name__ :List[str]=0.02 , __magic_name__ :Any=4 , ): '''simple docstring''' a = parent a = batch_size a = seq_length a = is_training a = use_attention_mask a = use_token_type_ids a = use_labels a = vocab_size a = hidden_size a = num_hidden_layers a = num_attention_heads a = intermediate_size a = hidden_act a = hidden_dropout_prob a = attention_probs_dropout_prob a = max_position_embeddings a = type_vocab_size a = type_sequence_label_size a = initializer_range a = num_choices def lowerCamelCase__ ( self :Optional[int] ): '''simple docstring''' a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) a = None if self.use_attention_mask: a = random_attention_mask([self.batch_size, self.seq_length] ) a = None if self.use_token_type_ids: a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) a = RoFormerConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__magic_name__ , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def lowerCamelCase__ ( self :Optional[Any] ): '''simple docstring''' a = self.prepare_config_and_inputs() a , a , a , a = config_and_inputs a = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask} return config, inputs_dict @require_flax class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ): UpperCamelCase__ = True UpperCamelCase__ = ( ( FlaxRoFormerModel, FlaxRoFormerForMaskedLM, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, ) if is_flax_available() else () ) def lowerCamelCase__ ( self :List[Any] ): '''simple docstring''' a = FlaxRoFormerModelTester(self ) @slow def lowerCamelCase__ ( self :List[str] ): '''simple docstring''' for model_class_name in self.all_model_classes: a = model_class_name.from_pretrained("""junnyu/roformer_chinese_small""" , from_pt=__magic_name__ ) a = model(np.ones((1, 1) ) ) self.assertIsNotNone(__magic_name__ ) @require_flax class __lowerCAmelCase ( unittest.TestCase ): @slow def lowerCamelCase__ ( self :Union[str, Any] ): '''simple docstring''' a = FlaxRoFormerForMaskedLM.from_pretrained("""junnyu/roformer_chinese_base""" ) a = jnp.array([[0, 1, 2, 3, 4, 5]] ) a = model(__magic_name__ )[0] a = 5_0000 a = (1, 6, vocab_size) self.assertEqual(output.shape , __magic_name__ ) a = jnp.array( [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]] ) self.assertTrue(jnp.allclose(output[:, :3, :3] , __magic_name__ , atol=1E-4 ) )
347
1
import time from contextlib import contextmanager from pathlib import Path import pytest import requests from huggingface_hub.hf_api import HfApi, HfFolder __UpperCamelCase : List[str] = "__DUMMY_TRANSFORMERS_USER__" __UpperCamelCase : Optional[int] = "Dummy User" __UpperCamelCase : Union[str, Any] = "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" __UpperCamelCase : Dict = "https://hub-ci.huggingface.co" __UpperCamelCase : Tuple = CI_HUB_ENDPOINT + "/datasets/{repo_id}/resolve/{revision}/{path}" __UpperCamelCase : Union[str, Any] = CI_HUB_ENDPOINT + "/{repo_id}/resolve/{revision}/{filename}" __UpperCamelCase : Any = Path("~/.huggingface/hub_ci_token").expanduser() @pytest.fixture def __A ( __lowerCamelCase ) -> List[Any]: monkeypatch.setattr( """huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE""" , __lowerCamelCase ) @pytest.fixture def __A ( __lowerCamelCase ) -> Dict: monkeypatch.setattr("""datasets.config.HF_ENDPOINT""" , __lowerCamelCase ) monkeypatch.setattr("""datasets.config.HUB_DATASETS_URL""" , __lowerCamelCase ) @pytest.fixture def __A ( __lowerCamelCase ) -> Optional[Any]: monkeypatch.setattr("""huggingface_hub.hf_api.HfFolder.path_token""" , __lowerCamelCase ) @pytest.fixture def __A ( __lowerCamelCase , __lowerCamelCase ) -> Dict: HfFolder.save_token(__lowerCamelCase ) yield HfFolder.delete_token() @pytest.fixture(scope="""session""" ) def __A ( ) -> Any: return HfApi(endpoint=__lowerCamelCase ) @pytest.fixture(scope="""session""" ) def __A ( __lowerCamelCase ) -> Union[str, Any]: a = HfFolder.get_token() HfFolder.save_token(__lowerCamelCase ) yield CI_HUB_USER_TOKEN if previous_token is not None: HfFolder.save_token(__lowerCamelCase ) @pytest.fixture def __A ( __lowerCamelCase ) -> Dict: def _cleanup_repo(__lowerCamelCase ): hf_api.delete_repo(__lowerCamelCase , token=__lowerCamelCase , repo_type="""dataset""" ) return _cleanup_repo @pytest.fixture def __A ( __lowerCamelCase ) -> Tuple: @contextmanager def _temporary_repo(__lowerCamelCase ): try: yield repo_id finally: cleanup_repo(__lowerCamelCase ) return _temporary_repo @pytest.fixture(scope="""session""" ) def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> str: a = f'repo_txt_data-{int(time.time() * 1_0E3 )}' a = f'{CI_HUB_USER}/{repo_name}' hf_api.create_repo(__lowerCamelCase , token=__lowerCamelCase , repo_type="""dataset""" , private=__lowerCamelCase ) hf_api.upload_file( token=__lowerCamelCase , path_or_fileobj=str(__lowerCamelCase ) , path_in_repo="""data/text_data.txt""" , repo_id=__lowerCamelCase , repo_type="""dataset""" , ) yield repo_id try: hf_api.delete_repo(__lowerCamelCase , token=__lowerCamelCase , repo_type="""dataset""" ) except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> List[str]: return hf_private_dataset_repo_txt_data_ @pytest.fixture(scope="""session""" ) def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> List[str]: a = f'repo_zipped_txt_data-{int(time.time() * 1_0E3 )}' a = f'{CI_HUB_USER}/{repo_name}' hf_api.create_repo(__lowerCamelCase , token=__lowerCamelCase , repo_type="""dataset""" , private=__lowerCamelCase ) hf_api.upload_file( token=__lowerCamelCase , path_or_fileobj=str(__lowerCamelCase ) , path_in_repo="""data.zip""" , repo_id=__lowerCamelCase , repo_type="""dataset""" , ) yield repo_id try: hf_api.delete_repo(__lowerCamelCase , token=__lowerCamelCase , repo_type="""dataset""" ) except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> List[Any]: return hf_private_dataset_repo_zipped_txt_data_ @pytest.fixture(scope="""session""" ) def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> List[Any]: a = f'repo_zipped_img_data-{int(time.time() * 1_0E3 )}' a = f'{CI_HUB_USER}/{repo_name}' hf_api.create_repo(__lowerCamelCase , token=__lowerCamelCase , repo_type="""dataset""" , private=__lowerCamelCase ) hf_api.upload_file( token=__lowerCamelCase , path_or_fileobj=str(__lowerCamelCase ) , path_in_repo="""data.zip""" , repo_id=__lowerCamelCase , repo_type="""dataset""" , ) yield repo_id try: hf_api.delete_repo(__lowerCamelCase , token=__lowerCamelCase , repo_type="""dataset""" ) except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Optional[Any]: return hf_private_dataset_repo_zipped_img_data_
347
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __UpperCamelCase : Optional[int] = { "configuration_blenderbot": [ "BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BlenderbotConfig", "BlenderbotOnnxConfig", ], "tokenization_blenderbot": ["BlenderbotTokenizer"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : List[Any] = ["BlenderbotTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : Union[str, Any] = [ "BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST", "BlenderbotForCausalLM", "BlenderbotForConditionalGeneration", "BlenderbotModel", "BlenderbotPreTrainedModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : int = [ "TFBlenderbotForConditionalGeneration", "TFBlenderbotModel", "TFBlenderbotPreTrainedModel", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : Tuple = [ "FlaxBlenderbotForConditionalGeneration", "FlaxBlenderbotModel", "FlaxBlenderbotPreTrainedModel", ] if TYPE_CHECKING: from .configuration_blenderbot import ( BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP, BlenderbotConfig, BlenderbotOnnxConfig, ) from .tokenization_blenderbot import BlenderbotTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_blenderbot_fast import BlenderbotTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_blenderbot import ( BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST, BlenderbotForCausalLM, BlenderbotForConditionalGeneration, BlenderbotModel, BlenderbotPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_blenderbot import ( TFBlenderbotForConditionalGeneration, TFBlenderbotModel, TFBlenderbotPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_blenderbot import ( FlaxBlenderbotForConditionalGeneration, FlaxBlenderbotModel, FlaxBlenderbotPreTrainedModel, ) else: import sys __UpperCamelCase : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
347
1
import os from collections.abc import Iterator def __A ( __lowerCamelCase = "." ) -> Iterator[str]: for dir_path, dir_names, filenames in os.walk(__lowerCamelCase ): a = [d for d in dir_names if d != """scripts""" and d[0] not in """._"""] for filename in filenames: if filename == "__init__.py": continue if os.path.splitext(__lowerCamelCase )[1] in (".py", ".ipynb"): yield os.path.join(__lowerCamelCase , __lowerCamelCase ).lstrip("""./""" ) def __A ( __lowerCamelCase ) -> Dict: return f'{i * " "}*' if i else "\n##" def __A ( __lowerCamelCase , __lowerCamelCase ) -> str: a = old_path.split(os.sep ) for i, new_part in enumerate(new_path.split(os.sep ) ): if (i + 1 > len(__lowerCamelCase ) or old_parts[i] != new_part) and new_part: print(f'{md_prefix(__lowerCamelCase )} {new_part.replace("_" , " " ).title()}' ) return new_path def __A ( __lowerCamelCase = "." ) -> None: a = """""" for filepath in sorted(good_file_paths(__lowerCamelCase ) ): a , a = os.path.split(__lowerCamelCase ) if filepath != old_path: a = print_path(__lowerCamelCase , __lowerCamelCase ) a = (filepath.count(os.sep ) + 1) if filepath else 0 a = f'{filepath}/{filename}'.replace(""" """ , """%20""" ) a = os.path.splitext(filename.replace("""_""" , """ """ ).title() )[0] print(f'{md_prefix(__lowerCamelCase )} [{filename}]({url})' ) if __name__ == "__main__": print_directory_md(".")
347
import tempfile import torch from diffusers import IPNDMScheduler from .test_schedulers import SchedulerCommonTest class __lowerCAmelCase ( __magic_name__ ): UpperCamelCase__ = (IPNDMScheduler,) UpperCamelCase__ = (('''num_inference_steps''', 50),) def lowerCamelCase__ ( self :Any , **__magic_name__ :Optional[Any] ): '''simple docstring''' a = {"""num_train_timesteps""": 1000} config.update(**__magic_name__ ) return config def lowerCamelCase__ ( self :Optional[int] , __magic_name__ :Tuple=0 , **__magic_name__ :Optional[int] ): '''simple docstring''' a = dict(self.forward_default_kwargs ) a = kwargs.pop("""num_inference_steps""" , __magic_name__ ) a = self.dummy_sample a = 0.1 * sample a = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: a = self.get_scheduler_config(**__magic_name__ ) a = scheduler_class(**__magic_name__ ) scheduler.set_timesteps(__magic_name__ ) # copy over dummy past residuals a = dummy_past_residuals[:] if time_step is None: a = scheduler.timesteps[len(scheduler.timesteps ) // 2] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__magic_name__ ) a = scheduler_class.from_pretrained(__magic_name__ ) new_scheduler.set_timesteps(__magic_name__ ) # copy over dummy past residuals a = dummy_past_residuals[:] a = scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample a = new_scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" a = scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample a = new_scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def lowerCamelCase__ ( self :Union[str, Any] ): '''simple docstring''' pass def lowerCamelCase__ ( self :List[Any] , __magic_name__ :List[Any]=0 , **__magic_name__ :Any ): '''simple docstring''' a = dict(self.forward_default_kwargs ) a = kwargs.pop("""num_inference_steps""" , __magic_name__ ) a = self.dummy_sample a = 0.1 * sample a = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: a = self.get_scheduler_config() a = scheduler_class(**__magic_name__ ) scheduler.set_timesteps(__magic_name__ ) # copy over dummy past residuals (must be after setting timesteps) a = dummy_past_residuals[:] if time_step is None: a = scheduler.timesteps[len(scheduler.timesteps ) // 2] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__magic_name__ ) a = scheduler_class.from_pretrained(__magic_name__ ) # copy over dummy past residuals new_scheduler.set_timesteps(__magic_name__ ) # copy over dummy past residual (must be after setting timesteps) a = dummy_past_residuals[:] a = scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample a = new_scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" a = scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample a = new_scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def lowerCamelCase__ ( self :Optional[Any] , **__magic_name__ :Optional[int] ): '''simple docstring''' a = self.scheduler_classes[0] a = self.get_scheduler_config(**__magic_name__ ) a = scheduler_class(**__magic_name__ ) a = 10 a = self.dummy_model() a = self.dummy_sample_deter scheduler.set_timesteps(__magic_name__ ) for i, t in enumerate(scheduler.timesteps ): a = model(__magic_name__ , __magic_name__ ) a = scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ ).prev_sample for i, t in enumerate(scheduler.timesteps ): a = model(__magic_name__ , __magic_name__ ) a = scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ ).prev_sample return sample def lowerCamelCase__ ( self :str ): '''simple docstring''' a = dict(self.forward_default_kwargs ) a = kwargs.pop("""num_inference_steps""" , __magic_name__ ) for scheduler_class in self.scheduler_classes: a = self.get_scheduler_config() a = scheduler_class(**__magic_name__ ) a = self.dummy_sample a = 0.1 * sample if num_inference_steps is not None and hasattr(__magic_name__ , """set_timesteps""" ): scheduler.set_timesteps(__magic_name__ ) elif num_inference_steps is not None and not hasattr(__magic_name__ , """set_timesteps""" ): a = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) a = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] a = dummy_past_residuals[:] a = scheduler.timesteps[5] a = scheduler.timesteps[6] a = scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample a = scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) a = scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample a = scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def lowerCamelCase__ ( self :List[Any] ): '''simple docstring''' for timesteps in [100, 1000]: self.check_over_configs(num_train_timesteps=__magic_name__ , time_step=__magic_name__ ) def lowerCamelCase__ ( self :Dict ): '''simple docstring''' for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ): self.check_over_forward(num_inference_steps=__magic_name__ , time_step=__magic_name__ ) def lowerCamelCase__ ( self :Tuple ): '''simple docstring''' a = self.full_loop() a = torch.mean(torch.abs(__magic_name__ ) ) assert abs(result_mean.item() - 254_0529 ) < 10
347
1
import os from bleurt import score # From: git+https://github.com/google-research/bleurt.git import datasets __UpperCamelCase : Union[str, Any] = datasets.logging.get_logger(__name__) __UpperCamelCase : List[Any] = "\\n@inproceedings{bleurt,\n title={BLEURT: Learning Robust Metrics for Text Generation},\n author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},\n booktitle={ACL},\n year={2020},\n url={https://arxiv.org/abs/2004.04696}\n}\n" __UpperCamelCase : Union[str, Any] = "\\nBLEURT a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)\nand then employing another pre-training phrase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune\nit for your specific application (the latter is expected to perform better).\n\nSee the project's README at https://github.com/google-research/bleurt#readme for more information.\n" __UpperCamelCase : int = "\nBLEURT score.\n\nArgs:\n `predictions` (list of str): prediction/candidate sentences\n `references` (list of str): reference sentences\n `checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.\n\nReturns:\n 'scores': List of scores.\nExamples:\n\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> bleurt = datasets.load_metric(\"bleurt\")\n >>> results = bleurt.compute(predictions=predictions, references=references)\n >>> print([round(v, 2) for v in results[\"scores\"]])\n [1.03, 1.04]\n" __UpperCamelCase : str = { "bleurt-tiny-128": "https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip", "bleurt-tiny-512": "https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip", "bleurt-base-128": "https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip", "bleurt-base-512": "https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip", "bleurt-large-128": "https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip", "bleurt-large-512": "https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip", "BLEURT-20-D3": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip", "BLEURT-20-D6": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip", "BLEURT-20-D12": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip", "BLEURT-20": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip", } @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __lowerCAmelCase ( datasets.Metric ): def lowerCamelCase__ ( self :List[Any] ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , homepage="""https://github.com/google-research/bleurt""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Value("""string""" , id="""sequence""" ), """references""": datasets.Value("""string""" , id="""sequence""" ), } ) , codebase_urls=["""https://github.com/google-research/bleurt"""] , reference_urls=["""https://github.com/google-research/bleurt""", """https://arxiv.org/abs/2004.04696"""] , ) def lowerCamelCase__ ( self :Dict , __magic_name__ :List[Any] ): '''simple docstring''' if self.config_name == "default": logger.warning( """Using default BLEURT-Base checkpoint for sequence maximum length 128. """ """You can use a bigger model for better results with e.g.: datasets.load_metric('bleurt', 'bleurt-large-512').""" ) a = """bleurt-base-128""" if self.config_name.lower() in CHECKPOINT_URLS: a = self.config_name.lower() elif self.config_name.upper() in CHECKPOINT_URLS: a = self.config_name.upper() else: raise KeyError( F'{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}' ) # download the model checkpoint specified by self.config_name and set up the scorer a = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name] ) a = score.BleurtScorer(os.path.join(__magic_name__ , __magic_name__ ) ) def lowerCamelCase__ ( self :int , __magic_name__ :int , __magic_name__ :str ): '''simple docstring''' a = self.scorer.score(references=__magic_name__ , candidates=__magic_name__ ) return {"scores": scores}
347
__UpperCamelCase : Dict = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" def __A ( ) -> None: a = input("""Enter message: """ ) a = input("""Enter key [alphanumeric]: """ ) a = input("""Encrypt/Decrypt [e/d]: """ ) if mode.lower().startswith("""e""" ): a = """encrypt""" a = encrypt_message(__lowerCamelCase , __lowerCamelCase ) elif mode.lower().startswith("""d""" ): a = """decrypt""" a = decrypt_message(__lowerCamelCase , __lowerCamelCase ) print(f'\n{mode.title()}ed message:' ) print(__lowerCamelCase ) def __A ( __lowerCamelCase , __lowerCamelCase ) -> str: return translate_message(__lowerCamelCase , __lowerCamelCase , """encrypt""" ) def __A ( __lowerCamelCase , __lowerCamelCase ) -> str: return translate_message(__lowerCamelCase , __lowerCamelCase , """decrypt""" ) def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> str: a = [] a = 0 a = key.upper() for symbol in message: a = LETTERS.find(symbol.upper() ) if num != -1: if mode == "encrypt": num += LETTERS.find(key[key_index] ) elif mode == "decrypt": num -= LETTERS.find(key[key_index] ) num %= len(__lowerCamelCase ) if symbol.isupper(): translated.append(LETTERS[num] ) elif symbol.islower(): translated.append(LETTERS[num].lower() ) key_index += 1 if key_index == len(__lowerCamelCase ): a = 0 else: translated.append(__lowerCamelCase ) return "".join(__lowerCamelCase ) if __name__ == "__main__": main()
347
1
import argparse import os import re # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_dummies.py __UpperCamelCase : Optional[Any] = "src/diffusers" # Matches is_xxx_available() __UpperCamelCase : Optional[int] = re.compile(R"is\_([a-z_]*)_available\(\)") # Matches from xxx import bla __UpperCamelCase : Optional[int] = re.compile(R"\s+from\s+\S*\s+import\s+([^\(\s].*)\n") __UpperCamelCase : Optional[Any] = "\n{0} = None\n" __UpperCamelCase : Union[str, Any] = "\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, {1})\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, {1})\n" __UpperCamelCase : int = "\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n" def __A ( __lowerCamelCase ) -> List[Any]: a = _re_backend.findall(__lowerCamelCase ) if len(__lowerCamelCase ) == 0: return None return "_and_".join(__lowerCamelCase ) def __A ( ) -> Tuple: with open(os.path.join(__lowerCamelCase , """__init__.py""" ) , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f: a = f.readlines() # Get to the point we do the actual imports for type checking a = 0 a = {} # Go through the end of the file while line_index < len(__lowerCamelCase ): # If the line contains is_backend_available, we grab all objects associated with the `else` block a = find_backend(lines[line_index] ) if backend is not None: while not lines[line_index].startswith("""else:""" ): line_index += 1 line_index += 1 a = [] # Until we unindent, add backend objects to the list while line_index < len(__lowerCamelCase ) and len(lines[line_index] ) > 1: a = lines[line_index] a = _re_single_line_import.search(__lowerCamelCase ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(""", """ ) ) elif line.startswith(""" """ * 8 ): objects.append(line[8:-2] ) line_index += 1 if len(__lowerCamelCase ) > 0: a = objects else: line_index += 1 return backend_specific_objects def __A ( __lowerCamelCase , __lowerCamelCase ) -> List[str]: if name.isupper(): return DUMMY_CONSTANT.format(__lowerCamelCase ) elif name.islower(): return DUMMY_FUNCTION.format(__lowerCamelCase , __lowerCamelCase ) else: return DUMMY_CLASS.format(__lowerCamelCase , __lowerCamelCase ) def __A ( __lowerCamelCase=None ) -> Any: if backend_specific_objects is None: a = read_init() # For special correspondence backend to module name as used in the function requires_modulename a = {} for backend, objects in backend_specific_objects.items(): a = """[""" + """, """.join(f'"{b}"' for b in backend.split("""_and_""" ) ) + """]""" a = """# This file is autogenerated by the command `make fix-copies`, do not edit.\n""" dummy_file += "from ..utils import DummyObject, requires_backends\n\n" dummy_file += "\n".join([create_dummy_object(__lowerCamelCase , __lowerCamelCase ) for o in objects] ) a = dummy_file return dummy_files def __A ( __lowerCamelCase=False ) -> str: a = create_dummy_files() # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py a = {"""torch""": """pt"""} # Locate actual dummy modules and read their content. a = os.path.join(__lowerCamelCase , """utils""" ) a = { backend: os.path.join(__lowerCamelCase , f'dummy_{short_names.get(__lowerCamelCase , __lowerCamelCase )}_objects.py' ) for backend in dummy_files.keys() } a = {} for backend, file_path in dummy_file_paths.items(): if os.path.isfile(__lowerCamelCase ): with open(__lowerCamelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f: a = f.read() else: a = """""" for backend in dummy_files.keys(): if dummy_files[backend] != actual_dummies[backend]: if overwrite: print( f'Updating diffusers.utils.dummy_{short_names.get(__lowerCamelCase , __lowerCamelCase )}_objects.py as the main ' """__init__ has new objects.""" ) with open(dummy_file_paths[backend] , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f: f.write(dummy_files[backend] ) else: raise ValueError( """The main __init__ has objects that are not present in """ f'diffusers.utils.dummy_{short_names.get(__lowerCamelCase , __lowerCamelCase )}_objects.py. Run `make fix-copies` ' """to fix this.""" ) if __name__ == "__main__": __UpperCamelCase : List[str] = argparse.ArgumentParser() parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.") __UpperCamelCase : List[Any] = parser.parse_args() check_dummies(args.fix_and_overwrite)
347
import unittest import numpy as np from transformers import RobertaConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_flax_available(): from transformers.models.roberta.modeling_flax_roberta import ( FlaxRobertaForCausalLM, FlaxRobertaForMaskedLM, FlaxRobertaForMultipleChoice, FlaxRobertaForQuestionAnswering, FlaxRobertaForSequenceClassification, FlaxRobertaForTokenClassification, FlaxRobertaModel, ) class __lowerCAmelCase ( unittest.TestCase ): def __init__( self :Optional[int] , __magic_name__ :Tuple , __magic_name__ :Tuple=13 , __magic_name__ :List[Any]=7 , __magic_name__ :Optional[Any]=True , __magic_name__ :List[Any]=True , __magic_name__ :Union[str, Any]=True , __magic_name__ :List[str]=True , __magic_name__ :str=99 , __magic_name__ :Optional[Any]=32 , __magic_name__ :Union[str, Any]=5 , __magic_name__ :Any=4 , __magic_name__ :int=37 , __magic_name__ :Tuple="gelu" , __magic_name__ :List[str]=0.1 , __magic_name__ :Dict=0.1 , __magic_name__ :Tuple=512 , __magic_name__ :Dict=16 , __magic_name__ :Optional[int]=2 , __magic_name__ :Optional[int]=0.02 , __magic_name__ :Optional[Any]=4 , ): '''simple docstring''' a = parent a = batch_size a = seq_length a = is_training a = use_attention_mask a = use_token_type_ids a = use_labels a = vocab_size a = hidden_size a = num_hidden_layers a = num_attention_heads a = intermediate_size a = hidden_act a = hidden_dropout_prob a = attention_probs_dropout_prob a = max_position_embeddings a = type_vocab_size a = type_sequence_label_size a = initializer_range a = num_choices def lowerCamelCase__ ( self :int ): '''simple docstring''' a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) a = None if self.use_attention_mask: a = random_attention_mask([self.batch_size, self.seq_length] ) a = None if self.use_token_type_ids: a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) a = RobertaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__magic_name__ , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def lowerCamelCase__ ( self :List[Any] ): '''simple docstring''' a = self.prepare_config_and_inputs() a , a , a , a = config_and_inputs a = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask} return config, inputs_dict def lowerCamelCase__ ( self :Dict ): '''simple docstring''' a = self.prepare_config_and_inputs() a , a , a , a = config_and_inputs a = True a = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) a = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, encoder_hidden_states, encoder_attention_mask, ) @require_flax class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ): UpperCamelCase__ = True UpperCamelCase__ = ( ( FlaxRobertaModel, FlaxRobertaForCausalLM, FlaxRobertaForMaskedLM, FlaxRobertaForSequenceClassification, FlaxRobertaForTokenClassification, FlaxRobertaForMultipleChoice, FlaxRobertaForQuestionAnswering, ) if is_flax_available() else () ) def lowerCamelCase__ ( self :Dict ): '''simple docstring''' a = FlaxRobertaModelTester(self ) @slow def lowerCamelCase__ ( self :Optional[Any] ): '''simple docstring''' for model_class_name in self.all_model_classes: a = model_class_name.from_pretrained("""roberta-base""" , from_pt=__magic_name__ ) a = model(np.ones((1, 1) ) ) self.assertIsNotNone(__magic_name__ )
347
1
def __A ( __lowerCamelCase , __lowerCamelCase ) -> str: if number < 0 or shift_amount < 0: raise ValueError("""both inputs must be positive integers""" ) a = str(bin(__lowerCamelCase ) ) binary_number += "0" * shift_amount return binary_number def __A ( __lowerCamelCase , __lowerCamelCase ) -> str: if number < 0 or shift_amount < 0: raise ValueError("""both inputs must be positive integers""" ) a = str(bin(__lowerCamelCase ) )[2:] if shift_amount >= len(__lowerCamelCase ): return "0b0" a = binary_number[: len(__lowerCamelCase ) - shift_amount] return "0b" + shifted_binary_number def __A ( __lowerCamelCase , __lowerCamelCase ) -> str: if number >= 0: # Get binary representation of positive number a = """0""" + str(bin(__lowerCamelCase ) ).strip("""-""" )[2:] else: # Get binary (2's complement) representation of negative number a = len(bin(__lowerCamelCase )[3:] ) # Find 2's complement of number a = bin(abs(__lowerCamelCase ) - (1 << binary_number_length) )[3:] a = ( """1""" + """0""" * (binary_number_length - len(__lowerCamelCase )) + binary_number ) if shift_amount >= len(__lowerCamelCase ): return "0b" + binary_number[0] * len(__lowerCamelCase ) return ( "0b" + binary_number[0] * shift_amount + binary_number[: len(__lowerCamelCase ) - shift_amount] ) if __name__ == "__main__": import doctest doctest.testmod()
347
import io import itertools import json from dataclasses import dataclass from typing import Optional import pyarrow as pa import pyarrow.json as paj import datasets from datasets.table import table_cast from datasets.utils.file_utils import readline __UpperCamelCase : Any = datasets.utils.logging.get_logger(__name__) @dataclass class __lowerCAmelCase ( datasets.BuilderConfig ): UpperCamelCase__ = None UpperCamelCase__ = "utf-8" UpperCamelCase__ = None UpperCamelCase__ = None UpperCamelCase__ = True # deprecated UpperCamelCase__ = None # deprecated UpperCamelCase__ = 10 << 20 # 10MB UpperCamelCase__ = None class __lowerCAmelCase ( datasets.ArrowBasedBuilder ): UpperCamelCase__ = JsonConfig def lowerCamelCase__ ( self :str ): '''simple docstring''' if self.config.block_size is not None: logger.warning("""The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead""" ) a = self.config.block_size if self.config.use_threads is not True: logger.warning( """The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore.""" ) if self.config.newlines_in_values is not None: raise ValueError("""The JSON loader parameter `newlines_in_values` is no longer supported""" ) return datasets.DatasetInfo(features=self.config.features ) def lowerCamelCase__ ( self :Tuple , __magic_name__ :str ): '''simple docstring''' if not self.config.data_files: raise ValueError(F'At least one data file must be specified, but got data_files={self.config.data_files}' ) a = dl_manager.download_and_extract(self.config.data_files ) if isinstance(__magic_name__ , (str, list, tuple) ): a = data_files if isinstance(__magic_name__ , __magic_name__ ): a = [files] a = [dl_manager.iter_files(__magic_name__ ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files} )] a = [] for split_name, files in data_files.items(): if isinstance(__magic_name__ , __magic_name__ ): a = [files] a = [dl_manager.iter_files(__magic_name__ ) for file in files] splits.append(datasets.SplitGenerator(name=__magic_name__ , gen_kwargs={"""files""": files} ) ) return splits def lowerCamelCase__ ( self :List[str] , __magic_name__ :pa.Table ): '''simple docstring''' if self.config.features is not None: # adding missing columns for column_name in set(self.config.features ) - set(pa_table.column_names ): a = self.config.features.arrow_schema.field(__magic_name__ ).type a = pa_table.append_column(__magic_name__ , pa.array([None] * len(__magic_name__ ) , type=__magic_name__ ) ) # more expensive cast to support nested structures with keys in a different order # allows str <-> int/float or str to Audio for example a = table_cast(__magic_name__ , self.config.features.arrow_schema ) return pa_table def lowerCamelCase__ ( self :Optional[int] , __magic_name__ :Union[str, Any] ): '''simple docstring''' for file_idx, file in enumerate(itertools.chain.from_iterable(__magic_name__ ) ): # If the file is one json object and if we need to look at the list of items in one specific field if self.config.field is not None: with open(__magic_name__ , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f: a = json.load(__magic_name__ ) # We keep only the field we are interested in a = dataset[self.config.field] # We accept two format: a list of dicts or a dict of lists if isinstance(__magic_name__ , (list, tuple) ): a = set().union(*[row.keys() for row in dataset] ) a = {col: [row.get(__magic_name__ ) for row in dataset] for col in keys} else: a = dataset a = pa.Table.from_pydict(__magic_name__ ) yield file_idx, self._cast_table(__magic_name__ ) # If the file has one json object per line else: with open(__magic_name__ , """rb""" ) as f: a = 0 # Use block_size equal to the chunk size divided by 32 to leverage multithreading # Set a default minimum value of 16kB if the chunk size is really small a = max(self.config.chunksize // 32 , 16 << 10 ) a = ( self.config.encoding_errors if self.config.encoding_errors is not None else """strict""" ) while True: a = f.read(self.config.chunksize ) if not batch: break # Finish current line try: batch += f.readline() except (AttributeError, io.UnsupportedOperation): batch += readline(__magic_name__ ) # PyArrow only accepts utf-8 encoded bytes if self.config.encoding != "utf-8": a = batch.decode(self.config.encoding , errors=__magic_name__ ).encode("""utf-8""" ) try: while True: try: a = paj.read_json( io.BytesIO(__magic_name__ ) , read_options=paj.ReadOptions(block_size=__magic_name__ ) ) break except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e: if ( isinstance(__magic_name__ , pa.ArrowInvalid ) and "straddling" not in str(__magic_name__ ) or block_size > len(__magic_name__ ) ): raise else: # Increase the block size in case it was too small. # The block size will be reset for the next file. logger.debug( F'Batch of {len(__magic_name__ )} bytes couldn\'t be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.' ) block_size *= 2 except pa.ArrowInvalid as e: try: with open( __magic_name__ , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f: a = json.load(__magic_name__ ) except json.JSONDecodeError: logger.error(F'Failed to read file \'{file}\' with error {type(__magic_name__ )}: {e}' ) raise e # If possible, parse the file as a list of json objects and exit the loop if isinstance(__magic_name__ , __magic_name__ ): # list is the only sequence type supported in JSON try: a = set().union(*[row.keys() for row in dataset] ) a = {col: [row.get(__magic_name__ ) for row in dataset] for col in keys} a = pa.Table.from_pydict(__magic_name__ ) except (pa.ArrowInvalid, AttributeError) as e: logger.error(F'Failed to read file \'{file}\' with error {type(__magic_name__ )}: {e}' ) raise ValueError(F'Not able to read records in the JSON file at {file}.' ) from None yield file_idx, self._cast_table(__magic_name__ ) break else: logger.error(F'Failed to read file \'{file}\' with error {type(__magic_name__ )}: {e}' ) raise ValueError( F'Not able to read records in the JSON file at {file}. ' F'You should probably indicate the field of the JSON file containing your records. ' F'This JSON file contain the following fields: {str(list(dataset.keys() ) )}. ' F'Select the correct one and provide it as `field=\'XXX\'` to the dataset loading method. ' ) from None # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(__magic_name__ ) batch_idx += 1
347
1
from PIL import Image def __A ( __lowerCamelCase ) -> Image: a , a = image.size a = 0 a = image.load() for i in range(__lowerCamelCase ): for j in range(__lowerCamelCase ): a = pixels[j, i] mean += pixel mean //= width * height for j in range(__lowerCamelCase ): for i in range(__lowerCamelCase ): a = 255 if pixels[i, j] > mean else 0 return image if __name__ == "__main__": __UpperCamelCase : str = mean_threshold(Image.open("path_to_image").convert("L")) image.save("output_image_path")
347
import warnings from typing import Dict, List, Optional, Tuple from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging __UpperCamelCase : Dict = logging.get_logger(__name__) class __lowerCAmelCase ( __magic_name__ ): UpperCamelCase__ = ['''input_ids''', '''attention_mask'''] def __init__( self :List[str] , __magic_name__ :int="</s>" , __magic_name__ :List[Any]="<unk>" , __magic_name__ :Optional[Any]="<pad>" , __magic_name__ :Optional[int]=125 , __magic_name__ :List[str]=None , **__magic_name__ :List[str] , ): '''simple docstring''' if extra_ids > 0 and additional_special_tokens is None: a = [F'<extra_id_{i}>' for i in range(__magic_name__ )] elif extra_ids > 0 and additional_special_tokens is not None: # Check that we have the right number of extra_id special tokens a = len(set(filter(lambda __magic_name__ : bool("""extra_id""" in str(__magic_name__ ) ) , __magic_name__ ) ) ) if extra_tokens != extra_ids: raise ValueError( F'Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are' """ provided to ByT5Tokenizer. In this case the additional_special_tokens must include the""" """ extra_ids tokens""" ) a = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else pad_token a = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else eos_token a = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else unk_token super().__init__( eos_token=__magic_name__ , unk_token=__magic_name__ , pad_token=__magic_name__ , extra_ids=__magic_name__ , additional_special_tokens=__magic_name__ , **__magic_name__ , ) a = extra_ids a = 2**8 # utf is 8 bits # define special tokens dict a = { self.pad_token: 0, self.eos_token: 1, self.unk_token: 2, } a = len(self.special_tokens_encoder ) a = len(__magic_name__ ) for i, token in enumerate(__magic_name__ ): a = self.vocab_size + i - n a = {v: k for k, v in self.special_tokens_encoder.items()} @property def lowerCamelCase__ ( self :List[Any] ): '''simple docstring''' return self._utf_vocab_size + self._num_special_tokens + self._extra_ids def lowerCamelCase__ ( self :Any , __magic_name__ :List[int] , __magic_name__ :Optional[List[int]] = None , __magic_name__ :bool = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__magic_name__ , token_ids_a=__magic_name__ , already_has_special_tokens=__magic_name__ ) # normal case: some special tokens if token_ids_a is None: return ([0] * len(__magic_name__ )) + [1] return ([0] * len(__magic_name__ )) + [1] + ([0] * len(__magic_name__ )) + [1] def lowerCamelCase__ ( self :str , __magic_name__ :List[int] ): '''simple docstring''' if len(__magic_name__ ) > 0 and token_ids[-1] == self.eos_token_id: warnings.warn( F'This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated' """ eos tokens being added.""" ) return token_ids else: return token_ids + [self.eos_token_id] def lowerCamelCase__ ( self :Union[str, Any] , __magic_name__ :List[int] , __magic_name__ :Optional[List[int]] = None ): '''simple docstring''' a = [self.eos_token_id] if token_ids_a is None: return len(token_ids_a + eos ) * [0] return len(token_ids_a + eos + token_ids_a + eos ) * [0] def lowerCamelCase__ ( self :Union[str, Any] , __magic_name__ :List[int] , __magic_name__ :Optional[List[int]] = None ): '''simple docstring''' a = self._add_eos_if_not_present(__magic_name__ ) if token_ids_a is None: return token_ids_a else: a = self._add_eos_if_not_present(__magic_name__ ) return token_ids_a + token_ids_a def lowerCamelCase__ ( self :List[str] , __magic_name__ :str ): '''simple docstring''' a = [chr(__magic_name__ ) for i in text.encode("""utf-8""" )] return tokens def lowerCamelCase__ ( self :Tuple , __magic_name__ :str ): '''simple docstring''' if token in self.special_tokens_encoder: a = self.special_tokens_encoder[token] elif token in self.added_tokens_encoder: a = self.added_tokens_encoder[token] elif len(__magic_name__ ) != 1: a = self.unk_token_id else: a = ord(__magic_name__ ) + self._num_special_tokens return token_id def lowerCamelCase__ ( self :List[str] , __magic_name__ :Dict ): '''simple docstring''' if index in self.special_tokens_decoder: a = self.special_tokens_decoder[index] else: a = chr(index - self._num_special_tokens ) return token def lowerCamelCase__ ( self :Tuple , __magic_name__ :Optional[int] ): '''simple docstring''' a = b"""""" for token in tokens: if token in self.special_tokens_decoder: a = self.special_tokens_decoder[token].encode("""utf-8""" ) elif token in self.added_tokens_decoder: a = self.special_tokens_decoder[token].encode("""utf-8""" ) elif token in self.special_tokens_encoder: a = token.encode("""utf-8""" ) elif token in self.added_tokens_encoder: a = token.encode("""utf-8""" ) else: a = bytes([ord(__magic_name__ )] ) bstring += tok_string a = bstring.decode("""utf-8""" , errors="""ignore""" ) return string def lowerCamelCase__ ( self :Optional[Any] , __magic_name__ :str , __magic_name__ :Optional[str] = None ): '''simple docstring''' return ()
347
1
import os import pytest import yaml from datasets.features.features import Features, Value from datasets.info import DatasetInfo, DatasetInfosDict @pytest.mark.parametrize( """files""" , [ ["""full:README.md""", """dataset_infos.json"""], ["""empty:README.md""", """dataset_infos.json"""], ["""dataset_infos.json"""], ["""full:README.md"""], ] , ) def __A ( __lowerCamelCase , __lowerCamelCase ) -> Tuple: a = tmp_path_factory.mktemp("""dset_infos_dir""" ) if "full:README.md" in files: with open(dataset_infos_dir / """README.md""" , """w""" ) as f: f.write("""---\ndataset_info:\n dataset_size: 42\n---""" ) if "empty:README.md" in files: with open(dataset_infos_dir / """README.md""" , """w""" ) as f: f.write("""""" ) # we want to support dataset_infos.json for backward compatibility if "dataset_infos.json" in files: with open(dataset_infos_dir / """dataset_infos.json""" , """w""" ) as f: f.write("""{\"default\": {\"dataset_size\": 42}}""" ) a = DatasetInfosDict.from_directory(__lowerCamelCase ) assert dataset_infos assert dataset_infos["default"].dataset_size == 42 @pytest.mark.parametrize( """dataset_info""" , [ DatasetInfo(), DatasetInfo( description="""foo""" , features=Features({"""a""": Value("""int32""" )} ) , builder_name="""builder""" , config_name="""config""" , version="""1.0.0""" , splits=[{"""name""": """train"""}] , download_size=42 , ), ] , ) def __A ( __lowerCamelCase , __lowerCamelCase ) -> Optional[int]: a = str(__lowerCamelCase ) dataset_info.write_to_directory(__lowerCamelCase ) a = DatasetInfo.from_directory(__lowerCamelCase ) assert dataset_info == reloaded assert os.path.exists(os.path.join(__lowerCamelCase , """dataset_info.json""" ) ) def __A ( ) -> Optional[int]: a = DatasetInfo( description="""foo""" , citation="""bar""" , homepage="""https://foo.bar""" , license="""CC0""" , features=Features({"""a""": Value("""int32""" )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name="""builder""" , config_name="""config""" , version="""1.0.0""" , splits=[{"""name""": """train""", """num_examples""": 42}] , download_checksums={} , download_size=1337 , post_processing_size=442 , dataset_size=1234 , size_in_bytes=1337 + 442 + 1234 , ) a = dataset_info._to_yaml_dict() assert sorted(__lowerCamelCase ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML ) for key in DatasetInfo._INCLUDED_INFO_IN_YAML: assert key in dataset_info_yaml_dict assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) ) a = yaml.safe_dump(__lowerCamelCase ) a = yaml.safe_load(__lowerCamelCase ) assert dataset_info_yaml_dict == reloaded def __A ( ) -> List[str]: a = DatasetInfo() a = dataset_info._to_yaml_dict() assert dataset_info_yaml_dict == {} @pytest.mark.parametrize( """dataset_infos_dict""" , [ DatasetInfosDict(), DatasetInfosDict({"""default""": DatasetInfo()} ), DatasetInfosDict({"""my_config_name""": DatasetInfo()} ), DatasetInfosDict( { """default""": DatasetInfo( description="""foo""" , features=Features({"""a""": Value("""int32""" )} ) , builder_name="""builder""" , config_name="""config""" , version="""1.0.0""" , splits=[{"""name""": """train"""}] , download_size=42 , ) } ), DatasetInfosDict( { """v1""": DatasetInfo(dataset_size=42 ), """v2""": DatasetInfo(dataset_size=1337 ), } ), ] , ) def __A ( __lowerCamelCase , __lowerCamelCase ) -> Optional[int]: a = str(__lowerCamelCase ) dataset_infos_dict.write_to_directory(__lowerCamelCase ) a = DatasetInfosDict.from_directory(__lowerCamelCase ) # the config_name of the dataset_infos_dict take over the attribute for config_name, dataset_info in dataset_infos_dict.items(): a = config_name # the yaml representation doesn't include fields like description or citation # so we just test that we can recover what we can from the yaml a = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() ) assert dataset_infos_dict == reloaded if dataset_infos_dict: assert os.path.exists(os.path.join(__lowerCamelCase , """README.md""" ) )
347
import copy import unittest from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_MULTIPLE_CHOICE_MAPPING, MODEL_FOR_QUESTION_ANSWERING_MAPPING, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, LayoutLMvaConfig, LayoutLMvaForQuestionAnswering, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaModel, ) from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class __lowerCAmelCase : def __init__( self :Optional[int] , __magic_name__ :str , __magic_name__ :int=2 , __magic_name__ :List[str]=3 , __magic_name__ :Optional[int]=4 , __magic_name__ :str=2 , __magic_name__ :Any=7 , __magic_name__ :Optional[Any]=True , __magic_name__ :List[Any]=True , __magic_name__ :Optional[int]=True , __magic_name__ :Dict=True , __magic_name__ :List[Any]=99 , __magic_name__ :Dict=36 , __magic_name__ :Optional[Any]=3 , __magic_name__ :str=4 , __magic_name__ :Optional[Any]=37 , __magic_name__ :Dict="gelu" , __magic_name__ :Any=0.1 , __magic_name__ :Union[str, Any]=0.1 , __magic_name__ :Dict=512 , __magic_name__ :str=16 , __magic_name__ :List[Any]=2 , __magic_name__ :Tuple=0.02 , __magic_name__ :Any=6 , __magic_name__ :Optional[int]=6 , __magic_name__ :Tuple=3 , __magic_name__ :str=4 , __magic_name__ :List[str]=None , __magic_name__ :str=1000 , ): '''simple docstring''' a = parent a = batch_size a = num_channels a = image_size a = patch_size a = text_seq_length a = is_training a = use_input_mask a = use_token_type_ids a = use_labels a = vocab_size a = hidden_size a = num_hidden_layers a = num_attention_heads a = intermediate_size a = hidden_act a = hidden_dropout_prob a = attention_probs_dropout_prob a = max_position_embeddings a = type_vocab_size a = type_sequence_label_size a = initializer_range a = coordinate_size a = shape_size a = num_labels a = num_choices a = scope a = range_bbox # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token) a = text_seq_length a = (image_size // patch_size) ** 2 + 1 a = self.text_seq_length + self.image_seq_length def lowerCamelCase__ ( self :List[str] ): '''simple docstring''' a = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size ) a = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox ) # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: a = bbox[i, j, 3] a = bbox[i, j, 1] a = t if bbox[i, j, 2] < bbox[i, j, 0]: a = bbox[i, j, 2] a = bbox[i, j, 0] a = t a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) a = None if self.use_input_mask: a = random_attention_mask([self.batch_size, self.text_seq_length] ) a = None if self.use_token_type_ids: a = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size ) a = None a = None if self.use_labels: a = ids_tensor([self.batch_size] , self.type_sequence_label_size ) a = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels ) a = LayoutLMvaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , ) return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels def lowerCamelCase__ ( self :int , __magic_name__ :Union[str, Any] , __magic_name__ :List[str] , __magic_name__ :List[str] , __magic_name__ :int , __magic_name__ :Tuple , __magic_name__ :int , __magic_name__ :Tuple , __magic_name__ :int ): '''simple docstring''' a = LayoutLMvaModel(config=__magic_name__ ) model.to(__magic_name__ ) model.eval() # text + image a = model(__magic_name__ , pixel_values=__magic_name__ ) a = model( __magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ ) a = model(__magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , token_type_ids=__magic_name__ ) a = model(__magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) # text only a = model(__magic_name__ ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) ) # image only a = model(pixel_values=__magic_name__ ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) ) def lowerCamelCase__ ( self :Optional[Any] , __magic_name__ :List[Any] , __magic_name__ :Optional[int] , __magic_name__ :Tuple , __magic_name__ :Union[str, Any] , __magic_name__ :int , __magic_name__ :Optional[int] , __magic_name__ :List[str] , __magic_name__ :List[str] ): '''simple docstring''' a = self.num_labels a = LayoutLMvaForSequenceClassification(__magic_name__ ) model.to(__magic_name__ ) model.eval() a = model( __magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , labels=__magic_name__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase__ ( self :Tuple , __magic_name__ :Dict , __magic_name__ :Optional[Any] , __magic_name__ :Optional[Any] , __magic_name__ :Dict , __magic_name__ :Dict , __magic_name__ :int , __magic_name__ :List[str] , __magic_name__ :Tuple ): '''simple docstring''' a = self.num_labels a = LayoutLMvaForTokenClassification(config=__magic_name__ ) model.to(__magic_name__ ) model.eval() a = model( __magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , labels=__magic_name__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) ) def lowerCamelCase__ ( self :Optional[int] , __magic_name__ :Tuple , __magic_name__ :str , __magic_name__ :int , __magic_name__ :Optional[int] , __magic_name__ :str , __magic_name__ :List[str] , __magic_name__ :Optional[int] , __magic_name__ :Optional[Any] ): '''simple docstring''' a = LayoutLMvaForQuestionAnswering(config=__magic_name__ ) model.to(__magic_name__ ) model.eval() a = model( __magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , start_positions=__magic_name__ , end_positions=__magic_name__ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCamelCase__ ( self :Union[str, Any] ): '''simple docstring''' a = self.prepare_config_and_inputs() ( ( a ) , ( a ) , ( a ) , ( a ) , ( a ) , ( a ) , ( a ) , ( a ) , ) = config_and_inputs a = { """input_ids""": input_ids, """bbox""": bbox, """pixel_values""": pixel_values, """token_type_ids""": token_type_ids, """attention_mask""": input_mask, } return config, inputs_dict @require_torch class __lowerCAmelCase ( __magic_name__ , __magic_name__ , unittest.TestCase ): UpperCamelCase__ = False UpperCamelCase__ = False UpperCamelCase__ = False UpperCamelCase__ = ( ( LayoutLMvaModel, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaForQuestionAnswering, ) if is_torch_available() else () ) UpperCamelCase__ = ( {'''document-question-answering''': LayoutLMvaForQuestionAnswering, '''feature-extraction''': LayoutLMvaModel} if is_torch_available() else {} ) def lowerCamelCase__ ( self :List[str] , __magic_name__ :Union[str, Any] , __magic_name__ :List[str] , __magic_name__ :Optional[Any] , __magic_name__ :List[str] , __magic_name__ :List[Any] ): '''simple docstring''' return True def lowerCamelCase__ ( self :int ): '''simple docstring''' a = LayoutLMvaModelTester(self ) a = ConfigTester(self , config_class=__magic_name__ , hidden_size=37 ) def lowerCamelCase__ ( self :Optional[Any] , __magic_name__ :Dict , __magic_name__ :Dict , __magic_name__ :Any=False ): '''simple docstring''' a = copy.deepcopy(__magic_name__ ) if model_class in get_values(__magic_name__ ): a = { k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous() if isinstance(__magic_name__ , torch.Tensor ) and v.ndim > 1 else v for k, v in inputs_dict.items() } if return_labels: if model_class in get_values(__magic_name__ ): a = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=__magic_name__ ) elif model_class in get_values(__magic_name__ ): a = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__magic_name__ ) a = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__magic_name__ ) elif model_class in [ *get_values(__magic_name__ ), ]: a = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__magic_name__ ) elif model_class in [ *get_values(__magic_name__ ), ]: a = torch.zeros( (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=__magic_name__ , ) return inputs_dict def lowerCamelCase__ ( self :str ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCamelCase__ ( self :Union[str, Any] ): '''simple docstring''' a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__magic_name__ ) def lowerCamelCase__ ( self :Tuple ): '''simple docstring''' a = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: a = type self.model_tester.create_and_check_model(*__magic_name__ ) def lowerCamelCase__ ( self :Tuple ): '''simple docstring''' a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*__magic_name__ ) def lowerCamelCase__ ( self :Union[str, Any] ): '''simple docstring''' a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__magic_name__ ) def lowerCamelCase__ ( self :int ): '''simple docstring''' a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__magic_name__ ) @slow def lowerCamelCase__ ( self :int ): '''simple docstring''' for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a = LayoutLMvaModel.from_pretrained(__magic_name__ ) self.assertIsNotNone(__magic_name__ ) def __A ( ) -> str: a = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch class __lowerCAmelCase ( unittest.TestCase ): @cached_property def lowerCamelCase__ ( self :Optional[Any] ): '''simple docstring''' return LayoutLMvaImageProcessor(apply_ocr=__magic_name__ ) if is_vision_available() else None @slow def lowerCamelCase__ ( self :Optional[Any] ): '''simple docstring''' a = LayoutLMvaModel.from_pretrained("""microsoft/layoutlmv3-base""" ).to(__magic_name__ ) a = self.default_image_processor a = prepare_img() a = image_processor(images=__magic_name__ , return_tensors="""pt""" ).pixel_values.to(__magic_name__ ) a = torch.tensor([[1, 2]] ) a = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 ) # forward pass a = model( input_ids=input_ids.to(__magic_name__ ) , bbox=bbox.to(__magic_name__ ) , pixel_values=pixel_values.to(__magic_name__ ) , ) # verify the logits a = torch.Size((1, 199, 768) ) self.assertEqual(outputs.last_hidden_state.shape , __magic_name__ ) a = torch.tensor( [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] ).to(__magic_name__ ) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , __magic_name__ , atol=1E-4 ) )
347
1
import unittest from transformers import ( MODEL_FOR_OBJECT_DETECTION_MAPPING, AutoFeatureExtractor, AutoModelForObjectDetection, ObjectDetectionPipeline, is_vision_available, pipeline, ) from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_pytesseract, require_tf, require_timm, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class __lowerCAmelCase : @staticmethod def lowerCamelCase__ ( *__magic_name__ :Optional[int] , **__magic_name__ :Any ): '''simple docstring''' pass @is_pipeline_test @require_vision @require_timm @require_torch class __lowerCAmelCase ( unittest.TestCase ): UpperCamelCase__ = MODEL_FOR_OBJECT_DETECTION_MAPPING def lowerCamelCase__ ( self :Any , __magic_name__ :str , __magic_name__ :List[Any] , __magic_name__ :Optional[int] ): '''simple docstring''' a = ObjectDetectionPipeline(model=__magic_name__ , image_processor=__magic_name__ ) return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"] def lowerCamelCase__ ( self :Union[str, Any] , __magic_name__ :List[str] , __magic_name__ :str ): '''simple docstring''' a = object_detector("""./tests/fixtures/tests_samples/COCO/000000039769.png""" , threshold=0.0 ) self.assertGreater(len(__magic_name__ ) , 0 ) for detected_object in outputs: self.assertEqual( __magic_name__ , { """score""": ANY(__magic_name__ ), """label""": ANY(__magic_name__ ), """box""": {"""xmin""": ANY(__magic_name__ ), """ymin""": ANY(__magic_name__ ), """xmax""": ANY(__magic_name__ ), """ymax""": ANY(__magic_name__ )}, } , ) import datasets a = datasets.load_dataset("""hf-internal-testing/fixtures_image_utils""" , """image""" , split="""test""" ) a = [ Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ), """http://images.cocodataset.org/val2017/000000039769.jpg""", # RGBA dataset[0]["""file"""], # LA dataset[1]["""file"""], # L dataset[2]["""file"""], ] a = object_detector(__magic_name__ , threshold=0.0 ) self.assertEqual(len(__magic_name__ ) , len(__magic_name__ ) ) for outputs in batch_outputs: self.assertGreater(len(__magic_name__ ) , 0 ) for detected_object in outputs: self.assertEqual( __magic_name__ , { """score""": ANY(__magic_name__ ), """label""": ANY(__magic_name__ ), """box""": {"""xmin""": ANY(__magic_name__ ), """ymin""": ANY(__magic_name__ ), """xmax""": ANY(__magic_name__ ), """ymax""": ANY(__magic_name__ )}, } , ) @require_tf @unittest.skip("""Object detection not implemented in TF""" ) def lowerCamelCase__ ( self :Tuple ): '''simple docstring''' pass @require_torch def lowerCamelCase__ ( self :Optional[Any] ): '''simple docstring''' a = """hf-internal-testing/tiny-detr-mobilenetsv3""" a = AutoModelForObjectDetection.from_pretrained(__magic_name__ ) a = AutoFeatureExtractor.from_pretrained(__magic_name__ ) a = ObjectDetectionPipeline(model=__magic_name__ , feature_extractor=__magic_name__ ) a = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" , threshold=0.0 ) self.assertEqual( nested_simplify(__magic_name__ , decimals=4 ) , [ {"""score""": 0.3376, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}}, {"""score""": 0.3376, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}}, ] , ) a = object_detector( [ """http://images.cocodataset.org/val2017/000000039769.jpg""", """http://images.cocodataset.org/val2017/000000039769.jpg""", ] , threshold=0.0 , ) self.assertEqual( nested_simplify(__magic_name__ , decimals=4 ) , [ [ {"""score""": 0.3376, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}}, {"""score""": 0.3376, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}}, ], [ {"""score""": 0.3376, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}}, {"""score""": 0.3376, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}}, ], ] , ) @require_torch @slow def lowerCamelCase__ ( self :List[Any] ): '''simple docstring''' a = """facebook/detr-resnet-50""" a = AutoModelForObjectDetection.from_pretrained(__magic_name__ ) a = AutoFeatureExtractor.from_pretrained(__magic_name__ ) a = ObjectDetectionPipeline(model=__magic_name__ , feature_extractor=__magic_name__ ) a = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" ) self.assertEqual( nested_simplify(__magic_name__ , decimals=4 ) , [ {"""score""": 0.9982, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}}, {"""score""": 0.9960, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}}, {"""score""": 0.9955, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}}, {"""score""": 0.9988, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}}, {"""score""": 0.9987, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}}, ] , ) a = object_detector( [ """http://images.cocodataset.org/val2017/000000039769.jpg""", """http://images.cocodataset.org/val2017/000000039769.jpg""", ] ) self.assertEqual( nested_simplify(__magic_name__ , decimals=4 ) , [ [ {"""score""": 0.9982, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}}, {"""score""": 0.9960, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}}, {"""score""": 0.9955, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}}, {"""score""": 0.9988, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}}, {"""score""": 0.9987, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}}, ], [ {"""score""": 0.9982, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}}, {"""score""": 0.9960, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}}, {"""score""": 0.9955, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}}, {"""score""": 0.9988, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}}, {"""score""": 0.9987, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}}, ], ] , ) @require_torch @slow def lowerCamelCase__ ( self :Any ): '''simple docstring''' a = """facebook/detr-resnet-50""" a = pipeline("""object-detection""" , model=__magic_name__ ) a = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" ) self.assertEqual( nested_simplify(__magic_name__ , decimals=4 ) , [ {"""score""": 0.9982, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}}, {"""score""": 0.9960, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}}, {"""score""": 0.9955, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}}, {"""score""": 0.9988, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}}, {"""score""": 0.9987, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}}, ] , ) a = object_detector( [ """http://images.cocodataset.org/val2017/000000039769.jpg""", """http://images.cocodataset.org/val2017/000000039769.jpg""", ] ) self.assertEqual( nested_simplify(__magic_name__ , decimals=4 ) , [ [ {"""score""": 0.9982, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}}, {"""score""": 0.9960, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}}, {"""score""": 0.9955, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}}, {"""score""": 0.9988, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}}, {"""score""": 0.9987, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}}, ], [ {"""score""": 0.9982, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}}, {"""score""": 0.9960, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}}, {"""score""": 0.9955, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}}, {"""score""": 0.9988, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}}, {"""score""": 0.9987, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}}, ], ] , ) @require_torch @slow def lowerCamelCase__ ( self :Union[str, Any] ): '''simple docstring''' a = 0.9985 a = """facebook/detr-resnet-50""" a = pipeline("""object-detection""" , model=__magic_name__ ) a = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" , threshold=__magic_name__ ) self.assertEqual( nested_simplify(__magic_name__ , decimals=4 ) , [ {"""score""": 0.9988, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}}, {"""score""": 0.9987, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}}, ] , ) @require_torch @require_pytesseract @slow def lowerCamelCase__ ( self :Union[str, Any] ): '''simple docstring''' a = """Narsil/layoutlmv3-finetuned-funsd""" a = 0.9993 a = pipeline("""object-detection""" , model=__magic_name__ , threshold=__magic_name__ ) a = object_detector( """https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png""" ) self.assertEqual( nested_simplify(__magic_name__ , decimals=4 ) , [ {"""score""": 0.9993, """label""": """I-ANSWER""", """box""": {"""xmin""": 294, """ymin""": 254, """xmax""": 343, """ymax""": 264}}, {"""score""": 0.9993, """label""": """I-ANSWER""", """box""": {"""xmin""": 294, """ymin""": 254, """xmax""": 343, """ymax""": 264}}, ] , )
347
from copy import deepcopy class __lowerCAmelCase : def __init__( self :Union[str, Any] , __magic_name__ :list[int] | None = None , __magic_name__ :int | None = None ): '''simple docstring''' if arr is None and size is not None: a = size a = [0] * size elif arr is not None: self.init(__magic_name__ ) else: raise ValueError("""Either arr or size must be specified""" ) def lowerCamelCase__ ( self :Dict , __magic_name__ :list[int] ): '''simple docstring''' a = len(__magic_name__ ) a = deepcopy(__magic_name__ ) for i in range(1 , self.size ): a = self.next_(__magic_name__ ) if j < self.size: self.tree[j] += self.tree[i] def lowerCamelCase__ ( self :Tuple ): '''simple docstring''' a = self.tree[:] for i in range(self.size - 1 , 0 , -1 ): a = self.next_(__magic_name__ ) if j < self.size: arr[j] -= arr[i] return arr @staticmethod def lowerCamelCase__ ( __magic_name__ :int ): '''simple docstring''' return index + (index & (-index)) @staticmethod def lowerCamelCase__ ( __magic_name__ :int ): '''simple docstring''' return index - (index & (-index)) def lowerCamelCase__ ( self :Any , __magic_name__ :int , __magic_name__ :int ): '''simple docstring''' if index == 0: self.tree[0] += value return while index < self.size: self.tree[index] += value a = self.next_(__magic_name__ ) def lowerCamelCase__ ( self :Optional[Any] , __magic_name__ :int , __magic_name__ :int ): '''simple docstring''' self.add(__magic_name__ , value - self.get(__magic_name__ ) ) def lowerCamelCase__ ( self :int , __magic_name__ :int ): '''simple docstring''' if right == 0: return 0 a = self.tree[0] right -= 1 # make right inclusive while right > 0: result += self.tree[right] a = self.prev(__magic_name__ ) return result def lowerCamelCase__ ( self :int , __magic_name__ :int , __magic_name__ :int ): '''simple docstring''' return self.prefix(__magic_name__ ) - self.prefix(__magic_name__ ) def lowerCamelCase__ ( self :Tuple , __magic_name__ :int ): '''simple docstring''' return self.query(__magic_name__ , index + 1 ) def lowerCamelCase__ ( self :Dict , __magic_name__ :int ): '''simple docstring''' value -= self.tree[0] if value < 0: return -1 a = 1 # Largest power of 2 <= size while j * 2 < self.size: j *= 2 a = 0 while j > 0: if i + j < self.size and self.tree[i + j] <= value: value -= self.tree[i + j] i += j j //= 2 return i if __name__ == "__main__": import doctest doctest.testmod()
347
1
import argparse import logging import os import time import timeit import datasets import numpy as np import pycuda.autoinit # noqa: F401 import pycuda.driver as cuda import tensorrt as trt import torch from absl import logging as absl_logging from accelerate import Accelerator from datasets import load_dataset, load_metric from torch.utils.data import DataLoader from utils_qa import postprocess_qa_predictions import transformers from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed from transformers.trainer_pt_utils import nested_concat, nested_truncate __UpperCamelCase : Tuple = trt.Logger(trt.Logger.WARNING) __UpperCamelCase : Optional[Any] = absl_logging.get_absl_logger() absl_logger.setLevel(logging.WARNING) __UpperCamelCase : str = logging.getLogger(__name__) __UpperCamelCase : List[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--onnx_model_path", default=None, type=str, required=True, help="Path to ONNX model: ", ) parser.add_argument( "--output_dir", default=None, type=str, required=True, help="The output directory where the model checkpoints and predictions will be written.", ) # Other parameters parser.add_argument( "--tokenizer_name", default="", type=str, required=True, help="Pretrained tokenizer name or path if not the same as model_name", ) parser.add_argument( "--version_2_with_negative", action="store_true", help="If true, the SQuAD examples contain some that do not have an answer.", ) parser.add_argument( "--null_score_diff_threshold", type=float, default=0.0, help="If null_score - best_non_null is greater than the threshold predict null.", ) parser.add_argument( "--max_seq_length", default=384, type=int, help=( "The maximum total input sequence length after WordPiece tokenization. Sequences " "longer than this will be truncated, and sequences shorter than this will be padded." ), ) parser.add_argument( "--doc_stride", default=128, type=int, help="When splitting up a long document into chunks, how much stride to take between chunks.", ) parser.add_argument("--per_device_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.") parser.add_argument( "--n_best_size", default=20, type=int, help="The total number of n-best predictions to generate in the nbest_predictions.json output file.", ) parser.add_argument( "--max_answer_length", default=30, type=int, help=( "The maximum length of an answer that can be generated. This is needed because the start " "and end predictions are not conditioned on one another." ), ) parser.add_argument("--seed", type=int, default=42, help="random seed for initialization") parser.add_argument( "--dataset_name", type=str, default=None, required=True, help="The name of the dataset to use (via the datasets library).", ) parser.add_argument( "--dataset_config_name", type=str, default=None, help="The configuration name of the dataset to use (via the datasets library).", ) parser.add_argument( "--preprocessing_num_workers", type=int, default=4, help="A csv or a json file containing the training data." ) parser.add_argument("--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets") parser.add_argument( "--fp16", action="store_true", help="Whether to use 16-bit (mixed) precision instead of 32-bit", ) parser.add_argument( "--int8", action="store_true", help="Whether to use INT8", ) __UpperCamelCase : int = parser.parse_args() if args.tokenizer_name: __UpperCamelCase : List[str] = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True) else: raise ValueError( "You are instantiating a new tokenizer from scratch. This is not supported by this script." "You can do it from another script, save it, and load it from here, using --tokenizer_name." ) logger.info("Training/evaluation parameters %s", args) __UpperCamelCase : Any = args.per_device_eval_batch_size __UpperCamelCase : Union[str, Any] = (args.eval_batch_size, args.max_seq_length) # TRT Engine properties __UpperCamelCase : Dict = True __UpperCamelCase : List[str] = "temp_engine/bert-fp32.engine" if args.fpaa: __UpperCamelCase : str = "temp_engine/bert-fp16.engine" if args.inta: __UpperCamelCase : Dict = "temp_engine/bert-int8.engine" # import ONNX file if not os.path.exists("temp_engine"): os.makedirs("temp_engine") __UpperCamelCase : int = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH) with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser( network, TRT_LOGGER ) as parser: with open(args.onnx_model_path, "rb") as model: if not parser.parse(model.read()): for error in range(parser.num_errors): print(parser.get_error(error)) # Query input names and shapes from parsed TensorRT network __UpperCamelCase : Tuple = [network.get_input(i) for i in range(network.num_inputs)] __UpperCamelCase : int = [_input.name for _input in network_inputs] # ex: ["actual_input1"] with builder.create_builder_config() as config: __UpperCamelCase : Tuple = 1 << 50 if STRICT_TYPES: config.set_flag(trt.BuilderFlag.STRICT_TYPES) if args.fpaa: config.set_flag(trt.BuilderFlag.FPaa) if args.inta: config.set_flag(trt.BuilderFlag.INTa) __UpperCamelCase : Union[str, Any] = builder.create_optimization_profile() config.add_optimization_profile(profile) for i in range(len(input_names)): profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE) __UpperCamelCase : List[str] = builder.build_engine(network, config) # serialize_engine and store in file (can be directly loaded and deserialized): with open(engine_name, "wb") as f: f.write(engine.serialize()) def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> List[Any]: a = np.asarray(inputs["""input_ids"""] , dtype=np.intaa ) a = np.asarray(inputs["""attention_mask"""] , dtype=np.intaa ) a = np.asarray(inputs["""token_type_ids"""] , dtype=np.intaa ) # Copy inputs cuda.memcpy_htod_async(d_inputs[0] , input_ids.ravel() , __lowerCamelCase ) cuda.memcpy_htod_async(d_inputs[1] , attention_mask.ravel() , __lowerCamelCase ) cuda.memcpy_htod_async(d_inputs[2] , token_type_ids.ravel() , __lowerCamelCase ) # start time a = time.time() # Run inference context.execute_async( bindings=[int(__lowerCamelCase ) for d_inp in d_inputs] + [int(__lowerCamelCase ), int(__lowerCamelCase )] , stream_handle=stream.handle ) # Transfer predictions back from GPU cuda.memcpy_dtoh_async(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) cuda.memcpy_dtoh_async(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) # Synchronize the stream and take time stream.synchronize() # end time a = time.time() a = end_time - start_time a = (h_outputa, h_outputa) # print(outputs) return outputs, infer_time # Initialize the accelerator. We will let the accelerator handle device placement for us in this example. __UpperCamelCase : str = Accelerator() # Make one log on every process with the configuration for debugging. logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, ) # Setup logging, we only want one process per machine to log things on the screen. # accelerator.is_local_main_process is only True for one process per machine. logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() # If passed along, set the training seed now. if args.seed is not None: set_seed(args.seed) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). if args.dataset_name is not None: # Downloading and loading a dataset from the hub. __UpperCamelCase : List[str] = load_dataset(args.dataset_name, args.dataset_config_name) else: raise ValueError("Evaluation requires a dataset name") # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # Preprocessing the datasets. # Preprocessing is slighlty different for training and evaluation. __UpperCamelCase : Tuple = raw_datasets["validation"].column_names __UpperCamelCase : int = "question" if "question" in column_names else column_names[0] __UpperCamelCase : Any = "context" if "context" in column_names else column_names[1] __UpperCamelCase : Optional[Any] = "answers" if "answers" in column_names else column_names[2] # Padding side determines if we do (question|context) or (context|question). __UpperCamelCase : Tuple = tokenizer.padding_side == "right" if args.max_seq_length > tokenizer.model_max_length: logger.warning( F'The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the' F'model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.' ) __UpperCamelCase : Any = min(args.max_seq_length, tokenizer.model_max_length) def __A ( __lowerCamelCase ) -> int: # Some of the questions have lots of whitespace on the left, which is not useful and will make the # truncation of the context fail (the tokenized question will take a lots of space). So we remove that # left whitespace a = [q.lstrip() for q in examples[question_column_name]] # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results # in one example possible giving several features when a context is long, each of those features having a # context that overlaps a bit the context of the previous feature. a = tokenizer( examples[question_column_name if pad_on_right else context_column_name] , examples[context_column_name if pad_on_right else question_column_name] , truncation="""only_second""" if pad_on_right else """only_first""" , max_length=__lowerCamelCase , stride=args.doc_stride , return_overflowing_tokens=__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , padding="""max_length""" , ) # Since one example might give us several features if it has a long context, we need a map from a feature to # its corresponding example. This key gives us just that. a = tokenized_examples.pop("""overflow_to_sample_mapping""" ) # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the # corresponding example_id and we will store the offset mappings. a = [] for i in range(len(tokenized_examples["""input_ids"""] ) ): # Grab the sequence corresponding to that example (to know what is the context and what is the question). a = tokenized_examples.sequence_ids(__lowerCamelCase ) a = 1 if pad_on_right else 0 # One example can give several spans, this is the index of the example containing this span of text. a = sample_mapping[i] tokenized_examples["example_id"].append(examples["""id"""][sample_index] ) # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token # position is part of the context or not. a = [ (o if sequence_ids[k] == context_index else None) for k, o in enumerate(tokenized_examples["""offset_mapping"""][i] ) ] return tokenized_examples __UpperCamelCase : List[str] = raw_datasets["validation"] # Validation Feature Creation __UpperCamelCase : Tuple = eval_examples.map( prepare_validation_features, batched=True, num_proc=args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not args.overwrite_cache, desc="Running tokenizer on validation dataset", ) __UpperCamelCase : List[Any] = default_data_collator __UpperCamelCase : str = eval_dataset.remove_columns(["example_id", "offset_mapping"]) __UpperCamelCase : int = DataLoader( eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size ) def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase="eval" ) -> Optional[int]: # Post-processing: we match the start logits and end logits to answers in the original context. a = postprocess_qa_predictions( examples=__lowerCamelCase , features=__lowerCamelCase , predictions=__lowerCamelCase , version_2_with_negative=args.version_2_with_negative , n_best_size=args.n_best_size , max_answer_length=args.max_answer_length , null_score_diff_threshold=args.null_score_diff_threshold , output_dir=args.output_dir , prefix=__lowerCamelCase , ) # Format the result to the format the metric expects. if args.version_2_with_negative: a = [ {"""id""": k, """prediction_text""": v, """no_answer_probability""": 0.0} for k, v in predictions.items() ] else: a = [{"""id""": k, """prediction_text""": v} for k, v in predictions.items()] a = [{"""id""": ex["""id"""], """answers""": ex[answer_column_name]} for ex in examples] return EvalPrediction(predictions=__lowerCamelCase , label_ids=__lowerCamelCase ) __UpperCamelCase : Optional[int] = load_metric("squad_v2" if args.version_2_with_negative else "squad") # Evaluation! logger.info("Loading ONNX model %s for evaluation", args.onnx_model_path) with open(engine_name, "rb") as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine( f.read() ) as engine, engine.create_execution_context() as context: # setup for TRT inferrence for i in range(len(input_names)): context.set_binding_shape(i, INPUT_SHAPE) assert context.all_binding_shapes_specified def __A ( __lowerCamelCase ) -> Dict: return trt.volume(engine.get_binding_shape(__lowerCamelCase ) ) * engine.get_binding_dtype(__lowerCamelCase ).itemsize # Allocate device memory for inputs and outputs. __UpperCamelCase : List[Any] = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)] # Allocate output buffer __UpperCamelCase : Optional[Any] = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.floataa) __UpperCamelCase : List[str] = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.floataa) __UpperCamelCase : List[str] = cuda.mem_alloc(h_outputa.nbytes) __UpperCamelCase : Dict = cuda.mem_alloc(h_outputa.nbytes) # Create a stream in which to copy inputs/outputs and run inference. __UpperCamelCase : List[str] = cuda.Stream() # Evaluation logger.info("***** Running Evaluation *****") logger.info(F' Num examples = {len(eval_dataset)}') logger.info(F' Batch size = {args.per_device_eval_batch_size}') __UpperCamelCase : List[str] = 0.0 __UpperCamelCase : Optional[int] = 0 __UpperCamelCase : Dict = timeit.default_timer() __UpperCamelCase : List[Any] = None for step, batch in enumerate(eval_dataloader): __UpperCamelCase , __UpperCamelCase : Any = model_infer(batch, context, d_inputs, h_outputa, h_outputa, d_outputa, d_outputa, stream) total_time += infer_time niter += 1 __UpperCamelCase , __UpperCamelCase : Tuple = outputs __UpperCamelCase : Any = torch.tensor(start_logits) __UpperCamelCase : Union[str, Any] = torch.tensor(end_logits) # necessary to pad predictions and labels for being gathered __UpperCamelCase : str = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100) __UpperCamelCase : Optional[Any] = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100) __UpperCamelCase : Optional[int] = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy()) __UpperCamelCase : Optional[Any] = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100) if all_preds is not None: __UpperCamelCase : int = nested_truncate(all_preds, len(eval_dataset)) __UpperCamelCase : Optional[int] = timeit.default_timer() - start_time logger.info(" Evaluation done in total %f secs (%f sec per example)", evalTime, evalTime / len(eval_dataset)) # Inference time from TRT logger.info("Average Inference Time = {:.3f} ms".format(total_time * 1_000 / niter)) logger.info("Total Inference Time = {:.3f} ms".format(total_time * 1_000)) logger.info("Total Number of Inference = %d", niter) __UpperCamelCase : Optional[Any] = post_processing_function(eval_examples, eval_dataset, all_preds) __UpperCamelCase : Tuple = metric.compute(predictions=prediction.predictions, references=prediction.label_ids) logger.info(F'Evaluation metrics: {eval_metric}')
347
from __future__ import annotations from typing import Generic, TypeVar __UpperCamelCase : Union[str, Any] = TypeVar("T") class __lowerCAmelCase ( Generic[T] ): def __init__( self :Tuple , __magic_name__ :T ): '''simple docstring''' a = data a = self a = 0 class __lowerCAmelCase ( Generic[T] ): def __init__( self :Tuple ): '''simple docstring''' a = {} def lowerCamelCase__ ( self :Optional[int] , __magic_name__ :T ): '''simple docstring''' a = DisjointSetTreeNode(__magic_name__ ) def lowerCamelCase__ ( self :List[Any] , __magic_name__ :T ): '''simple docstring''' a = self.map[data] if elem_ref != elem_ref.parent: a = self.find_set(elem_ref.parent.data ) return elem_ref.parent def lowerCamelCase__ ( self :List[Any] , __magic_name__ :DisjointSetTreeNode[T] , __magic_name__ :DisjointSetTreeNode[T] ): '''simple docstring''' if nodea.rank > nodea.rank: a = nodea else: a = nodea if nodea.rank == nodea.rank: nodea.rank += 1 def lowerCamelCase__ ( self :Optional[int] , __magic_name__ :T , __magic_name__ :T ): '''simple docstring''' self.link(self.find_set(__magic_name__ ) , self.find_set(__magic_name__ ) ) class __lowerCAmelCase ( Generic[T] ): def __init__( self :Union[str, Any] ): '''simple docstring''' a = {} def lowerCamelCase__ ( self :Union[str, Any] , __magic_name__ :T ): '''simple docstring''' if node not in self.connections: a = {} def lowerCamelCase__ ( self :Any , __magic_name__ :T , __magic_name__ :T , __magic_name__ :int ): '''simple docstring''' self.add_node(__magic_name__ ) self.add_node(__magic_name__ ) a = weight a = weight def lowerCamelCase__ ( self :int ): '''simple docstring''' a = [] a = set() for start in self.connections: for end in self.connections[start]: if (start, end) not in seen: seen.add((end, start) ) edges.append((start, end, self.connections[start][end]) ) edges.sort(key=lambda __magic_name__ : x[2] ) # creating the disjoint set a = DisjointSetTree[T]() for node in self.connections: disjoint_set.make_set(__magic_name__ ) # MST generation a = 0 a = 0 a = GraphUndirectedWeighted[T]() while num_edges < len(self.connections ) - 1: a , a , a = edges[index] index += 1 a = disjoint_set.find_set(__magic_name__ ) a = disjoint_set.find_set(__magic_name__ ) if parent_u != parent_v: num_edges += 1 graph.add_edge(__magic_name__ , __magic_name__ , __magic_name__ ) disjoint_set.union(__magic_name__ , __magic_name__ ) return graph
347
1
def __A ( __lowerCamelCase = 50 ) -> int: a = [1] * (length + 1) for row_length in range(length + 1 ): for tile_length in range(2 , 5 ): for tile_start in range(row_length - tile_length + 1 ): ways_number[row_length] += ways_number[ row_length - tile_start - tile_length ] return ways_number[length] if __name__ == "__main__": print(F'{solution() = }')
347
import shutil import tempfile import unittest import numpy as np import pytest from transformers.testing_utils import require_vision from transformers.utils import is_vision_available if is_vision_available(): from PIL import Image from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast @require_vision class __lowerCAmelCase ( unittest.TestCase ): def lowerCamelCase__ ( self :Any ): '''simple docstring''' a = tempfile.mkdtemp() a = BlipImageProcessor() a = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-BertModel""" ) a = BlipProcessor(__magic_name__ , __magic_name__ ) processor.save_pretrained(self.tmpdirname ) def lowerCamelCase__ ( self :List[Any] , **__magic_name__ :Union[str, Any] ): '''simple docstring''' return AutoProcessor.from_pretrained(self.tmpdirname , **__magic_name__ ).tokenizer def lowerCamelCase__ ( self :str , **__magic_name__ :List[str] ): '''simple docstring''' return AutoProcessor.from_pretrained(self.tmpdirname , **__magic_name__ ).image_processor def lowerCamelCase__ ( self :int ): '''simple docstring''' shutil.rmtree(self.tmpdirname ) def lowerCamelCase__ ( self :Any ): '''simple docstring''' a = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] a = [Image.fromarray(np.moveaxis(__magic_name__ , 0 , -1 ) ) for x in image_inputs] return image_inputs def lowerCamelCase__ ( self :Optional[int] ): '''simple docstring''' a = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) a = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" ) a = self.get_image_processor(do_normalize=__magic_name__ , padding_value=1.0 ) a = BlipProcessor.from_pretrained( self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=__magic_name__ , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , __magic_name__ ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , __magic_name__ ) def lowerCamelCase__ ( self :Any ): '''simple docstring''' a = self.get_image_processor() a = self.get_tokenizer() a = BlipProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ ) a = self.prepare_image_inputs() a = image_processor(__magic_name__ , return_tensors="""np""" ) a = processor(images=__magic_name__ , return_tensors="""np""" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) def lowerCamelCase__ ( self :Any ): '''simple docstring''' a = self.get_image_processor() a = self.get_tokenizer() a = BlipProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ ) a = """lower newer""" a = processor(text=__magic_name__ ) a = tokenizer(__magic_name__ , return_token_type_ids=__magic_name__ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def lowerCamelCase__ ( self :List[Any] ): '''simple docstring''' a = self.get_image_processor() a = self.get_tokenizer() a = BlipProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ ) a = """lower newer""" a = self.prepare_image_inputs() a = processor(text=__magic_name__ , images=__magic_name__ ) self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """input_ids""", """attention_mask"""] ) # test if it raises when no input is passed with pytest.raises(__magic_name__ ): processor() def lowerCamelCase__ ( self :List[str] ): '''simple docstring''' a = self.get_image_processor() a = self.get_tokenizer() a = BlipProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ ) a = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] a = processor.batch_decode(__magic_name__ ) a = tokenizer.batch_decode(__magic_name__ ) self.assertListEqual(__magic_name__ , __magic_name__ ) def lowerCamelCase__ ( self :Optional[int] ): '''simple docstring''' a = self.get_image_processor() a = self.get_tokenizer() a = BlipProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ ) a = """lower newer""" a = self.prepare_image_inputs() a = processor(text=__magic_name__ , images=__magic_name__ ) # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask'] self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """input_ids""", """attention_mask"""] )
347
1
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os import subprocess from packaging.version import Version, parse from accelerate.commands.config.config_args import default_config_file, load_config_from_file __UpperCamelCase : Union[str, Any] = "Run commands across TPU VMs for initial setup before running `accelerate launch`." def __A ( __lowerCamelCase=None ) -> Union[str, Any]: if subparsers is not None: a = subparsers.add_parser("""tpu-config""" , description=_description ) else: a = argparse.ArgumentParser("""Accelerate tpu-config command""" , description=_description ) # Core arguments a = parser.add_argument_group( """Config Arguments""" , """Arguments that can be configured through `accelerate config`.""" ) config_args.add_argument( """--config_file""" , type=__lowerCamelCase , default=__lowerCamelCase , help="""Path to the config file to use for accelerate.""" , ) config_args.add_argument( """--tpu_name""" , default=__lowerCamelCase , help="""The name of the TPU to use. If not specified, will use the TPU specified in the config file.""" , ) config_args.add_argument( """--tpu_zone""" , default=__lowerCamelCase , help="""The zone of the TPU to use. If not specified, will use the zone specified in the config file.""" , ) a = parser.add_argument_group("""TPU Arguments""" , """Arguments for options ran inside the TPU.""" ) pod_args.add_argument( """--use_alpha""" , action="""store_true""" , help="""Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.""" , ) pod_args.add_argument( """--command_file""" , default=__lowerCamelCase , help="""The path to the file containing the commands to run on the pod on startup.""" , ) pod_args.add_argument( """--command""" , action="""append""" , nargs="""+""" , help="""A command to run on the pod. Can be passed multiple times.""" , ) pod_args.add_argument( """--install_accelerate""" , action="""store_true""" , help="""Whether to install accelerate on the pod. Defaults to False.""" , ) pod_args.add_argument( """--accelerate_version""" , default="""latest""" , help="""The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub.""" , ) pod_args.add_argument( """--debug""" , action="""store_true""" , help="""If set, will print the command that would be run instead of running it.""" ) if subparsers is not None: parser.set_defaults(func=__lowerCamelCase ) return parser def __A ( __lowerCamelCase ) -> Union[str, Any]: a = None # Get the default from the config file if it exists. if args.config_file is not None or os.path.isfile(__lowerCamelCase ): a = load_config_from_file(args.config_file ) if not args.command_file and defaults.command_file is not None and not args.command: a = defaults.command_file if not args.command and defaults.commands is not None: a = defaults.commands if not args.tpu_name: a = defaults.tpu_name if not args.tpu_zone: a = defaults.tpu_zone if args.accelerate_version == "dev": a = """git+https://github.com/huggingface/accelerate.git""" elif args.accelerate_version == "latest": a = """accelerate -U""" elif isinstance(parse(args.accelerate_version ) , __lowerCamelCase ): a = f'accelerate=={args.accelerate_version}' if not args.command_file and not args.command: raise ValueError("""You must specify either a command file or a command to run on the pod.""" ) if args.command_file: with open(args.command_file , """r""" ) as f: a = [f.read().splitlines()] # To turn list of lists into list of strings if isinstance(args.command[0] , __lowerCamelCase ): a = [line for cmd in args.command for line in cmd] # Default to the shared folder and install accelerate a = ["""cd /usr/share"""] if args.install_accelerate: new_cmd += [f'pip install {args.accelerate_version}'] new_cmd += args.command a = """; """.join(__lowerCamelCase ) # Then send it to gcloud # Eventually try to use google-api-core to do this instead of subprocess a = ["""gcloud"""] if args.use_alpha: cmd += ["alpha"] cmd += [ "compute", "tpus", "tpu-vm", "ssh", args.tpu_name, "--zone", args.tpu_zone, "--command", args.command, "--worker", "all", ] if args.debug: print(f'Running {" ".join(__lowerCamelCase )}' ) return subprocess.run(__lowerCamelCase ) print("""Successfully setup pod.""" ) def __A ( ) -> Dict: a = tpu_command_parser() a = parser.parse_args() tpu_command_launcher(__lowerCamelCase )
347
from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices __UpperCamelCase : Optional[Any] = logging.get_logger(__name__) __UpperCamelCase : int = { "shi-labs/nat-mini-in1k-224": "https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json", # See all Nat models at https://huggingface.co/models?filter=nat } class __lowerCAmelCase ( __magic_name__ , __magic_name__ ): UpperCamelCase__ = '''nat''' UpperCamelCase__ = { '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers''', } def __init__( self :Any , __magic_name__ :int=4 , __magic_name__ :Dict=3 , __magic_name__ :List[str]=64 , __magic_name__ :Optional[int]=[3, 4, 6, 5] , __magic_name__ :int=[2, 4, 8, 16] , __magic_name__ :str=7 , __magic_name__ :Tuple=3.0 , __magic_name__ :Dict=True , __magic_name__ :List[Any]=0.0 , __magic_name__ :List[Any]=0.0 , __magic_name__ :List[Any]=0.1 , __magic_name__ :Optional[Any]="gelu" , __magic_name__ :Optional[Any]=0.02 , __magic_name__ :Tuple=1E-5 , __magic_name__ :Union[str, Any]=0.0 , __magic_name__ :int=None , __magic_name__ :Any=None , **__magic_name__ :Dict , ): '''simple docstring''' super().__init__(**__magic_name__ ) a = patch_size a = num_channels a = embed_dim a = depths a = len(__magic_name__ ) a = num_heads a = kernel_size a = mlp_ratio a = qkv_bias a = hidden_dropout_prob a = attention_probs_dropout_prob a = drop_path_rate a = hidden_act a = layer_norm_eps a = initializer_range # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model a = int(embed_dim * 2 ** (len(__magic_name__ ) - 1) ) a = layer_scale_init_value a = ["""stem"""] + [F'stage{idx}' for idx in range(1 , len(__magic_name__ ) + 1 )] a , a = get_aligned_output_features_output_indices( out_features=__magic_name__ , out_indices=__magic_name__ , stage_names=self.stage_names )
347
1
from copy import deepcopy class __lowerCAmelCase : def __init__( self :Union[str, Any] , __magic_name__ :list[int] | None = None , __magic_name__ :int | None = None ): '''simple docstring''' if arr is None and size is not None: a = size a = [0] * size elif arr is not None: self.init(__magic_name__ ) else: raise ValueError("""Either arr or size must be specified""" ) def lowerCamelCase__ ( self :Dict , __magic_name__ :list[int] ): '''simple docstring''' a = len(__magic_name__ ) a = deepcopy(__magic_name__ ) for i in range(1 , self.size ): a = self.next_(__magic_name__ ) if j < self.size: self.tree[j] += self.tree[i] def lowerCamelCase__ ( self :Tuple ): '''simple docstring''' a = self.tree[:] for i in range(self.size - 1 , 0 , -1 ): a = self.next_(__magic_name__ ) if j < self.size: arr[j] -= arr[i] return arr @staticmethod def lowerCamelCase__ ( __magic_name__ :int ): '''simple docstring''' return index + (index & (-index)) @staticmethod def lowerCamelCase__ ( __magic_name__ :int ): '''simple docstring''' return index - (index & (-index)) def lowerCamelCase__ ( self :Any , __magic_name__ :int , __magic_name__ :int ): '''simple docstring''' if index == 0: self.tree[0] += value return while index < self.size: self.tree[index] += value a = self.next_(__magic_name__ ) def lowerCamelCase__ ( self :Optional[Any] , __magic_name__ :int , __magic_name__ :int ): '''simple docstring''' self.add(__magic_name__ , value - self.get(__magic_name__ ) ) def lowerCamelCase__ ( self :int , __magic_name__ :int ): '''simple docstring''' if right == 0: return 0 a = self.tree[0] right -= 1 # make right inclusive while right > 0: result += self.tree[right] a = self.prev(__magic_name__ ) return result def lowerCamelCase__ ( self :int , __magic_name__ :int , __magic_name__ :int ): '''simple docstring''' return self.prefix(__magic_name__ ) - self.prefix(__magic_name__ ) def lowerCamelCase__ ( self :Tuple , __magic_name__ :int ): '''simple docstring''' return self.query(__magic_name__ , index + 1 ) def lowerCamelCase__ ( self :Dict , __magic_name__ :int ): '''simple docstring''' value -= self.tree[0] if value < 0: return -1 a = 1 # Largest power of 2 <= size while j * 2 < self.size: j *= 2 a = 0 while j > 0: if i + j < self.size and self.tree[i + j] <= value: value -= self.tree[i + j] i += j j //= 2 return i if __name__ == "__main__": import doctest doctest.testmod()
347
import argparse import json import os import tensorstore as ts import torch from flax import serialization from flax.traverse_util import flatten_dict, unflatten_dict from tensorflow.io import gfile from transformers.modeling_utils import dtype_byte_size from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import ( rename_keys, ) from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME from transformers.utils.hub import convert_file_size_to_int def __A ( __lowerCamelCase , __lowerCamelCase ) -> Optional[int]: if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3: # expert layer a = flax_key_tuple[:-1] + ("""weight""",) a = torch.permute(__lowerCamelCase , (0, 2, 1) ) elif flax_key_tuple[-1] == "kernel" and ".".join(__lowerCamelCase ): # linear layer a = flax_key_tuple[:-1] + ("""weight""",) a = flax_tensor.T elif flax_key_tuple[-1] in ["scale", "embedding"]: a = flax_key_tuple[:-1] + ("""weight""",) return flax_key_tuple, flax_tensor def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> List[Any]: if "metadata" in layer: a = layer.split("""metadata""" ) a = """""".join(split_layer[0] )[:-1] a = [tuple(("""metadata""" + split_layer[1]).split("""/""" ) )] elif "kvstore" in layer: a = layer.split("""kvstore""" ) a = """""".join(split_layer[0] )[:-1] a = [tuple(("""kvstore""" + split_layer[1]).split("""/""" ) )] else: a = layer.split("""/""" ) a = """/""".join(split_layer[:-1] ) a = (split_layer[-1],) if "kvstore/path" in layer: a = f'{switch_checkpoint_path}/{checkpoint_info[layer]}' elif "kvstore/driver" in layer: a = """file""" else: a = checkpoint_info[layer] return curr_real_layer_name, split_layer, content def __A ( __lowerCamelCase , __lowerCamelCase ) -> Union[str, Any]: a = rename_keys(__lowerCamelCase ) a = {} for k, v in current_block.items(): a = v a = new_current_block torch.save(__lowerCamelCase , __lowerCamelCase ) def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = WEIGHTS_NAME ) -> List[str]: a = convert_file_size_to_int(__lowerCamelCase ) a = [] a = {} a = 0 a = 0 os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase ) with gfile.GFile(switch_checkpoint_path + """/checkpoint""" , """rb""" ) as fp: a = serialization.msgpack_restore(fp.read() )["""optimizer"""]["""target"""] a = flatten_dict(__lowerCamelCase , sep="""/""" ) a = {} for layer in checkpoint_info.keys(): a , a , a = get_key_and_tensorstore_dict( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) if curr_real_layer_name in all_layers: a = content else: a = {split_layer[-1]: content} for key in all_layers.keys(): # open tensorstore file a = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result() a = torch.tensor(__lowerCamelCase ) a = raw_weights.numel() * dtype_byte_size(raw_weights.dtype ) # use the renaming pattern from the small conversion scripts a , a = rename_base_flax_keys(tuple(key.split("""/""" ) ) , __lowerCamelCase ) a = """/""".join(__lowerCamelCase ) # If this weight is going to tip up over the maximal size, we split. if current_block_size + weight_size > max_shard_size: a = os.path.join( __lowerCamelCase , weights_name.replace(""".bin""" , f'-{len(__lowerCamelCase )+1:05d}-of-???.bin' ) ) rename_and_save_block(__lowerCamelCase , __lowerCamelCase ) sharded_state_dicts.append(current_block.keys() ) del current_block a = {} a = 0 a = raw_weights.to(getattr(__lowerCamelCase , __lowerCamelCase ) ) current_block_size += weight_size total_size += weight_size # Add the last block a = os.path.join(__lowerCamelCase , weights_name.replace(""".bin""" , f'-{len(__lowerCamelCase )+1:05d}-of-???.bin' ) ) rename_and_save_block(__lowerCamelCase , __lowerCamelCase ) sharded_state_dicts.append(current_block.keys() ) # If we only have one shard, we return it if len(__lowerCamelCase ) == 1: return {weights_name: sharded_state_dicts[0]}, None # Otherwise, let's build the index a = {} a = {} for idx, shard in enumerate(__lowerCamelCase ): a = weights_name.replace( """.bin""" , f'-{idx+1:05d}-of-{len(__lowerCamelCase ):05d}.bin' ) # len(sharded_state_dicts):05d} a = os.path.join(__lowerCamelCase , weights_name.replace(""".bin""" , f'-{idx+1:05d}-of-???.bin' ) ) os.rename(__lowerCamelCase , os.path.join(__lowerCamelCase , __lowerCamelCase ) ) a = shard for key in shard: a = shard_file # Add the metadata a = {"""total_size""": total_size} a = {"""metadata""": metadata, """weight_map""": weight_map} with open(os.path.join(__lowerCamelCase , __lowerCamelCase ) , """w""" , encoding="""utf-8""" ) as f: a = json.dumps(__lowerCamelCase , indent=2 , sort_keys=__lowerCamelCase ) + """\n""" f.write(__lowerCamelCase ) return metadata, index if __name__ == "__main__": __UpperCamelCase : Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--switch_t5x_checkpoint_path", default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600", type=str, required=False, help="Path to a directory containing a folder per layer. Follows the original Google format.", ) parser.add_argument("--max_shard_size", default="10GB", required=False, help="Max shard size") parser.add_argument("--dtype", default="bfloat16", type=str, required=False, help="dtype of the saved model") parser.add_argument( "--pytorch_dump_folder_path", default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted", type=str, required=False, help="Path to the output pytorch model.", ) __UpperCamelCase : Any = parser.parse_args() shard_on_the_fly( args.switch_tax_checkpoint_path, args.pytorch_dump_folder_path, args.max_shard_size, args.dtype, ) def __A ( ) -> Tuple: from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer a = SwitchTransformersConfig.from_pretrained("""google/switch-base-8""" ) config.save_pretrained("""/home/arthur_huggingface_co/transformers/switch_converted""" ) a = SwitchTransformersForConditionalGeneration.from_pretrained( """/home/arthur_huggingface_co/transformers/switch_converted""" , device_map="""auto""" ) a = TaTokenizer.from_pretrained("""t5-small""" ) a = """A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.""" a = tokenizer(__lowerCamelCase , return_tensors="""pt""" ).input_ids a = model.generate(__lowerCamelCase , decoder_start_token_id=0 ) print(tokenizer.decode(out[0] ) )
347
1
import glob import os import random from string import ascii_lowercase, digits import cva import numpy as np # Parrameters __UpperCamelCase : Union[str, Any] = (720, 1_280) # Height, Width __UpperCamelCase : Any = (0.4, 0.6) # if height or width lower than this scale, drop it. __UpperCamelCase : str = 1 / 100 __UpperCamelCase : Optional[int] = "" __UpperCamelCase : List[Any] = "" __UpperCamelCase : Union[str, Any] = "" __UpperCamelCase : Tuple = 250 def __A ( ) -> None: a , a = get_dataset(__lowerCamelCase , __lowerCamelCase ) for index in range(__lowerCamelCase ): a = random.sample(range(len(__lowerCamelCase ) ) , 4 ) a , a , a = update_image_and_anno( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , filter_scale=__lowerCamelCase , ) # Get random string code: '7b7ad245cdff75241935e4dd860f3bad' a = random_chars(32 ) a = path.split(os.sep )[-1].rsplit(""".""" , 1 )[0] a = f'{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}' cva.imwrite(f'{file_root}.jpg' , __lowerCamelCase , [cva.IMWRITE_JPEG_QUALITY, 85] ) print(f'Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}' ) a = [] for anno in new_annos: a = anno[3] - anno[1] a = anno[4] - anno[2] a = anno[1] + width / 2 a = anno[2] + height / 2 a = f'{anno[0]} {x_center} {y_center} {width} {height}' annos_list.append(__lowerCamelCase ) with open(f'{file_root}.txt' , """w""" ) as outfile: outfile.write("""\n""".join(line for line in annos_list ) ) def __A ( __lowerCamelCase , __lowerCamelCase ) -> tuple[list, list]: a = [] a = [] for label_file in glob.glob(os.path.join(__lowerCamelCase , """*.txt""" ) ): a = label_file.split(os.sep )[-1].rsplit(""".""" , 1 )[0] with open(__lowerCamelCase ) as in_file: a = in_file.readlines() a = os.path.join(__lowerCamelCase , f'{label_name}.jpg' ) a = [] for obj_list in obj_lists: a = obj_list.rstrip("""\n""" ).split(""" """ ) a = float(obj[1] ) - float(obj[3] ) / 2 a = float(obj[2] ) - float(obj[4] ) / 2 a = float(obj[1] ) + float(obj[3] ) / 2 a = float(obj[2] ) + float(obj[4] ) / 2 boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] ) if not boxes: continue img_paths.append(__lowerCamelCase ) labels.append(__lowerCamelCase ) return img_paths, labels def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = 0.0 , ) -> tuple[list, list, str]: a = np.zeros([output_size[0], output_size[1], 3] , dtype=np.uinta ) a = scale_range[0] + random.random() * (scale_range[1] - scale_range[0]) a = scale_range[0] + random.random() * (scale_range[1] - scale_range[0]) a = int(scale_x * output_size[1] ) a = int(scale_y * output_size[0] ) a = [] a = [] for i, index in enumerate(__lowerCamelCase ): a = all_img_list[index] path_list.append(__lowerCamelCase ) a = all_annos[index] a = cva.imread(__lowerCamelCase ) if i == 0: # top-left a = cva.resize(__lowerCamelCase , (divid_point_x, divid_point_y) ) a = img for bbox in img_annos: a = bbox[1] * scale_x a = bbox[2] * scale_y a = bbox[3] * scale_x a = bbox[4] * scale_y new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) elif i == 1: # top-right a = cva.resize(__lowerCamelCase , (output_size[1] - divid_point_x, divid_point_y) ) a = img for bbox in img_annos: a = scale_x + bbox[1] * (1 - scale_x) a = bbox[2] * scale_y a = scale_x + bbox[3] * (1 - scale_x) a = bbox[4] * scale_y new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) elif i == 2: # bottom-left a = cva.resize(__lowerCamelCase , (divid_point_x, output_size[0] - divid_point_y) ) a = img for bbox in img_annos: a = bbox[1] * scale_x a = scale_y + bbox[2] * (1 - scale_y) a = bbox[3] * scale_x a = scale_y + bbox[4] * (1 - scale_y) new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) else: # bottom-right a = cva.resize( __lowerCamelCase , (output_size[1] - divid_point_x, output_size[0] - divid_point_y) ) a = img for bbox in img_annos: a = scale_x + bbox[1] * (1 - scale_x) a = scale_y + bbox[2] * (1 - scale_y) a = scale_x + bbox[3] * (1 - scale_x) a = scale_y + bbox[4] * (1 - scale_y) new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) # Remove bounding box small than scale of filter if filter_scale > 0: a = [ anno for anno in new_anno if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2]) ] return output_img, new_anno, path_list[0] def __A ( __lowerCamelCase ) -> str: assert number_char > 1, "The number of character should greater than 1" a = ascii_lowercase + digits return "".join(random.choice(__lowerCamelCase ) for _ in range(__lowerCamelCase ) ) if __name__ == "__main__": main() print("DONE ✅")
347
import glob import os import random from string import ascii_lowercase, digits import cva import numpy as np # Parrameters __UpperCamelCase : Union[str, Any] = (720, 1_280) # Height, Width __UpperCamelCase : Any = (0.4, 0.6) # if height or width lower than this scale, drop it. __UpperCamelCase : str = 1 / 100 __UpperCamelCase : Optional[int] = "" __UpperCamelCase : List[Any] = "" __UpperCamelCase : Union[str, Any] = "" __UpperCamelCase : Tuple = 250 def __A ( ) -> None: a , a = get_dataset(__lowerCamelCase , __lowerCamelCase ) for index in range(__lowerCamelCase ): a = random.sample(range(len(__lowerCamelCase ) ) , 4 ) a , a , a = update_image_and_anno( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , filter_scale=__lowerCamelCase , ) # Get random string code: '7b7ad245cdff75241935e4dd860f3bad' a = random_chars(32 ) a = path.split(os.sep )[-1].rsplit(""".""" , 1 )[0] a = f'{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}' cva.imwrite(f'{file_root}.jpg' , __lowerCamelCase , [cva.IMWRITE_JPEG_QUALITY, 85] ) print(f'Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}' ) a = [] for anno in new_annos: a = anno[3] - anno[1] a = anno[4] - anno[2] a = anno[1] + width / 2 a = anno[2] + height / 2 a = f'{anno[0]} {x_center} {y_center} {width} {height}' annos_list.append(__lowerCamelCase ) with open(f'{file_root}.txt' , """w""" ) as outfile: outfile.write("""\n""".join(line for line in annos_list ) ) def __A ( __lowerCamelCase , __lowerCamelCase ) -> tuple[list, list]: a = [] a = [] for label_file in glob.glob(os.path.join(__lowerCamelCase , """*.txt""" ) ): a = label_file.split(os.sep )[-1].rsplit(""".""" , 1 )[0] with open(__lowerCamelCase ) as in_file: a = in_file.readlines() a = os.path.join(__lowerCamelCase , f'{label_name}.jpg' ) a = [] for obj_list in obj_lists: a = obj_list.rstrip("""\n""" ).split(""" """ ) a = float(obj[1] ) - float(obj[3] ) / 2 a = float(obj[2] ) - float(obj[4] ) / 2 a = float(obj[1] ) + float(obj[3] ) / 2 a = float(obj[2] ) + float(obj[4] ) / 2 boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] ) if not boxes: continue img_paths.append(__lowerCamelCase ) labels.append(__lowerCamelCase ) return img_paths, labels def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = 0.0 , ) -> tuple[list, list, str]: a = np.zeros([output_size[0], output_size[1], 3] , dtype=np.uinta ) a = scale_range[0] + random.random() * (scale_range[1] - scale_range[0]) a = scale_range[0] + random.random() * (scale_range[1] - scale_range[0]) a = int(scale_x * output_size[1] ) a = int(scale_y * output_size[0] ) a = [] a = [] for i, index in enumerate(__lowerCamelCase ): a = all_img_list[index] path_list.append(__lowerCamelCase ) a = all_annos[index] a = cva.imread(__lowerCamelCase ) if i == 0: # top-left a = cva.resize(__lowerCamelCase , (divid_point_x, divid_point_y) ) a = img for bbox in img_annos: a = bbox[1] * scale_x a = bbox[2] * scale_y a = bbox[3] * scale_x a = bbox[4] * scale_y new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) elif i == 1: # top-right a = cva.resize(__lowerCamelCase , (output_size[1] - divid_point_x, divid_point_y) ) a = img for bbox in img_annos: a = scale_x + bbox[1] * (1 - scale_x) a = bbox[2] * scale_y a = scale_x + bbox[3] * (1 - scale_x) a = bbox[4] * scale_y new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) elif i == 2: # bottom-left a = cva.resize(__lowerCamelCase , (divid_point_x, output_size[0] - divid_point_y) ) a = img for bbox in img_annos: a = bbox[1] * scale_x a = scale_y + bbox[2] * (1 - scale_y) a = bbox[3] * scale_x a = scale_y + bbox[4] * (1 - scale_y) new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) else: # bottom-right a = cva.resize( __lowerCamelCase , (output_size[1] - divid_point_x, output_size[0] - divid_point_y) ) a = img for bbox in img_annos: a = scale_x + bbox[1] * (1 - scale_x) a = scale_y + bbox[2] * (1 - scale_y) a = scale_x + bbox[3] * (1 - scale_x) a = scale_y + bbox[4] * (1 - scale_y) new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) # Remove bounding box small than scale of filter if filter_scale > 0: a = [ anno for anno in new_anno if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2]) ] return output_img, new_anno, path_list[0] def __A ( __lowerCamelCase ) -> str: assert number_char > 1, "The number of character should greater than 1" a = ascii_lowercase + digits return "".join(random.choice(__lowerCamelCase ) for _ in range(__lowerCamelCase ) ) if __name__ == "__main__": main() print("DONE ✅")
347
1
def __A ( __lowerCamelCase , __lowerCamelCase ) -> Optional[Any]: # "extended trapezoidal rule" # int(f) = dx/2 * (f1 + 2f2 + ... + fn) a = (boundary[1] - boundary[0]) / steps a = boundary[0] a = boundary[1] a = make_points(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) a = 0.0 y += (h / 2.0) * f(__lowerCamelCase ) for i in x_i: # print(i) y += h * f(__lowerCamelCase ) y += (h / 2.0) * f(__lowerCamelCase ) return y def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Union[str, Any]: a = a + h while x < (b - h): yield x a = x + h def __A ( __lowerCamelCase ) -> Dict: # enter your function here a = (x - 0) * (x - 0) return y def __A ( ) -> Optional[Any]: a = 0.0 # Lower bound of integration a = 1.0 # Upper bound of integration a = 10.0 # define number of steps or resolution a = [a, b] # define boundary of integration a = method_a(__lowerCamelCase , __lowerCamelCase ) print(f'y = {y}' ) if __name__ == "__main__": main()
347
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available __UpperCamelCase : Optional[Any] = { "configuration_mobilenet_v2": [ "MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileNetV2Config", "MobileNetV2OnnxConfig", ], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : int = ["MobileNetV2FeatureExtractor"] __UpperCamelCase : Tuple = ["MobileNetV2ImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : int = [ "MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST", "MobileNetV2ForImageClassification", "MobileNetV2ForSemanticSegmentation", "MobileNetV2Model", "MobileNetV2PreTrainedModel", "load_tf_weights_in_mobilenet_v2", ] if TYPE_CHECKING: from .configuration_mobilenet_va import ( MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileNetVaConfig, MobileNetVaOnnxConfig, ) try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_mobilenet_va import MobileNetVaFeatureExtractor from .image_processing_mobilenet_va import MobileNetVaImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mobilenet_va import ( MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel, MobileNetVaPreTrainedModel, load_tf_weights_in_mobilenet_va, ) else: import sys __UpperCamelCase : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
347
1
from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxSeqaSeqConfigWithPast from ...utils import logging __UpperCamelCase : Tuple = logging.get_logger(__name__) __UpperCamelCase : Optional[int] = { "t5-small": "https://huggingface.co/t5-small/resolve/main/config.json", "t5-base": "https://huggingface.co/t5-base/resolve/main/config.json", "t5-large": "https://huggingface.co/t5-large/resolve/main/config.json", "t5-3b": "https://huggingface.co/t5-3b/resolve/main/config.json", "t5-11b": "https://huggingface.co/t5-11b/resolve/main/config.json", } class __lowerCAmelCase ( __magic_name__ ): UpperCamelCase__ = '''t5''' UpperCamelCase__ = ['''past_key_values'''] UpperCamelCase__ = {'''hidden_size''': '''d_model''', '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''} def __init__( self :Optional[int] , __magic_name__ :Union[str, Any]=3_2128 , __magic_name__ :Dict=512 , __magic_name__ :int=64 , __magic_name__ :int=2048 , __magic_name__ :Any=6 , __magic_name__ :Union[str, Any]=None , __magic_name__ :Optional[int]=8 , __magic_name__ :List[str]=32 , __magic_name__ :Optional[Any]=128 , __magic_name__ :List[str]=0.1 , __magic_name__ :List[Any]=1E-6 , __magic_name__ :int=1.0 , __magic_name__ :str="relu" , __magic_name__ :Any=True , __magic_name__ :Tuple=True , __magic_name__ :Union[str, Any]=0 , __magic_name__ :Any=1 , **__magic_name__ :Any , ): '''simple docstring''' a = vocab_size a = d_model a = d_kv a = d_ff a = num_layers a = ( num_decoder_layers if num_decoder_layers is not None else self.num_layers ) # default = symmetry a = num_heads a = relative_attention_num_buckets a = relative_attention_max_distance a = dropout_rate a = layer_norm_epsilon a = initializer_factor a = feed_forward_proj a = use_cache a = self.feed_forward_proj.split("""-""" ) a = act_info[-1] a = act_info[0] == """gated""" if len(__magic_name__ ) > 1 and act_info[0] != "gated" or len(__magic_name__ ) > 2: raise ValueError( F'`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.' """Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. """ """'gated-gelu' or 'relu'""" ) # for backwards compatibility if feed_forward_proj == "gated-gelu": a = """gelu_new""" super().__init__( pad_token_id=__magic_name__ , eos_token_id=__magic_name__ , is_encoder_decoder=__magic_name__ , **__magic_name__ , ) class __lowerCAmelCase ( __magic_name__ ): @property def lowerCamelCase__ ( self :Optional[int] ): '''simple docstring''' a = { """input_ids""": {0: """batch""", 1: """encoder_sequence"""}, """attention_mask""": {0: """batch""", 1: """encoder_sequence"""}, } if self.use_past: a = """past_encoder_sequence + sequence""" a = {0: """batch"""} a = {0: """batch""", 1: """past_decoder_sequence + sequence"""} else: a = {0: """batch""", 1: """decoder_sequence"""} a = {0: """batch""", 1: """decoder_sequence"""} if self.use_past: self.fill_with_past_key_values_(__magic_name__ , direction="""inputs""" ) return common_inputs @property def lowerCamelCase__ ( self :int ): '''simple docstring''' return 13
347
def __A ( __lowerCamelCase ) -> bool: if num < 0: return False a = num a = 0 while num > 0: a = rev_num * 10 + (num % 10) num //= 10 return num_copy == rev_num if __name__ == "__main__": import doctest doctest.testmod()
347
1
import unittest import numpy as np from transformers.testing_utils import require_flax, require_tf, require_torch from transformers.utils import ( expand_dims, flatten_dict, is_flax_available, is_tf_available, is_torch_available, reshape, squeeze, transpose, ) if is_flax_available(): import jax.numpy as jnp if is_tf_available(): import tensorflow as tf if is_torch_available(): import torch class __lowerCAmelCase ( unittest.TestCase ): def lowerCamelCase__ ( self :Optional[int] ): '''simple docstring''' a = { """task_specific_params""": { """summarization""": {"""length_penalty""": 1.0, """max_length""": 128, """min_length""": 12, """num_beams""": 4}, """summarization_cnn""": {"""length_penalty""": 2.0, """max_length""": 142, """min_length""": 56, """num_beams""": 4}, """summarization_xsum""": {"""length_penalty""": 1.0, """max_length""": 62, """min_length""": 11, """num_beams""": 6}, } } a = { """task_specific_params.summarization.length_penalty""": 1.0, """task_specific_params.summarization.max_length""": 128, """task_specific_params.summarization.min_length""": 12, """task_specific_params.summarization.num_beams""": 4, """task_specific_params.summarization_cnn.length_penalty""": 2.0, """task_specific_params.summarization_cnn.max_length""": 142, """task_specific_params.summarization_cnn.min_length""": 56, """task_specific_params.summarization_cnn.num_beams""": 4, """task_specific_params.summarization_xsum.length_penalty""": 1.0, """task_specific_params.summarization_xsum.max_length""": 62, """task_specific_params.summarization_xsum.min_length""": 11, """task_specific_params.summarization_xsum.num_beams""": 6, } self.assertEqual(flatten_dict(__magic_name__ ) , __magic_name__ ) def lowerCamelCase__ ( self :str ): '''simple docstring''' a = np.random.randn(3 , 4 ) self.assertTrue(np.allclose(transpose(__magic_name__ ) , x.transpose() ) ) a = np.random.randn(3 , 4 , 5 ) self.assertTrue(np.allclose(transpose(__magic_name__ , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) ) @require_torch def lowerCamelCase__ ( self :List[Any] ): '''simple docstring''' a = np.random.randn(3 , 4 ) a = torch.tensor(__magic_name__ ) self.assertTrue(np.allclose(transpose(__magic_name__ ) , transpose(__magic_name__ ).numpy() ) ) a = np.random.randn(3 , 4 , 5 ) a = torch.tensor(__magic_name__ ) self.assertTrue(np.allclose(transpose(__magic_name__ , axes=(1, 2, 0) ) , transpose(__magic_name__ , axes=(1, 2, 0) ).numpy() ) ) @require_tf def lowerCamelCase__ ( self :Optional[int] ): '''simple docstring''' a = np.random.randn(3 , 4 ) a = tf.constant(__magic_name__ ) self.assertTrue(np.allclose(transpose(__magic_name__ ) , transpose(__magic_name__ ).numpy() ) ) a = np.random.randn(3 , 4 , 5 ) a = tf.constant(__magic_name__ ) self.assertTrue(np.allclose(transpose(__magic_name__ , axes=(1, 2, 0) ) , transpose(__magic_name__ , axes=(1, 2, 0) ).numpy() ) ) @require_flax def lowerCamelCase__ ( self :int ): '''simple docstring''' a = np.random.randn(3 , 4 ) a = jnp.array(__magic_name__ ) self.assertTrue(np.allclose(transpose(__magic_name__ ) , np.asarray(transpose(__magic_name__ ) ) ) ) a = np.random.randn(3 , 4 , 5 ) a = jnp.array(__magic_name__ ) self.assertTrue(np.allclose(transpose(__magic_name__ , axes=(1, 2, 0) ) , np.asarray(transpose(__magic_name__ , axes=(1, 2, 0) ) ) ) ) def lowerCamelCase__ ( self :Dict ): '''simple docstring''' a = np.random.randn(3 , 4 ) self.assertTrue(np.allclose(reshape(__magic_name__ , (4, 3) ) , np.reshape(__magic_name__ , (4, 3) ) ) ) a = np.random.randn(3 , 4 , 5 ) self.assertTrue(np.allclose(reshape(__magic_name__ , (12, 5) ) , np.reshape(__magic_name__ , (12, 5) ) ) ) @require_torch def lowerCamelCase__ ( self :Any ): '''simple docstring''' a = np.random.randn(3 , 4 ) a = torch.tensor(__magic_name__ ) self.assertTrue(np.allclose(reshape(__magic_name__ , (4, 3) ) , reshape(__magic_name__ , (4, 3) ).numpy() ) ) a = np.random.randn(3 , 4 , 5 ) a = torch.tensor(__magic_name__ ) self.assertTrue(np.allclose(reshape(__magic_name__ , (12, 5) ) , reshape(__magic_name__ , (12, 5) ).numpy() ) ) @require_tf def lowerCamelCase__ ( self :Optional[Any] ): '''simple docstring''' a = np.random.randn(3 , 4 ) a = tf.constant(__magic_name__ ) self.assertTrue(np.allclose(reshape(__magic_name__ , (4, 3) ) , reshape(__magic_name__ , (4, 3) ).numpy() ) ) a = np.random.randn(3 , 4 , 5 ) a = tf.constant(__magic_name__ ) self.assertTrue(np.allclose(reshape(__magic_name__ , (12, 5) ) , reshape(__magic_name__ , (12, 5) ).numpy() ) ) @require_flax def lowerCamelCase__ ( self :Optional[int] ): '''simple docstring''' a = np.random.randn(3 , 4 ) a = jnp.array(__magic_name__ ) self.assertTrue(np.allclose(reshape(__magic_name__ , (4, 3) ) , np.asarray(reshape(__magic_name__ , (4, 3) ) ) ) ) a = np.random.randn(3 , 4 , 5 ) a = jnp.array(__magic_name__ ) self.assertTrue(np.allclose(reshape(__magic_name__ , (12, 5) ) , np.asarray(reshape(__magic_name__ , (12, 5) ) ) ) ) def lowerCamelCase__ ( self :Optional[int] ): '''simple docstring''' a = np.random.randn(1 , 3 , 4 ) self.assertTrue(np.allclose(squeeze(__magic_name__ ) , np.squeeze(__magic_name__ ) ) ) a = np.random.randn(1 , 4 , 1 , 5 ) self.assertTrue(np.allclose(squeeze(__magic_name__ , axis=2 ) , np.squeeze(__magic_name__ , axis=2 ) ) ) @require_torch def lowerCamelCase__ ( self :int ): '''simple docstring''' a = np.random.randn(1 , 3 , 4 ) a = torch.tensor(__magic_name__ ) self.assertTrue(np.allclose(squeeze(__magic_name__ ) , squeeze(__magic_name__ ).numpy() ) ) a = np.random.randn(1 , 4 , 1 , 5 ) a = torch.tensor(__magic_name__ ) self.assertTrue(np.allclose(squeeze(__magic_name__ , axis=2 ) , squeeze(__magic_name__ , axis=2 ).numpy() ) ) @require_tf def lowerCamelCase__ ( self :List[str] ): '''simple docstring''' a = np.random.randn(1 , 3 , 4 ) a = tf.constant(__magic_name__ ) self.assertTrue(np.allclose(squeeze(__magic_name__ ) , squeeze(__magic_name__ ).numpy() ) ) a = np.random.randn(1 , 4 , 1 , 5 ) a = tf.constant(__magic_name__ ) self.assertTrue(np.allclose(squeeze(__magic_name__ , axis=2 ) , squeeze(__magic_name__ , axis=2 ).numpy() ) ) @require_flax def lowerCamelCase__ ( self :Dict ): '''simple docstring''' a = np.random.randn(1 , 3 , 4 ) a = jnp.array(__magic_name__ ) self.assertTrue(np.allclose(squeeze(__magic_name__ ) , np.asarray(squeeze(__magic_name__ ) ) ) ) a = np.random.randn(1 , 4 , 1 , 5 ) a = jnp.array(__magic_name__ ) self.assertTrue(np.allclose(squeeze(__magic_name__ , axis=2 ) , np.asarray(squeeze(__magic_name__ , axis=2 ) ) ) ) def lowerCamelCase__ ( self :List[Any] ): '''simple docstring''' a = np.random.randn(3 , 4 ) self.assertTrue(np.allclose(expand_dims(__magic_name__ , axis=1 ) , np.expand_dims(__magic_name__ , axis=1 ) ) ) @require_torch def lowerCamelCase__ ( self :Optional[int] ): '''simple docstring''' a = np.random.randn(3 , 4 ) a = torch.tensor(__magic_name__ ) self.assertTrue(np.allclose(expand_dims(__magic_name__ , axis=1 ) , expand_dims(__magic_name__ , axis=1 ).numpy() ) ) @require_tf def lowerCamelCase__ ( self :List[str] ): '''simple docstring''' a = np.random.randn(3 , 4 ) a = tf.constant(__magic_name__ ) self.assertTrue(np.allclose(expand_dims(__magic_name__ , axis=1 ) , expand_dims(__magic_name__ , axis=1 ).numpy() ) ) @require_flax def lowerCamelCase__ ( self :List[str] ): '''simple docstring''' a = np.random.randn(3 , 4 ) a = jnp.array(__magic_name__ ) self.assertTrue(np.allclose(expand_dims(__magic_name__ , axis=1 ) , np.asarray(expand_dims(__magic_name__ , axis=1 ) ) ) )
347
import json import os import shutil import tempfile import unittest from transformers import BatchEncoding, CanineTokenizer from transformers.testing_utils import require_tokenizers, require_torch from transformers.tokenization_utils import AddedToken from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ): UpperCamelCase__ = CanineTokenizer UpperCamelCase__ = False def lowerCamelCase__ ( self :Tuple ): '''simple docstring''' super().setUp() a = CanineTokenizer() tokenizer.save_pretrained(self.tmpdirname ) @cached_property def lowerCamelCase__ ( self :Dict ): '''simple docstring''' return CanineTokenizer.from_pretrained("""google/canine-s""" ) def lowerCamelCase__ ( self :Tuple , **__magic_name__ :Dict ): '''simple docstring''' a = self.tokenizer_class.from_pretrained(self.tmpdirname , **__magic_name__ ) a = 1024 return tokenizer @require_torch def lowerCamelCase__ ( self :int ): '''simple docstring''' a = self.canine_tokenizer a = ["""Life is like a box of chocolates.""", """You never know what you're gonna get."""] # fmt: off a = [5_7344, 76, 105, 102, 101, 32, 105, 115, 32, 108, 105, 107, 101, 32, 97, 32, 98, 111, 120, 32, 111, 102, 32, 99, 104, 111, 99, 111, 108, 97, 116, 101, 115, 46, 5_7345, 0, 0, 0, 0] # fmt: on a = tokenizer(__magic_name__ , padding=__magic_name__ , return_tensors="""pt""" ) self.assertIsInstance(__magic_name__ , __magic_name__ ) a = list(batch.input_ids.numpy()[0] ) self.assertListEqual(__magic_name__ , __magic_name__ ) self.assertEqual((2, 39) , batch.input_ids.shape ) self.assertEqual((2, 39) , batch.attention_mask.shape ) @require_torch def lowerCamelCase__ ( self :Any ): '''simple docstring''' a = self.canine_tokenizer a = ["""Once there was a man.""", """He wrote a test in HuggingFace Tranformers."""] a = tokenizer(__magic_name__ , padding=__magic_name__ , return_tensors="""pt""" ) # check if input_ids, attention_mask and token_type_ids are returned self.assertIn("""input_ids""" , __magic_name__ ) self.assertIn("""attention_mask""" , __magic_name__ ) self.assertIn("""token_type_ids""" , __magic_name__ ) @require_torch def lowerCamelCase__ ( self :Any ): '''simple docstring''' a = self.canine_tokenizer a = [ """What's the weater?""", """It's about 25 degrees.""", ] a = tokenizer( text_target=__magic_name__ , max_length=32 , padding="""max_length""" , truncation=__magic_name__ , return_tensors="""pt""" ) self.assertEqual(32 , targets["""input_ids"""].shape[1] ) def lowerCamelCase__ ( self :Union[str, Any] ): '''simple docstring''' a = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): self.assertNotEqual(tokenizer.model_max_length , 42 ) # Now let's start the test a = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): # Isolate this from the other tests because we save additional tokens/etc a = tempfile.mkdtemp() a = """ He is very happy, UNwant\u00E9d,running""" a = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) tokenizer.save_pretrained(__magic_name__ ) a = tokenizer.__class__.from_pretrained(__magic_name__ ) a = after_tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) self.assertListEqual(__magic_name__ , __magic_name__ ) shutil.rmtree(__magic_name__ ) a = self.get_tokenizers(model_max_length=42 ) for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): # Isolate this from the other tests because we save additional tokens/etc a = tempfile.mkdtemp() a = """ He is very happy, UNwant\u00E9d,running""" a = tokenizer.additional_special_tokens # We can add a new special token for Canine as follows: a = chr(0Xe_0_0_7 ) additional_special_tokens.append(__magic_name__ ) tokenizer.add_special_tokens({"""additional_special_tokens""": additional_special_tokens} ) a = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) tokenizer.save_pretrained(__magic_name__ ) a = tokenizer.__class__.from_pretrained(__magic_name__ ) a = after_tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) self.assertListEqual(__magic_name__ , __magic_name__ ) self.assertIn(__magic_name__ , after_tokenizer.additional_special_tokens ) self.assertEqual(after_tokenizer.model_max_length , 42 ) a = tokenizer.__class__.from_pretrained(__magic_name__ , model_max_length=43 ) self.assertEqual(tokenizer.model_max_length , 43 ) shutil.rmtree(__magic_name__ ) def lowerCamelCase__ ( self :List[Any] ): '''simple docstring''' a = self.get_tokenizers(do_lower_case=__magic_name__ ) for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): a , a = self.get_clean_sequence(__magic_name__ ) # a special token for Canine can be defined as follows: a = 0Xe_0_0_5 a = chr(__magic_name__ ) tokenizer.add_special_tokens({"""cls_token""": special_token} ) a = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) self.assertEqual(len(__magic_name__ ) , 1 ) a = tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=__magic_name__ ) a = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) a = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) a = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) self.assertEqual(__magic_name__ , input_encoded + special_token_id ) a = tokenizer.decode(__magic_name__ , skip_special_tokens=__magic_name__ ) self.assertTrue(special_token not in decoded ) def lowerCamelCase__ ( self :int ): '''simple docstring''' a = self.get_tokenizers(do_lower_case=__magic_name__ ) for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): a = chr(0Xe_0_0_5 ) a = chr(0Xe_0_0_6 ) # `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py) tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=__magic_name__ ) # `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`, # which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py) tokenizer.add_special_tokens({"""additional_special_tokens""": [SPECIAL_TOKEN_2]} ) a = tokenizer.tokenize(__magic_name__ ) a = tokenizer.tokenize(__magic_name__ ) self.assertEqual(len(__magic_name__ ) , 1 ) self.assertEqual(len(__magic_name__ ) , 1 ) self.assertEqual(token_a[0] , __magic_name__ ) self.assertEqual(token_a[0] , __magic_name__ ) @require_tokenizers def lowerCamelCase__ ( self :Dict ): '''simple docstring''' a = self.get_tokenizers(do_lower_case=__magic_name__ ) for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): # a special token for Canine can be defined as follows: a = 0Xe_0_0_6 a = chr(__magic_name__ ) a = AddedToken(__magic_name__ , lstrip=__magic_name__ ) tokenizer.add_special_tokens({"""additional_special_tokens""": [new_token]} ) with tempfile.TemporaryDirectory() as tmp_dir_name: tokenizer.save_pretrained(__magic_name__ ) tokenizer.from_pretrained(__magic_name__ ) def lowerCamelCase__ ( self :int ): '''simple docstring''' a = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(__magic_name__ ) with open(os.path.join(__magic_name__ , """special_tokens_map.json""" ) , encoding="""utf-8""" ) as json_file: a = json.load(__magic_name__ ) with open(os.path.join(__magic_name__ , """tokenizer_config.json""" ) , encoding="""utf-8""" ) as json_file: a = json.load(__magic_name__ ) # a special token for Canine can be defined as follows: a = 0Xe_0_0_6 a = chr(__magic_name__ ) a = [new_token_a] a = [new_token_a] with open(os.path.join(__magic_name__ , """special_tokens_map.json""" ) , """w""" , encoding="""utf-8""" ) as outfile: json.dump(__magic_name__ , __magic_name__ ) with open(os.path.join(__magic_name__ , """tokenizer_config.json""" ) , """w""" , encoding="""utf-8""" ) as outfile: json.dump(__magic_name__ , __magic_name__ ) # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files a = tokenizer_class.from_pretrained(__magic_name__ , extra_ids=0 ) self.assertIn(__magic_name__ , tokenizer_without_change_in_init.additional_special_tokens ) # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab self.assertEqual( [new_token_a] , tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , ) a = 0Xe_0_0_7 a = chr(__magic_name__ ) # Now we test that we can change the value of additional_special_tokens in the from_pretrained a = [AddedToken(__magic_name__ , lstrip=__magic_name__ )] a = tokenizer_class.from_pretrained( __magic_name__ , additional_special_tokens=__magic_name__ , extra_ids=0 ) self.assertIn(__magic_name__ , tokenizer.additional_special_tokens ) # self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab self.assertEqual( [new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) ) @require_tokenizers def lowerCamelCase__ ( self :Union[str, Any] ): '''simple docstring''' a = self.get_tokenizers(do_lower_case=__magic_name__ ) for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): a = """hello world""" if self.space_between_special_tokens: a = """[CLS] hello world [SEP]""" else: a = input a = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) a = tokenizer.decode(__magic_name__ , spaces_between_special_tokens=self.space_between_special_tokens ) self.assertIn(__magic_name__ , [output, output.lower()] ) def lowerCamelCase__ ( self :List[str] ): '''simple docstring''' a = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): a = [ """bos_token""", """eos_token""", """unk_token""", """sep_token""", """pad_token""", """cls_token""", """mask_token""", ] a = """a""" a = ord(__magic_name__ ) for attr in attributes_list: setattr(__magic_name__ , attr + """_id""" , __magic_name__ ) self.assertEqual(getattr(__magic_name__ , __magic_name__ ) , __magic_name__ ) self.assertEqual(getattr(__magic_name__ , attr + """_id""" ) , __magic_name__ ) setattr(__magic_name__ , attr + """_id""" , __magic_name__ ) self.assertEqual(getattr(__magic_name__ , __magic_name__ ) , __magic_name__ ) self.assertEqual(getattr(__magic_name__ , attr + """_id""" ) , __magic_name__ ) setattr(__magic_name__ , """additional_special_tokens_ids""" , [] ) self.assertListEqual(getattr(__magic_name__ , """additional_special_tokens""" ) , [] ) self.assertListEqual(getattr(__magic_name__ , """additional_special_tokens_ids""" ) , [] ) a = 0Xe_0_0_6 a = chr(__magic_name__ ) setattr(__magic_name__ , """additional_special_tokens_ids""" , [additional_special_token_id] ) self.assertListEqual(getattr(__magic_name__ , """additional_special_tokens""" ) , [additional_special_token] ) self.assertListEqual(getattr(__magic_name__ , """additional_special_tokens_ids""" ) , [additional_special_token_id] ) def lowerCamelCase__ ( self :Any ): '''simple docstring''' pass def lowerCamelCase__ ( self :str ): '''simple docstring''' pass def lowerCamelCase__ ( self :Optional[int] ): '''simple docstring''' pass def lowerCamelCase__ ( self :List[Any] ): '''simple docstring''' pass def lowerCamelCase__ ( self :List[str] ): '''simple docstring''' pass def lowerCamelCase__ ( self :List[str] ): '''simple docstring''' pass def lowerCamelCase__ ( self :Any ): '''simple docstring''' pass def lowerCamelCase__ ( self :List[str] ): '''simple docstring''' pass
347
1
from typing import Optional, Union import torch from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention from ...modeling_utils import PreTrainedModel from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_mobilenet_va import MobileNetVaConfig __UpperCamelCase : Any = logging.get_logger(__name__) # General docstring __UpperCamelCase : List[str] = "MobileNetV1Config" # Base docstring __UpperCamelCase : Tuple = "google/mobilenet_v1_1.0_224" __UpperCamelCase : int = [1, 1_024, 7, 7] # Image classification docstring __UpperCamelCase : List[str] = "google/mobilenet_v1_1.0_224" __UpperCamelCase : Optional[Any] = "tabby, tabby cat" __UpperCamelCase : Optional[int] = [ "google/mobilenet_v1_1.0_224", "google/mobilenet_v1_0.75_192", # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1 ] def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=None ) -> Optional[Any]: a = {} if isinstance(__lowerCamelCase , __lowerCamelCase ): a = model.mobilenet_va else: a = model a = """MobilenetV1/Conv2d_0/""" a = backbone.conv_stem.convolution.weight a = backbone.conv_stem.normalization.bias a = backbone.conv_stem.normalization.weight a = backbone.conv_stem.normalization.running_mean a = backbone.conv_stem.normalization.running_var for i in range(13 ): a = i + 1 a = i * 2 a = backbone.layer[pt_index] a = f'MobilenetV1/Conv2d_{tf_index}_depthwise/' a = pointer.convolution.weight a = pointer.normalization.bias a = pointer.normalization.weight a = pointer.normalization.running_mean a = pointer.normalization.running_var a = backbone.layer[pt_index + 1] a = f'MobilenetV1/Conv2d_{tf_index}_pointwise/' a = pointer.convolution.weight a = pointer.normalization.bias a = pointer.normalization.weight a = pointer.normalization.running_mean a = pointer.normalization.running_var if isinstance(__lowerCamelCase , __lowerCamelCase ): a = """MobilenetV1/Logits/Conv2d_1c_1x1/""" a = model.classifier.weight a = model.classifier.bias return tf_to_pt_map def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> List[str]: try: import numpy as np import tensorflow as tf except ImportError: logger.error( """Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see """ """https://www.tensorflow.org/install/ for installation instructions.""" ) raise # Load weights from TF model a = tf.train.list_variables(__lowerCamelCase ) a = {} for name, shape in init_vars: logger.info(f'Loading TF weight {name} with shape {shape}' ) a = tf.train.load_variable(__lowerCamelCase , __lowerCamelCase ) a = array # Build TF to PyTorch weights loading map a = _build_tf_to_pytorch_map(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) for name, pointer in tf_to_pt_map.items(): logger.info(f'Importing {name}' ) if name not in tf_weights: logger.info(f'{name} not in tf pre-trained weights, skipping' ) continue a = tf_weights[name] if "depthwise_weights" in name: logger.info("""Transposing depthwise""" ) a = np.transpose(__lowerCamelCase , (2, 3, 0, 1) ) elif "weights" in name: logger.info("""Transposing""" ) if len(pointer.shape ) == 2: # copying into linear layer a = array.squeeze().transpose() else: a = np.transpose(__lowerCamelCase , (3, 2, 0, 1) ) if pointer.shape != array.shape: raise ValueError(f'Pointer shape {pointer.shape} and array shape {array.shape} mismatched' ) logger.info(f'Initialize PyTorch weight {name} {array.shape}' ) a = torch.from_numpy(__lowerCamelCase ) tf_weights.pop(__lowerCamelCase , __lowerCamelCase ) tf_weights.pop(name + """/RMSProp""" , __lowerCamelCase ) tf_weights.pop(name + """/RMSProp_1""" , __lowerCamelCase ) tf_weights.pop(name + """/ExponentialMovingAverage""" , __lowerCamelCase ) logger.info(f'Weights not copied to PyTorch model: {", ".join(tf_weights.keys() )}' ) return model def __A ( __lowerCamelCase , __lowerCamelCase ) -> torch.Tensor: a , a = features.shape[-2:] a , a = conv_layer.stride a , a = conv_layer.kernel_size if in_height % stride_height == 0: a = max(kernel_height - stride_height , 0 ) else: a = max(kernel_height - (in_height % stride_height) , 0 ) if in_width % stride_width == 0: a = max(kernel_width - stride_width , 0 ) else: a = max(kernel_width - (in_width % stride_width) , 0 ) a = pad_along_width // 2 a = pad_along_width - pad_left a = pad_along_height // 2 a = pad_along_height - pad_top a = (pad_left, pad_right, pad_top, pad_bottom) return nn.functional.pad(__lowerCamelCase , __lowerCamelCase , """constant""" , 0.0 ) class __lowerCAmelCase ( nn.Module ): def __init__( self :Optional[int] , __magic_name__ :MobileNetVaConfig , __magic_name__ :int , __magic_name__ :int , __magic_name__ :int , __magic_name__ :Optional[int] = 1 , __magic_name__ :Optional[int] = 1 , __magic_name__ :bool = False , __magic_name__ :Optional[bool] = True , __magic_name__ :Optional[bool or str] = True , ): '''simple docstring''' super().__init__() a = config if in_channels % groups != 0: raise ValueError(F'Input channels ({in_channels}) are not divisible by {groups} groups.' ) if out_channels % groups != 0: raise ValueError(F'Output channels ({out_channels}) are not divisible by {groups} groups.' ) a = 0 if config.tf_padding else int((kernel_size - 1) / 2 ) a = nn.Convad( in_channels=__magic_name__ , out_channels=__magic_name__ , kernel_size=__magic_name__ , stride=__magic_name__ , padding=__magic_name__ , groups=__magic_name__ , bias=__magic_name__ , padding_mode="""zeros""" , ) if use_normalization: a = nn.BatchNormad( num_features=__magic_name__ , eps=config.layer_norm_eps , momentum=0.9997 , affine=__magic_name__ , track_running_stats=__magic_name__ , ) else: a = None if use_activation: if isinstance(__magic_name__ , __magic_name__ ): a = ACTaFN[use_activation] elif isinstance(config.hidden_act , __magic_name__ ): a = ACTaFN[config.hidden_act] else: a = config.hidden_act else: a = None def lowerCamelCase__ ( self :Optional[Any] , __magic_name__ :torch.Tensor ): '''simple docstring''' if self.config.tf_padding: a = apply_tf_padding(__magic_name__ , self.convolution ) a = self.convolution(__magic_name__ ) if self.normalization is not None: a = self.normalization(__magic_name__ ) if self.activation is not None: a = self.activation(__magic_name__ ) return features class __lowerCAmelCase ( __magic_name__ ): UpperCamelCase__ = MobileNetVaConfig UpperCamelCase__ = load_tf_weights_in_mobilenet_va UpperCamelCase__ = '''mobilenet_v1''' UpperCamelCase__ = '''pixel_values''' UpperCamelCase__ = False def lowerCamelCase__ ( self :List[Any] , __magic_name__ :Union[nn.Linear, nn.Convad] ): '''simple docstring''' if isinstance(__magic_name__ , (nn.Linear, nn.Convad) ): module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range ) if module.bias is not None: module.bias.data.zero_() elif isinstance(__magic_name__ , nn.BatchNormad ): module.bias.data.zero_() module.weight.data.fill_(1.0 ) __UpperCamelCase : List[Any] = R"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n" __UpperCamelCase : Optional[Any] = R"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`MobileNetV1ImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n" @add_start_docstrings( '''The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.''' , __magic_name__ , ) class __lowerCAmelCase ( __magic_name__ ): def __init__( self :Any , __magic_name__ :MobileNetVaConfig , __magic_name__ :bool = True ): '''simple docstring''' super().__init__(__magic_name__ ) a = config a = 32 a = max(int(depth * config.depth_multiplier ) , config.min_depth ) a = MobileNetVaConvLayer( __magic_name__ , in_channels=config.num_channels , out_channels=__magic_name__ , kernel_size=3 , stride=2 , ) a = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1] a = nn.ModuleList() for i in range(13 ): a = out_channels if strides[i] == 2 or i == 0: depth *= 2 a = max(int(depth * config.depth_multiplier ) , config.min_depth ) self.layer.append( MobileNetVaConvLayer( __magic_name__ , in_channels=__magic_name__ , out_channels=__magic_name__ , kernel_size=3 , stride=strides[i] , groups=__magic_name__ , ) ) self.layer.append( MobileNetVaConvLayer( __magic_name__ , in_channels=__magic_name__ , out_channels=__magic_name__ , kernel_size=1 , ) ) a = nn.AdaptiveAvgPoolad((1, 1) ) if add_pooling_layer else None # Initialize weights and apply final processing self.post_init() def lowerCamelCase__ ( self :int , __magic_name__ :List[Any] ): '''simple docstring''' raise NotImplementedError @add_start_docstrings_to_model_forward(__magic_name__ ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=__magic_name__ , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def lowerCamelCase__ ( self :Any , __magic_name__ :Optional[torch.Tensor] = None , __magic_name__ :Optional[bool] = None , __magic_name__ :Optional[bool] = None , ): '''simple docstring''' a = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) a = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError("""You have to specify pixel_values""" ) a = self.conv_stem(__magic_name__ ) a = () if output_hidden_states else None for i, layer_module in enumerate(self.layer ): a = layer_module(__magic_name__ ) if output_hidden_states: a = all_hidden_states + (hidden_states,) a = hidden_states if self.pooler is not None: a = torch.flatten(self.pooler(__magic_name__ ) , start_dim=1 ) else: a = None if not return_dict: return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None ) return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=__magic_name__ , pooler_output=__magic_name__ , hidden_states=__magic_name__ , ) @add_start_docstrings( ''' MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for ImageNet. ''' , __magic_name__ , ) class __lowerCAmelCase ( __magic_name__ ): def __init__( self :Optional[int] , __magic_name__ :MobileNetVaConfig ): '''simple docstring''' super().__init__(__magic_name__ ) a = config.num_labels a = MobileNetVaModel(__magic_name__ ) a = self.mobilenet_va.layer[-1].convolution.out_channels # Classifier head a = nn.Dropout(config.classifier_dropout_prob , inplace=__magic_name__ ) a = nn.Linear(__magic_name__ , config.num_labels ) if config.num_labels > 0 else nn.Identity() # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(__magic_name__ ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__magic_name__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def lowerCamelCase__ ( self :Dict , __magic_name__ :Optional[torch.Tensor] = None , __magic_name__ :Optional[bool] = None , __magic_name__ :Optional[torch.Tensor] = None , __magic_name__ :Optional[bool] = None , ): '''simple docstring''' a = return_dict if return_dict is not None else self.config.use_return_dict a = self.mobilenet_va(__magic_name__ , output_hidden_states=__magic_name__ , return_dict=__magic_name__ ) a = outputs.pooler_output if return_dict else outputs[1] a = self.classifier(self.dropout(__magic_name__ ) ) a = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: a = """regression""" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): a = """single_label_classification""" else: a = """multi_label_classification""" if self.config.problem_type == "regression": a = MSELoss() if self.num_labels == 1: a = loss_fct(logits.squeeze() , labels.squeeze() ) else: a = loss_fct(__magic_name__ , __magic_name__ ) elif self.config.problem_type == "single_label_classification": a = CrossEntropyLoss() a = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": a = BCEWithLogitsLoss() a = loss_fct(__magic_name__ , __magic_name__ ) if not return_dict: a = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return ImageClassifierOutputWithNoAttention( loss=__magic_name__ , logits=__magic_name__ , hidden_states=outputs.hidden_states , )
347
def __A ( __lowerCamelCase ) -> bool: return number & 1 == 0 if __name__ == "__main__": import doctest doctest.testmod()
347
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) __UpperCamelCase : Union[str, Any] = { "configuration_roberta_prelayernorm": [ "ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP", "RobertaPreLayerNormConfig", "RobertaPreLayerNormOnnxConfig", ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : List[str] = [ "ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST", "RobertaPreLayerNormForCausalLM", "RobertaPreLayerNormForMaskedLM", "RobertaPreLayerNormForMultipleChoice", "RobertaPreLayerNormForQuestionAnswering", "RobertaPreLayerNormForSequenceClassification", "RobertaPreLayerNormForTokenClassification", "RobertaPreLayerNormModel", "RobertaPreLayerNormPreTrainedModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : Optional[Any] = [ "TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST", "TFRobertaPreLayerNormForCausalLM", "TFRobertaPreLayerNormForMaskedLM", "TFRobertaPreLayerNormForMultipleChoice", "TFRobertaPreLayerNormForQuestionAnswering", "TFRobertaPreLayerNormForSequenceClassification", "TFRobertaPreLayerNormForTokenClassification", "TFRobertaPreLayerNormMainLayer", "TFRobertaPreLayerNormModel", "TFRobertaPreLayerNormPreTrainedModel", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : Any = [ "FlaxRobertaPreLayerNormForCausalLM", "FlaxRobertaPreLayerNormForMaskedLM", "FlaxRobertaPreLayerNormForMultipleChoice", "FlaxRobertaPreLayerNormForQuestionAnswering", "FlaxRobertaPreLayerNormForSequenceClassification", "FlaxRobertaPreLayerNormForTokenClassification", "FlaxRobertaPreLayerNormModel", "FlaxRobertaPreLayerNormPreTrainedModel", ] if TYPE_CHECKING: from .configuration_roberta_prelayernorm import ( ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaPreLayerNormConfig, RobertaPreLayerNormOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_roberta_prelayernorm import ( ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST, RobertaPreLayerNormForCausalLM, RobertaPreLayerNormForMaskedLM, RobertaPreLayerNormForMultipleChoice, RobertaPreLayerNormForQuestionAnswering, RobertaPreLayerNormForSequenceClassification, RobertaPreLayerNormForTokenClassification, RobertaPreLayerNormModel, RobertaPreLayerNormPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_roberta_prelayernorm import ( TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST, TFRobertaPreLayerNormForCausalLM, TFRobertaPreLayerNormForMaskedLM, TFRobertaPreLayerNormForMultipleChoice, TFRobertaPreLayerNormForQuestionAnswering, TFRobertaPreLayerNormForSequenceClassification, TFRobertaPreLayerNormForTokenClassification, TFRobertaPreLayerNormMainLayer, TFRobertaPreLayerNormModel, TFRobertaPreLayerNormPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_roberta_prelayernorm import ( FlaxRobertaPreLayerNormForCausalLM, FlaxRobertaPreLayerNormForMaskedLM, FlaxRobertaPreLayerNormForMultipleChoice, FlaxRobertaPreLayerNormForQuestionAnswering, FlaxRobertaPreLayerNormForSequenceClassification, FlaxRobertaPreLayerNormForTokenClassification, FlaxRobertaPreLayerNormModel, FlaxRobertaPreLayerNormPreTrainedModel, ) else: import sys __UpperCamelCase : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
347
def __A ( __lowerCamelCase ) -> int: if not numbers: return 0 if not isinstance(__lowerCamelCase , (list, tuple) ) or not all( isinstance(__lowerCamelCase , __lowerCamelCase ) for number in numbers ): raise ValueError("""numbers must be an iterable of integers""" ) a = a = a = numbers[0] for i in range(1 , len(__lowerCamelCase ) ): # update the maximum and minimum subarray products a = numbers[i] if number < 0: a , a = min_till_now, max_till_now a = max(__lowerCamelCase , max_till_now * number ) a = min(__lowerCamelCase , min_till_now * number ) # update the maximum product found till now a = max(__lowerCamelCase , __lowerCamelCase ) return max_prod
347
1
# This script creates a super tiny model that is useful inside tests, when we just want to test that # the machinery works, without needing to the check the quality of the outcomes. # # This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny - # all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and # emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files. # The latter is done by `fsmt-make-super-tiny-model.py`. # # It will be used then as "stas/tiny-wmt19-en-ru" from pathlib import Path import json import tempfile from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES __UpperCamelCase : str = "tiny-wmt19-en-ru" # Build # borrowed from a test __UpperCamelCase : Tuple = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "w</w>", "r</w>", "t</w>", "lo", "low", "er</w>", "low</w>", "lowest</w>", "newer</w>", "wider</w>", "<unk>", ] __UpperCamelCase : Optional[int] = dict(zip(vocab, range(len(vocab)))) __UpperCamelCase : str = ["l o 123", "lo w 1456", "e r</w> 1789", ""] with tempfile.TemporaryDirectory() as tmpdirname: __UpperCamelCase : Optional[Any] = Path(tmpdirname) __UpperCamelCase : int = build_dir / VOCAB_FILES_NAMES["src_vocab_file"] __UpperCamelCase : Dict = build_dir / VOCAB_FILES_NAMES["tgt_vocab_file"] __UpperCamelCase : Union[str, Any] = build_dir / VOCAB_FILES_NAMES["merges_file"] with open(src_vocab_file, "w") as fp: fp.write(json.dumps(vocab_tokens)) with open(tgt_vocab_file, "w") as fp: fp.write(json.dumps(vocab_tokens)) with open(merges_file, "w") as fp: fp.write("\n".join(merges)) __UpperCamelCase : Optional[Any] = FSMTTokenizer( langs=["en", "ru"], src_vocab_size=len(vocab), tgt_vocab_size=len(vocab), src_vocab_file=src_vocab_file, tgt_vocab_file=tgt_vocab_file, merges_file=merges_file, ) __UpperCamelCase : List[str] = FSMTConfig( langs=["ru", "en"], src_vocab_size=1_000, tgt_vocab_size=1_000, d_model=4, encoder_layers=1, decoder_layers=1, encoder_ffn_dim=4, decoder_ffn_dim=4, encoder_attention_heads=1, decoder_attention_heads=1, ) __UpperCamelCase : str = FSMTForConditionalGeneration(config) print(F'num of params {tiny_model.num_parameters()}') # Test __UpperCamelCase : str = tokenizer(["Making tiny model"], return_tensors="pt") __UpperCamelCase : Union[str, Any] = tiny_model(**batch) print("test output:", len(outputs.logits[0])) # Save tiny_model.half() # makes it smaller tiny_model.save_pretrained(mname_tiny) tokenizer.save_pretrained(mname_tiny) print(F'Generated {mname_tiny}') # Upload # transformers-cli upload tiny-wmt19-en-ru
347
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) __UpperCamelCase : Optional[Any] = { "configuration_wav2vec2": ["WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Wav2Vec2Config"], "feature_extraction_wav2vec2": ["Wav2Vec2FeatureExtractor"], "processing_wav2vec2": ["Wav2Vec2Processor"], "tokenization_wav2vec2": ["Wav2Vec2CTCTokenizer", "Wav2Vec2Tokenizer"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : str = [ "WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST", "Wav2Vec2ForAudioFrameClassification", "Wav2Vec2ForCTC", "Wav2Vec2ForMaskedLM", "Wav2Vec2ForPreTraining", "Wav2Vec2ForSequenceClassification", "Wav2Vec2ForXVector", "Wav2Vec2Model", "Wav2Vec2PreTrainedModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : Dict = [ "TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST", "TFWav2Vec2ForCTC", "TFWav2Vec2Model", "TFWav2Vec2PreTrainedModel", "TFWav2Vec2ForSequenceClassification", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : Union[str, Any] = [ "FlaxWav2Vec2ForCTC", "FlaxWav2Vec2ForPreTraining", "FlaxWav2Vec2Model", "FlaxWav2Vec2PreTrainedModel", ] if TYPE_CHECKING: from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig from .feature_extraction_wavaveca import WavaVecaFeatureExtractor from .processing_wavaveca import WavaVecaProcessor from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_wavaveca import ( WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, WavaVecaForAudioFrameClassification, WavaVecaForCTC, WavaVecaForMaskedLM, WavaVecaForPreTraining, WavaVecaForSequenceClassification, WavaVecaForXVector, WavaVecaModel, WavaVecaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_wavaveca import ( TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, TFWavaVecaForCTC, TFWavaVecaForSequenceClassification, TFWavaVecaModel, TFWavaVecaPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_wavaveca import ( FlaxWavaVecaForCTC, FlaxWavaVecaForPreTraining, FlaxWavaVecaModel, FlaxWavaVecaPreTrainedModel, ) else: import sys __UpperCamelCase : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
347
1
import cmath import math def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> complex: a = math.radians(__lowerCamelCase ) a = math.radians(__lowerCamelCase ) # Convert voltage and current to rectangular form a = cmath.rect(__lowerCamelCase , __lowerCamelCase ) a = cmath.rect(__lowerCamelCase , __lowerCamelCase ) # Calculate apparent power return voltage_rect * current_rect if __name__ == "__main__": import doctest doctest.testmod()
347
import pytest import datasets.config from datasets.utils.info_utils import is_small_dataset @pytest.mark.parametrize("""dataset_size""" , [None, 400 * 2**20, 600 * 2**20] ) @pytest.mark.parametrize("""input_in_memory_max_size""" , ["""default""", 0, 100 * 2**20, 900 * 2**20] ) def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Any: if input_in_memory_max_size != "default": monkeypatch.setattr(datasets.config , """IN_MEMORY_MAX_SIZE""" , __lowerCamelCase ) a = datasets.config.IN_MEMORY_MAX_SIZE if input_in_memory_max_size == "default": assert in_memory_max_size == 0 else: assert in_memory_max_size == input_in_memory_max_size if dataset_size and in_memory_max_size: a = dataset_size < in_memory_max_size else: a = False a = is_small_dataset(__lowerCamelCase ) assert result == expected
347
1
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments from transformers.testing_utils import TestCasePlus, require_torch, slow from transformers.utils import is_datasets_available if is_datasets_available(): import datasets class __lowerCAmelCase ( __magic_name__ ): @slow @require_torch def lowerCamelCase__ ( self :Tuple ): '''simple docstring''' a = EncoderDecoderModel.from_encoder_decoder_pretrained("""prajjwal1/bert-tiny""" , """prajjwal1/bert-tiny""" ) a = BertTokenizer.from_pretrained("""bert-base-uncased""" ) a = bertabert.config.encoder.vocab_size a = tokenizer.sep_token_id a = tokenizer.cls_token_id a = 128 a = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""train[:1%]""" ) a = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""validation[:1%]""" ) a = train_dataset.select(range(32 ) ) a = val_dataset.select(range(16 ) ) a = 4 def _map_to_encoder_decoder_inputs(__magic_name__ :Optional[Any] ): # Tokenizer will automatically set [BOS] <text> [EOS] a = tokenizer(batch["""article"""] , padding="""max_length""" , truncation=__magic_name__ , max_length=512 ) a = tokenizer(batch["""highlights"""] , padding="""max_length""" , truncation=__magic_name__ , max_length=128 ) a = inputs.input_ids a = inputs.attention_mask a = outputs.input_ids a = outputs.input_ids.copy() a = [ [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["""labels"""] ] a = outputs.attention_mask assert all(len(__magic_name__ ) == 512 for x in inputs.input_ids ) assert all(len(__magic_name__ ) == 128 for x in outputs.input_ids ) return batch def _compute_metrics(__magic_name__ :str ): a = pred.label_ids a = pred.predictions # all unnecessary tokens are removed a = tokenizer.batch_decode(__magic_name__ , skip_special_tokens=__magic_name__ ) a = tokenizer.batch_decode(__magic_name__ , skip_special_tokens=__magic_name__ ) a = sum([int(pred_str[i] == label_str[i] ) for i in range(len(__magic_name__ ) )] ) / len(__magic_name__ ) return {"accuracy": accuracy} # map train dataset a = train_dataset.map( _map_to_encoder_decoder_inputs , batched=__magic_name__ , batch_size=__magic_name__ , remove_columns=["""article""", """highlights"""] , ) train_dataset.set_format( type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , ) # same for validation dataset a = val_dataset.map( _map_to_encoder_decoder_inputs , batched=__magic_name__ , batch_size=__magic_name__ , remove_columns=["""article""", """highlights"""] , ) val_dataset.set_format( type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , ) a = self.get_auto_remove_tmp_dir() a = SeqaSeqTrainingArguments( output_dir=__magic_name__ , per_device_train_batch_size=__magic_name__ , per_device_eval_batch_size=__magic_name__ , predict_with_generate=__magic_name__ , evaluation_strategy="""steps""" , do_train=__magic_name__ , do_eval=__magic_name__ , warmup_steps=0 , eval_steps=2 , logging_steps=2 , ) # instantiate trainer a = SeqaSeqTrainer( model=__magic_name__ , args=__magic_name__ , compute_metrics=_compute_metrics , train_dataset=__magic_name__ , eval_dataset=__magic_name__ , tokenizer=__magic_name__ , ) # start training trainer.train()
347
from __future__ import annotations from fractions import Fraction from math import gcd, sqrt def __A ( __lowerCamelCase ) -> bool: a = int(number**0.5 ) return number == sq * sq def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> tuple[int, int]: a = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den a = x_den * y_den * z_den a = gcd(__lowerCamelCase , __lowerCamelCase ) top //= hcf bottom //= hcf return top, bottom def __A ( __lowerCamelCase = 35 ) -> int: a = set() a = 42 a = Fraction(0 ) a = 42 for x_num in range(1 , order + 1 ): for x_den in range(x_num + 1 , order + 1 ): for y_num in range(1 , order + 1 ): for y_den in range(y_num + 1 , order + 1 ): # n=1 a = x_num * y_den + x_den * y_num a = x_den * y_den a = gcd(__lowerCamelCase , __lowerCamelCase ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: a = add_three( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) unique_s.add(__lowerCamelCase ) # n=2 a = ( x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num ) a = x_den * x_den * y_den * y_den if is_sq(__lowerCamelCase ) and is_sq(__lowerCamelCase ): a = int(sqrt(__lowerCamelCase ) ) a = int(sqrt(__lowerCamelCase ) ) a = gcd(__lowerCamelCase , __lowerCamelCase ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: a = add_three( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) unique_s.add(__lowerCamelCase ) # n=-1 a = x_num * y_num a = x_den * y_num + x_num * y_den a = gcd(__lowerCamelCase , __lowerCamelCase ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: a = add_three( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) unique_s.add(__lowerCamelCase ) # n=2 a = x_num * x_num * y_num * y_num a = ( x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den ) if is_sq(__lowerCamelCase ) and is_sq(__lowerCamelCase ): a = int(sqrt(__lowerCamelCase ) ) a = int(sqrt(__lowerCamelCase ) ) a = gcd(__lowerCamelCase , __lowerCamelCase ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: a = add_three( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) unique_s.add(__lowerCamelCase ) for num, den in unique_s: total += Fraction(__lowerCamelCase , __lowerCamelCase ) return total.denominator + total.numerator if __name__ == "__main__": print(F'{solution() = }')
347
1
import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto.configuration_auto import CONFIG_MAPPING __UpperCamelCase : Union[str, Any] = logging.get_logger(__name__) class __lowerCAmelCase ( __magic_name__ ): UpperCamelCase__ = '''upernet''' def __init__( self :int , __magic_name__ :Optional[int]=None , __magic_name__ :Union[str, Any]=512 , __magic_name__ :Optional[int]=0.02 , __magic_name__ :Optional[Any]=[1, 2, 3, 6] , __magic_name__ :Optional[int]=True , __magic_name__ :Tuple=0.4 , __magic_name__ :Any=384 , __magic_name__ :Tuple=256 , __magic_name__ :Tuple=1 , __magic_name__ :Tuple=False , __magic_name__ :List[Any]=255 , **__magic_name__ :int , ): '''simple docstring''' super().__init__(**__magic_name__ ) if backbone_config is None: logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" ) a = CONFIG_MAPPING["""resnet"""](out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] ) elif isinstance(__magic_name__ , __magic_name__ ): a = backbone_config.get("""model_type""" ) a = CONFIG_MAPPING[backbone_model_type] a = config_class.from_dict(__magic_name__ ) a = backbone_config a = hidden_size a = initializer_range a = pool_scales a = use_auxiliary_head a = auxiliary_loss_weight a = auxiliary_in_channels a = auxiliary_channels a = auxiliary_num_convs a = auxiliary_concat_input a = loss_ignore_index def lowerCamelCase__ ( self :str ): '''simple docstring''' a = copy.deepcopy(self.__dict__ ) a = self.backbone_config.to_dict() a = self.__class__.model_type return output
347
import unittest import numpy as np from transformers import RoFormerConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.roformer.modeling_flax_roformer import ( FlaxRoFormerForMaskedLM, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerModel, ) class __lowerCAmelCase ( unittest.TestCase ): def __init__( self :List[str] , __magic_name__ :List[str] , __magic_name__ :List[Any]=13 , __magic_name__ :Any=7 , __magic_name__ :Optional[int]=True , __magic_name__ :List[Any]=True , __magic_name__ :Optional[int]=True , __magic_name__ :Union[str, Any]=True , __magic_name__ :Any=99 , __magic_name__ :List[str]=32 , __magic_name__ :List[str]=5 , __magic_name__ :str=4 , __magic_name__ :str=37 , __magic_name__ :Optional[int]="gelu" , __magic_name__ :int=0.1 , __magic_name__ :Dict=0.1 , __magic_name__ :List[str]=512 , __magic_name__ :Tuple=16 , __magic_name__ :Tuple=2 , __magic_name__ :List[str]=0.02 , __magic_name__ :Any=4 , ): '''simple docstring''' a = parent a = batch_size a = seq_length a = is_training a = use_attention_mask a = use_token_type_ids a = use_labels a = vocab_size a = hidden_size a = num_hidden_layers a = num_attention_heads a = intermediate_size a = hidden_act a = hidden_dropout_prob a = attention_probs_dropout_prob a = max_position_embeddings a = type_vocab_size a = type_sequence_label_size a = initializer_range a = num_choices def lowerCamelCase__ ( self :Optional[int] ): '''simple docstring''' a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) a = None if self.use_attention_mask: a = random_attention_mask([self.batch_size, self.seq_length] ) a = None if self.use_token_type_ids: a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) a = RoFormerConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__magic_name__ , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def lowerCamelCase__ ( self :Optional[Any] ): '''simple docstring''' a = self.prepare_config_and_inputs() a , a , a , a = config_and_inputs a = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask} return config, inputs_dict @require_flax class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ): UpperCamelCase__ = True UpperCamelCase__ = ( ( FlaxRoFormerModel, FlaxRoFormerForMaskedLM, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, ) if is_flax_available() else () ) def lowerCamelCase__ ( self :List[Any] ): '''simple docstring''' a = FlaxRoFormerModelTester(self ) @slow def lowerCamelCase__ ( self :List[str] ): '''simple docstring''' for model_class_name in self.all_model_classes: a = model_class_name.from_pretrained("""junnyu/roformer_chinese_small""" , from_pt=__magic_name__ ) a = model(np.ones((1, 1) ) ) self.assertIsNotNone(__magic_name__ ) @require_flax class __lowerCAmelCase ( unittest.TestCase ): @slow def lowerCamelCase__ ( self :Union[str, Any] ): '''simple docstring''' a = FlaxRoFormerForMaskedLM.from_pretrained("""junnyu/roformer_chinese_base""" ) a = jnp.array([[0, 1, 2, 3, 4, 5]] ) a = model(__magic_name__ )[0] a = 5_0000 a = (1, 6, vocab_size) self.assertEqual(output.shape , __magic_name__ ) a = jnp.array( [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]] ) self.assertTrue(jnp.allclose(output[:, :3, :3] , __magic_name__ , atol=1E-4 ) )
347
1
from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow if is_tf_available(): import numpy as np import tensorflow as tf from transformers import TFCamembertModel @require_tf @require_sentencepiece @require_tokenizers class __lowerCAmelCase ( unittest.TestCase ): @slow def lowerCamelCase__ ( self :Tuple ): '''simple docstring''' a = TFCamembertModel.from_pretrained("""jplu/tf-camembert-base""" ) a = tf.convert_to_tensor( [[5, 121, 11, 660, 16, 730, 2_5543, 110, 83, 6]] , dtype=tf.intaa , ) # J'aime le camembert !" a = model(__magic_name__ )["""last_hidden_state"""] a = tf.TensorShape((1, 10, 768) ) self.assertEqual(output.shape , __magic_name__ ) # compare the actual values for a slice. a = tf.convert_to_tensor( [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]] , dtype=tf.floataa , ) # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0') # camembert.eval() # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach() self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
347
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __UpperCamelCase : Optional[int] = { "configuration_blenderbot": [ "BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BlenderbotConfig", "BlenderbotOnnxConfig", ], "tokenization_blenderbot": ["BlenderbotTokenizer"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : List[Any] = ["BlenderbotTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : Union[str, Any] = [ "BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST", "BlenderbotForCausalLM", "BlenderbotForConditionalGeneration", "BlenderbotModel", "BlenderbotPreTrainedModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : int = [ "TFBlenderbotForConditionalGeneration", "TFBlenderbotModel", "TFBlenderbotPreTrainedModel", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : Tuple = [ "FlaxBlenderbotForConditionalGeneration", "FlaxBlenderbotModel", "FlaxBlenderbotPreTrainedModel", ] if TYPE_CHECKING: from .configuration_blenderbot import ( BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP, BlenderbotConfig, BlenderbotOnnxConfig, ) from .tokenization_blenderbot import BlenderbotTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_blenderbot_fast import BlenderbotTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_blenderbot import ( BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST, BlenderbotForCausalLM, BlenderbotForConditionalGeneration, BlenderbotModel, BlenderbotPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_blenderbot import ( TFBlenderbotForConditionalGeneration, TFBlenderbotModel, TFBlenderbotPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_blenderbot import ( FlaxBlenderbotForConditionalGeneration, FlaxBlenderbotModel, FlaxBlenderbotPreTrainedModel, ) else: import sys __UpperCamelCase : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
347
1
import os from argparse import ArgumentParser from typing import List import torch.utils.data from datasets import Dataset, IterableDataset from datasets.distributed import split_dataset_by_node __UpperCamelCase : List[str] = 4 __UpperCamelCase : List[Any] = 3 class __lowerCAmelCase ( __magic_name__ ): pass def __A ( __lowerCamelCase ) -> Union[str, Any]: for shard in shards: for i in range(__lowerCamelCase ): yield {"i": i, "shard": shard} def __A ( ) -> str: a = int(os.environ["""RANK"""] ) a = int(os.environ["""WORLD_SIZE"""] ) a = ArgumentParser() parser.add_argument("""--streaming""" , type=__lowerCamelCase ) parser.add_argument("""--local_rank""" , type=__lowerCamelCase ) parser.add_argument("""--num_workers""" , type=__lowerCamelCase , default=0 ) a = parser.parse_args() a = args.streaming a = args.num_workers a = {"""shards""": [f'shard_{shard_idx}' for shard_idx in range(__lowerCamelCase )]} a = IterableDataset.from_generator(__lowerCamelCase , gen_kwargs=__lowerCamelCase ) if not streaming: a = Dataset.from_list(list(__lowerCamelCase ) ) a = split_dataset_by_node(__lowerCamelCase , rank=__lowerCamelCase , world_size=__lowerCamelCase ) a = torch.utils.data.DataLoader(__lowerCamelCase , num_workers=__lowerCamelCase ) a = NUM_SHARDS * NUM_ITEMS_PER_SHARD a = full_size // world_size expected_local_size += int(rank < (full_size % world_size) ) a = sum(1 for _ in dataloader ) if local_size != expected_local_size: raise FailedTestError(f'local_size {local_size} != expected_local_size {expected_local_size}' ) if __name__ == "__main__": main()
347
import tempfile import torch from diffusers import IPNDMScheduler from .test_schedulers import SchedulerCommonTest class __lowerCAmelCase ( __magic_name__ ): UpperCamelCase__ = (IPNDMScheduler,) UpperCamelCase__ = (('''num_inference_steps''', 50),) def lowerCamelCase__ ( self :Any , **__magic_name__ :Optional[Any] ): '''simple docstring''' a = {"""num_train_timesteps""": 1000} config.update(**__magic_name__ ) return config def lowerCamelCase__ ( self :Optional[int] , __magic_name__ :Tuple=0 , **__magic_name__ :Optional[int] ): '''simple docstring''' a = dict(self.forward_default_kwargs ) a = kwargs.pop("""num_inference_steps""" , __magic_name__ ) a = self.dummy_sample a = 0.1 * sample a = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: a = self.get_scheduler_config(**__magic_name__ ) a = scheduler_class(**__magic_name__ ) scheduler.set_timesteps(__magic_name__ ) # copy over dummy past residuals a = dummy_past_residuals[:] if time_step is None: a = scheduler.timesteps[len(scheduler.timesteps ) // 2] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__magic_name__ ) a = scheduler_class.from_pretrained(__magic_name__ ) new_scheduler.set_timesteps(__magic_name__ ) # copy over dummy past residuals a = dummy_past_residuals[:] a = scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample a = new_scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" a = scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample a = new_scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def lowerCamelCase__ ( self :Union[str, Any] ): '''simple docstring''' pass def lowerCamelCase__ ( self :List[Any] , __magic_name__ :List[Any]=0 , **__magic_name__ :Any ): '''simple docstring''' a = dict(self.forward_default_kwargs ) a = kwargs.pop("""num_inference_steps""" , __magic_name__ ) a = self.dummy_sample a = 0.1 * sample a = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: a = self.get_scheduler_config() a = scheduler_class(**__magic_name__ ) scheduler.set_timesteps(__magic_name__ ) # copy over dummy past residuals (must be after setting timesteps) a = dummy_past_residuals[:] if time_step is None: a = scheduler.timesteps[len(scheduler.timesteps ) // 2] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__magic_name__ ) a = scheduler_class.from_pretrained(__magic_name__ ) # copy over dummy past residuals new_scheduler.set_timesteps(__magic_name__ ) # copy over dummy past residual (must be after setting timesteps) a = dummy_past_residuals[:] a = scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample a = new_scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" a = scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample a = new_scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def lowerCamelCase__ ( self :Optional[Any] , **__magic_name__ :Optional[int] ): '''simple docstring''' a = self.scheduler_classes[0] a = self.get_scheduler_config(**__magic_name__ ) a = scheduler_class(**__magic_name__ ) a = 10 a = self.dummy_model() a = self.dummy_sample_deter scheduler.set_timesteps(__magic_name__ ) for i, t in enumerate(scheduler.timesteps ): a = model(__magic_name__ , __magic_name__ ) a = scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ ).prev_sample for i, t in enumerate(scheduler.timesteps ): a = model(__magic_name__ , __magic_name__ ) a = scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ ).prev_sample return sample def lowerCamelCase__ ( self :str ): '''simple docstring''' a = dict(self.forward_default_kwargs ) a = kwargs.pop("""num_inference_steps""" , __magic_name__ ) for scheduler_class in self.scheduler_classes: a = self.get_scheduler_config() a = scheduler_class(**__magic_name__ ) a = self.dummy_sample a = 0.1 * sample if num_inference_steps is not None and hasattr(__magic_name__ , """set_timesteps""" ): scheduler.set_timesteps(__magic_name__ ) elif num_inference_steps is not None and not hasattr(__magic_name__ , """set_timesteps""" ): a = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) a = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] a = dummy_past_residuals[:] a = scheduler.timesteps[5] a = scheduler.timesteps[6] a = scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample a = scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) a = scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample a = scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def lowerCamelCase__ ( self :List[Any] ): '''simple docstring''' for timesteps in [100, 1000]: self.check_over_configs(num_train_timesteps=__magic_name__ , time_step=__magic_name__ ) def lowerCamelCase__ ( self :Dict ): '''simple docstring''' for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ): self.check_over_forward(num_inference_steps=__magic_name__ , time_step=__magic_name__ ) def lowerCamelCase__ ( self :Tuple ): '''simple docstring''' a = self.full_loop() a = torch.mean(torch.abs(__magic_name__ ) ) assert abs(result_mean.item() - 254_0529 ) < 10
347
1
from __future__ import annotations def __A ( __lowerCamelCase , __lowerCamelCase ) -> int: print(f'Vertex\tShortest Distance from vertex {src}' ) for i, d in enumerate(__lowerCamelCase ): print(f'{i}\t\t{d}' ) def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Dict: for j in range(__lowerCamelCase ): a , a , a = (graph[j][k] for k in ["""src""", """dst""", """weight"""]) if distance[u] != float("""inf""" ) and distance[u] + w < distance[v]: return True return False def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> list[float]: a = [float("""inf""" )] * vertex_count a = 0.0 for _ in range(vertex_count - 1 ): for j in range(__lowerCamelCase ): a , a , a = (graph[j][k] for k in ["""src""", """dst""", """weight"""]) if distance[u] != float("""inf""" ) and distance[u] + w < distance[v]: a = distance[u] + w a = check_negative_cycle(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) if negative_cycle_exists: raise Exception("""Negative cycle found""" ) return distance if __name__ == "__main__": import doctest doctest.testmod() __UpperCamelCase : Dict = int(input("Enter number of vertices: ").strip()) __UpperCamelCase : Dict = int(input("Enter number of edges: ").strip()) __UpperCamelCase : list[dict[str, int]] = [{} for _ in range(E)] for i in range(E): print("Edge ", i + 1) __UpperCamelCase , __UpperCamelCase , __UpperCamelCase : Any = ( int(x) for x in input("Enter source, destination, weight: ").strip().split(" ") ) __UpperCamelCase : Tuple = {"src": src, "dst": dest, "weight": weight} __UpperCamelCase : List[str] = int(input("\nEnter shortest path source:").strip()) __UpperCamelCase : Tuple = bellman_ford(graph, V, E, source) print_distance(shortest_distance, 0)
347
__UpperCamelCase : Dict = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" def __A ( ) -> None: a = input("""Enter message: """ ) a = input("""Enter key [alphanumeric]: """ ) a = input("""Encrypt/Decrypt [e/d]: """ ) if mode.lower().startswith("""e""" ): a = """encrypt""" a = encrypt_message(__lowerCamelCase , __lowerCamelCase ) elif mode.lower().startswith("""d""" ): a = """decrypt""" a = decrypt_message(__lowerCamelCase , __lowerCamelCase ) print(f'\n{mode.title()}ed message:' ) print(__lowerCamelCase ) def __A ( __lowerCamelCase , __lowerCamelCase ) -> str: return translate_message(__lowerCamelCase , __lowerCamelCase , """encrypt""" ) def __A ( __lowerCamelCase , __lowerCamelCase ) -> str: return translate_message(__lowerCamelCase , __lowerCamelCase , """decrypt""" ) def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> str: a = [] a = 0 a = key.upper() for symbol in message: a = LETTERS.find(symbol.upper() ) if num != -1: if mode == "encrypt": num += LETTERS.find(key[key_index] ) elif mode == "decrypt": num -= LETTERS.find(key[key_index] ) num %= len(__lowerCamelCase ) if symbol.isupper(): translated.append(LETTERS[num] ) elif symbol.islower(): translated.append(LETTERS[num].lower() ) key_index += 1 if key_index == len(__lowerCamelCase ): a = 0 else: translated.append(__lowerCamelCase ) return "".join(__lowerCamelCase ) if __name__ == "__main__": main()
347
1
import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, ) from ...test_tokenization_common import TokenizerTesterMixin __UpperCamelCase : Optional[int] = get_tests_dir("fixtures/test_sentencepiece.model") if is_torch_available(): from transformers.models.mbart.modeling_mbart import shift_tokens_right __UpperCamelCase : Optional[Any] = 250_004 __UpperCamelCase : Any = 250_020 @require_sentencepiece @require_tokenizers class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ): UpperCamelCase__ = MBartTokenizer UpperCamelCase__ = MBartTokenizerFast UpperCamelCase__ = True UpperCamelCase__ = True def lowerCamelCase__ ( self :Dict ): '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing a = MBartTokenizer(__magic_name__ , keep_accents=__magic_name__ ) tokenizer.save_pretrained(self.tmpdirname ) def lowerCamelCase__ ( self :List[Any] ): '''simple docstring''' a = MBartTokenizer(__magic_name__ , keep_accents=__magic_name__ ) a = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(__magic_name__ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(__magic_name__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) a = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( __magic_name__ , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """.""", ] , ) a = tokenizer.convert_tokens_to_ids(__magic_name__ ) self.assertListEqual( __magic_name__ , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] # ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^ ] , ) a = tokenizer.convert_ids_to_tokens(__magic_name__ ) self.assertListEqual( __magic_name__ , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """.""", ] , ) def lowerCamelCase__ ( self :Any ): '''simple docstring''' if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return a = (self.rust_tokenizer_class, """hf-internal-testing/tiny-random-mbart""", {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ): a = self.rust_tokenizer_class.from_pretrained(__magic_name__ , **__magic_name__ ) a = self.tokenizer_class.from_pretrained(__magic_name__ , **__magic_name__ ) a = tempfile.mkdtemp() a = tokenizer_r.save_pretrained(__magic_name__ ) a = tokenizer_p.save_pretrained(__magic_name__ ) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) ) a = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f ) self.assertSequenceEqual(__magic_name__ , __magic_name__ ) # Checks everything loads correctly in the same way a = tokenizer_r.from_pretrained(__magic_name__ ) a = tokenizer_p.from_pretrained(__magic_name__ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(__magic_name__ , __magic_name__ ) ) # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key)) # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id")) shutil.rmtree(__magic_name__ ) # Save tokenizer rust, legacy_format=True a = tempfile.mkdtemp() a = tokenizer_r.save_pretrained(__magic_name__ , legacy_format=__magic_name__ ) a = tokenizer_p.save_pretrained(__magic_name__ ) # Checks it save with the same files self.assertSequenceEqual(__magic_name__ , __magic_name__ ) # Checks everything loads correctly in the same way a = tokenizer_r.from_pretrained(__magic_name__ ) a = tokenizer_p.from_pretrained(__magic_name__ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(__magic_name__ , __magic_name__ ) ) shutil.rmtree(__magic_name__ ) # Save tokenizer rust, legacy_format=False a = tempfile.mkdtemp() a = tokenizer_r.save_pretrained(__magic_name__ , legacy_format=__magic_name__ ) a = tokenizer_p.save_pretrained(__magic_name__ ) # Checks it saved the tokenizer.json file self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way a = tokenizer_r.from_pretrained(__magic_name__ ) a = tokenizer_p.from_pretrained(__magic_name__ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(__magic_name__ , __magic_name__ ) ) shutil.rmtree(__magic_name__ ) @require_torch @require_sentencepiece @require_tokenizers class __lowerCAmelCase ( unittest.TestCase ): UpperCamelCase__ = '''facebook/mbart-large-en-ro''' UpperCamelCase__ = [ ''' UN Chief Says There Is No Military Solution in Syria''', ''' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.''', ] UpperCamelCase__ = [ '''Şeful ONU declară că nu există o soluţie militară în Siria''', '''Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei''' ''' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor''' ''' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.''', ] UpperCamelCase__ = [8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2, EN_CODE] @classmethod def lowerCamelCase__ ( cls :Optional[int] ): '''simple docstring''' a = MBartTokenizer.from_pretrained( cls.checkpoint_name , src_lang="""en_XX""" , tgt_lang="""ro_RO""" ) a = 1 return cls def lowerCamelCase__ ( self :Optional[int] ): '''simple docstring''' self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ar_AR"""] , 25_0001 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""en_EN"""] , 25_0004 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ro_RO"""] , 25_0020 ) def lowerCamelCase__ ( self :int ): '''simple docstring''' a = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , __magic_name__ ) def lowerCamelCase__ ( self :Any ): '''simple docstring''' self.assertIn(__magic_name__ , self.tokenizer.all_special_ids ) a = [RO_CODE, 884, 9019, 96, 9, 916, 8_6792, 36, 1_8743, 1_5596, 5, 2] a = self.tokenizer.decode(__magic_name__ , skip_special_tokens=__magic_name__ ) a = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__magic_name__ ) self.assertEqual(__magic_name__ , __magic_name__ ) self.assertNotIn(self.tokenizer.eos_token , __magic_name__ ) def lowerCamelCase__ ( self :Any ): '''simple docstring''' a = ["""this is gunna be a long sentence """ * 20] assert isinstance(src_text[0] , __magic_name__ ) a = 10 a = self.tokenizer(__magic_name__ , max_length=__magic_name__ , truncation=__magic_name__ ).input_ids[0] self.assertEqual(ids[-2] , 2 ) self.assertEqual(ids[-1] , __magic_name__ ) self.assertEqual(len(__magic_name__ ) , __magic_name__ ) def lowerCamelCase__ ( self :Optional[Any] ): '''simple docstring''' self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """ar_AR"""] ) , [25_0026, 25_0001] ) def lowerCamelCase__ ( self :Dict ): '''simple docstring''' a = tempfile.mkdtemp() a = self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(__magic_name__ ) a = MBartTokenizer.from_pretrained(__magic_name__ ) self.assertDictEqual(new_tok.fairseq_tokens_to_ids , __magic_name__ ) @require_torch def lowerCamelCase__ ( self :Optional[int] ): '''simple docstring''' a = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=__magic_name__ , return_tensors="""pt""" ) a = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id ) # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE] assert batch.decoder_input_ids[1][0].tolist() == RO_CODE assert batch.decoder_input_ids[1][-1] == 2 assert batch.labels[1][-2:].tolist() == [2, RO_CODE] @require_torch def lowerCamelCase__ ( self :Any ): '''simple docstring''' a = self.tokenizer( self.src_text , text_target=self.tgt_text , padding=__magic_name__ , truncation=__magic_name__ , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , ) a = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id ) self.assertIsInstance(__magic_name__ , __magic_name__ ) self.assertEqual((2, 14) , batch.input_ids.shape ) self.assertEqual((2, 14) , batch.attention_mask.shape ) a = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens , __magic_name__ ) self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens , [] ) self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] ) def lowerCamelCase__ ( self :Optional[Any] ): '''simple docstring''' a = self.tokenizer(self.src_text , padding=__magic_name__ , truncation=__magic_name__ , max_length=3 , return_tensors="""pt""" ) a = self.tokenizer( text_target=self.tgt_text , padding=__magic_name__ , truncation=__magic_name__ , max_length=10 , return_tensors="""pt""" ) a = targets["""input_ids"""] a = shift_tokens_right(__magic_name__ , self.tokenizer.pad_token_id ) self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.decoder_input_ids.shape[1] , 10 ) @require_torch def lowerCamelCase__ ( self :List[Any] ): '''simple docstring''' a = self.tokenizer._build_translation_inputs( """A test""" , return_tensors="""pt""" , src_lang="""en_XX""" , tgt_lang="""ar_AR""" ) self.assertEqual( nested_simplify(__magic_name__ ) , { # A, test, EOS, en_XX """input_ids""": [[62, 3034, 2, 25_0004]], """attention_mask""": [[1, 1, 1, 1]], # ar_AR """forced_bos_token_id""": 25_0001, } , )
347
import unittest import numpy as np from transformers import RobertaConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_flax_available(): from transformers.models.roberta.modeling_flax_roberta import ( FlaxRobertaForCausalLM, FlaxRobertaForMaskedLM, FlaxRobertaForMultipleChoice, FlaxRobertaForQuestionAnswering, FlaxRobertaForSequenceClassification, FlaxRobertaForTokenClassification, FlaxRobertaModel, ) class __lowerCAmelCase ( unittest.TestCase ): def __init__( self :Optional[int] , __magic_name__ :Tuple , __magic_name__ :Tuple=13 , __magic_name__ :List[Any]=7 , __magic_name__ :Optional[Any]=True , __magic_name__ :List[Any]=True , __magic_name__ :Union[str, Any]=True , __magic_name__ :List[str]=True , __magic_name__ :str=99 , __magic_name__ :Optional[Any]=32 , __magic_name__ :Union[str, Any]=5 , __magic_name__ :Any=4 , __magic_name__ :int=37 , __magic_name__ :Tuple="gelu" , __magic_name__ :List[str]=0.1 , __magic_name__ :Dict=0.1 , __magic_name__ :Tuple=512 , __magic_name__ :Dict=16 , __magic_name__ :Optional[int]=2 , __magic_name__ :Optional[int]=0.02 , __magic_name__ :Optional[Any]=4 , ): '''simple docstring''' a = parent a = batch_size a = seq_length a = is_training a = use_attention_mask a = use_token_type_ids a = use_labels a = vocab_size a = hidden_size a = num_hidden_layers a = num_attention_heads a = intermediate_size a = hidden_act a = hidden_dropout_prob a = attention_probs_dropout_prob a = max_position_embeddings a = type_vocab_size a = type_sequence_label_size a = initializer_range a = num_choices def lowerCamelCase__ ( self :int ): '''simple docstring''' a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) a = None if self.use_attention_mask: a = random_attention_mask([self.batch_size, self.seq_length] ) a = None if self.use_token_type_ids: a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) a = RobertaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__magic_name__ , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def lowerCamelCase__ ( self :List[Any] ): '''simple docstring''' a = self.prepare_config_and_inputs() a , a , a , a = config_and_inputs a = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask} return config, inputs_dict def lowerCamelCase__ ( self :Dict ): '''simple docstring''' a = self.prepare_config_and_inputs() a , a , a , a = config_and_inputs a = True a = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) a = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, encoder_hidden_states, encoder_attention_mask, ) @require_flax class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ): UpperCamelCase__ = True UpperCamelCase__ = ( ( FlaxRobertaModel, FlaxRobertaForCausalLM, FlaxRobertaForMaskedLM, FlaxRobertaForSequenceClassification, FlaxRobertaForTokenClassification, FlaxRobertaForMultipleChoice, FlaxRobertaForQuestionAnswering, ) if is_flax_available() else () ) def lowerCamelCase__ ( self :Dict ): '''simple docstring''' a = FlaxRobertaModelTester(self ) @slow def lowerCamelCase__ ( self :Optional[Any] ): '''simple docstring''' for model_class_name in self.all_model_classes: a = model_class_name.from_pretrained("""roberta-base""" , from_pt=__magic_name__ ) a = model(np.ones((1, 1) ) ) self.assertIsNotNone(__magic_name__ )
347
1
from .data_collator import ( DataCollatorForLanguageModeling, DataCollatorForPermutationLanguageModeling, DataCollatorForSeqaSeq, DataCollatorForSOP, DataCollatorForTokenClassification, DataCollatorForWholeWordMask, DataCollatorWithPadding, DefaultDataCollator, default_data_collator, ) from .metrics import glue_compute_metrics, xnli_compute_metrics from .processors import ( DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor, SquadExample, SquadFeatures, SquadVaProcessor, SquadVaProcessor, glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels, squad_convert_examples_to_features, xnli_output_modes, xnli_processors, xnli_tasks_num_labels, )
347
import io import itertools import json from dataclasses import dataclass from typing import Optional import pyarrow as pa import pyarrow.json as paj import datasets from datasets.table import table_cast from datasets.utils.file_utils import readline __UpperCamelCase : Any = datasets.utils.logging.get_logger(__name__) @dataclass class __lowerCAmelCase ( datasets.BuilderConfig ): UpperCamelCase__ = None UpperCamelCase__ = "utf-8" UpperCamelCase__ = None UpperCamelCase__ = None UpperCamelCase__ = True # deprecated UpperCamelCase__ = None # deprecated UpperCamelCase__ = 10 << 20 # 10MB UpperCamelCase__ = None class __lowerCAmelCase ( datasets.ArrowBasedBuilder ): UpperCamelCase__ = JsonConfig def lowerCamelCase__ ( self :str ): '''simple docstring''' if self.config.block_size is not None: logger.warning("""The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead""" ) a = self.config.block_size if self.config.use_threads is not True: logger.warning( """The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore.""" ) if self.config.newlines_in_values is not None: raise ValueError("""The JSON loader parameter `newlines_in_values` is no longer supported""" ) return datasets.DatasetInfo(features=self.config.features ) def lowerCamelCase__ ( self :Tuple , __magic_name__ :str ): '''simple docstring''' if not self.config.data_files: raise ValueError(F'At least one data file must be specified, but got data_files={self.config.data_files}' ) a = dl_manager.download_and_extract(self.config.data_files ) if isinstance(__magic_name__ , (str, list, tuple) ): a = data_files if isinstance(__magic_name__ , __magic_name__ ): a = [files] a = [dl_manager.iter_files(__magic_name__ ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files} )] a = [] for split_name, files in data_files.items(): if isinstance(__magic_name__ , __magic_name__ ): a = [files] a = [dl_manager.iter_files(__magic_name__ ) for file in files] splits.append(datasets.SplitGenerator(name=__magic_name__ , gen_kwargs={"""files""": files} ) ) return splits def lowerCamelCase__ ( self :List[str] , __magic_name__ :pa.Table ): '''simple docstring''' if self.config.features is not None: # adding missing columns for column_name in set(self.config.features ) - set(pa_table.column_names ): a = self.config.features.arrow_schema.field(__magic_name__ ).type a = pa_table.append_column(__magic_name__ , pa.array([None] * len(__magic_name__ ) , type=__magic_name__ ) ) # more expensive cast to support nested structures with keys in a different order # allows str <-> int/float or str to Audio for example a = table_cast(__magic_name__ , self.config.features.arrow_schema ) return pa_table def lowerCamelCase__ ( self :Optional[int] , __magic_name__ :Union[str, Any] ): '''simple docstring''' for file_idx, file in enumerate(itertools.chain.from_iterable(__magic_name__ ) ): # If the file is one json object and if we need to look at the list of items in one specific field if self.config.field is not None: with open(__magic_name__ , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f: a = json.load(__magic_name__ ) # We keep only the field we are interested in a = dataset[self.config.field] # We accept two format: a list of dicts or a dict of lists if isinstance(__magic_name__ , (list, tuple) ): a = set().union(*[row.keys() for row in dataset] ) a = {col: [row.get(__magic_name__ ) for row in dataset] for col in keys} else: a = dataset a = pa.Table.from_pydict(__magic_name__ ) yield file_idx, self._cast_table(__magic_name__ ) # If the file has one json object per line else: with open(__magic_name__ , """rb""" ) as f: a = 0 # Use block_size equal to the chunk size divided by 32 to leverage multithreading # Set a default minimum value of 16kB if the chunk size is really small a = max(self.config.chunksize // 32 , 16 << 10 ) a = ( self.config.encoding_errors if self.config.encoding_errors is not None else """strict""" ) while True: a = f.read(self.config.chunksize ) if not batch: break # Finish current line try: batch += f.readline() except (AttributeError, io.UnsupportedOperation): batch += readline(__magic_name__ ) # PyArrow only accepts utf-8 encoded bytes if self.config.encoding != "utf-8": a = batch.decode(self.config.encoding , errors=__magic_name__ ).encode("""utf-8""" ) try: while True: try: a = paj.read_json( io.BytesIO(__magic_name__ ) , read_options=paj.ReadOptions(block_size=__magic_name__ ) ) break except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e: if ( isinstance(__magic_name__ , pa.ArrowInvalid ) and "straddling" not in str(__magic_name__ ) or block_size > len(__magic_name__ ) ): raise else: # Increase the block size in case it was too small. # The block size will be reset for the next file. logger.debug( F'Batch of {len(__magic_name__ )} bytes couldn\'t be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.' ) block_size *= 2 except pa.ArrowInvalid as e: try: with open( __magic_name__ , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f: a = json.load(__magic_name__ ) except json.JSONDecodeError: logger.error(F'Failed to read file \'{file}\' with error {type(__magic_name__ )}: {e}' ) raise e # If possible, parse the file as a list of json objects and exit the loop if isinstance(__magic_name__ , __magic_name__ ): # list is the only sequence type supported in JSON try: a = set().union(*[row.keys() for row in dataset] ) a = {col: [row.get(__magic_name__ ) for row in dataset] for col in keys} a = pa.Table.from_pydict(__magic_name__ ) except (pa.ArrowInvalid, AttributeError) as e: logger.error(F'Failed to read file \'{file}\' with error {type(__magic_name__ )}: {e}' ) raise ValueError(F'Not able to read records in the JSON file at {file}.' ) from None yield file_idx, self._cast_table(__magic_name__ ) break else: logger.error(F'Failed to read file \'{file}\' with error {type(__magic_name__ )}: {e}' ) raise ValueError( F'Not able to read records in the JSON file at {file}. ' F'You should probably indicate the field of the JSON file containing your records. ' F'This JSON file contain the following fields: {str(list(dataset.keys() ) )}. ' F'Select the correct one and provide it as `field=\'XXX\'` to the dataset loading method. ' ) from None # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(__magic_name__ ) batch_idx += 1
347
1
import argparse import os from io import BytesIO from pathlib import Path import requests from clip_retrieval.clip_client import ClipClient from PIL import Image from tqdm import tqdm def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Optional[Any]: a = 1.5 a = int(factor * num_class_images ) a = ClipClient( url="""https://knn.laion.ai/knn-service""" , indice_name="""laion_400m""" , num_images=__lowerCamelCase , aesthetic_weight=0.1 ) os.makedirs(f'{class_data_dir}/images' , exist_ok=__lowerCamelCase ) if len(list(Path(f'{class_data_dir}/images' ).iterdir() ) ) >= num_class_images: return while True: a = client.query(text=__lowerCamelCase ) if len(__lowerCamelCase ) >= factor * num_class_images or num_images > 1E4: break else: a = int(factor * num_images ) a = ClipClient( url="""https://knn.laion.ai/knn-service""" , indice_name="""laion_400m""" , num_images=__lowerCamelCase , aesthetic_weight=0.1 , ) a = 0 a = 0 a = tqdm(desc="""downloading real regularization images""" , total=__lowerCamelCase ) with open(f'{class_data_dir}/caption.txt' , """w""" ) as fa, open(f'{class_data_dir}/urls.txt' , """w""" ) as fa, open( f'{class_data_dir}/images.txt' , """w""" ) as fa: while total < num_class_images: a = class_images[count] count += 1 try: a = requests.get(images["""url"""] ) if img.status_code == 200: a = Image.open(BytesIO(img.content ) ) with open(f'{class_data_dir}/images/{total}.jpg' , """wb""" ) as f: f.write(img.content ) fa.write(images["""caption"""] + """\n""" ) fa.write(images["""url"""] + """\n""" ) fa.write(f'{class_data_dir}/images/{total}.jpg' + """\n""" ) total += 1 pbar.update(1 ) else: continue except Exception: continue return def __A ( ) -> str: a = argparse.ArgumentParser("""""" , add_help=__lowerCamelCase ) parser.add_argument("""--class_prompt""" , help="""text prompt to retrieve images""" , required=__lowerCamelCase , type=__lowerCamelCase ) parser.add_argument("""--class_data_dir""" , help="""path to save images""" , required=__lowerCamelCase , type=__lowerCamelCase ) parser.add_argument("""--num_class_images""" , help="""number of images to download""" , default=200 , type=__lowerCamelCase ) return parser.parse_args() if __name__ == "__main__": __UpperCamelCase : Union[str, Any] = parse_args() retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
347
import warnings from typing import Dict, List, Optional, Tuple from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging __UpperCamelCase : Dict = logging.get_logger(__name__) class __lowerCAmelCase ( __magic_name__ ): UpperCamelCase__ = ['''input_ids''', '''attention_mask'''] def __init__( self :List[str] , __magic_name__ :int="</s>" , __magic_name__ :List[Any]="<unk>" , __magic_name__ :Optional[Any]="<pad>" , __magic_name__ :Optional[int]=125 , __magic_name__ :List[str]=None , **__magic_name__ :List[str] , ): '''simple docstring''' if extra_ids > 0 and additional_special_tokens is None: a = [F'<extra_id_{i}>' for i in range(__magic_name__ )] elif extra_ids > 0 and additional_special_tokens is not None: # Check that we have the right number of extra_id special tokens a = len(set(filter(lambda __magic_name__ : bool("""extra_id""" in str(__magic_name__ ) ) , __magic_name__ ) ) ) if extra_tokens != extra_ids: raise ValueError( F'Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are' """ provided to ByT5Tokenizer. In this case the additional_special_tokens must include the""" """ extra_ids tokens""" ) a = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else pad_token a = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else eos_token a = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else unk_token super().__init__( eos_token=__magic_name__ , unk_token=__magic_name__ , pad_token=__magic_name__ , extra_ids=__magic_name__ , additional_special_tokens=__magic_name__ , **__magic_name__ , ) a = extra_ids a = 2**8 # utf is 8 bits # define special tokens dict a = { self.pad_token: 0, self.eos_token: 1, self.unk_token: 2, } a = len(self.special_tokens_encoder ) a = len(__magic_name__ ) for i, token in enumerate(__magic_name__ ): a = self.vocab_size + i - n a = {v: k for k, v in self.special_tokens_encoder.items()} @property def lowerCamelCase__ ( self :List[Any] ): '''simple docstring''' return self._utf_vocab_size + self._num_special_tokens + self._extra_ids def lowerCamelCase__ ( self :Any , __magic_name__ :List[int] , __magic_name__ :Optional[List[int]] = None , __magic_name__ :bool = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__magic_name__ , token_ids_a=__magic_name__ , already_has_special_tokens=__magic_name__ ) # normal case: some special tokens if token_ids_a is None: return ([0] * len(__magic_name__ )) + [1] return ([0] * len(__magic_name__ )) + [1] + ([0] * len(__magic_name__ )) + [1] def lowerCamelCase__ ( self :str , __magic_name__ :List[int] ): '''simple docstring''' if len(__magic_name__ ) > 0 and token_ids[-1] == self.eos_token_id: warnings.warn( F'This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated' """ eos tokens being added.""" ) return token_ids else: return token_ids + [self.eos_token_id] def lowerCamelCase__ ( self :Union[str, Any] , __magic_name__ :List[int] , __magic_name__ :Optional[List[int]] = None ): '''simple docstring''' a = [self.eos_token_id] if token_ids_a is None: return len(token_ids_a + eos ) * [0] return len(token_ids_a + eos + token_ids_a + eos ) * [0] def lowerCamelCase__ ( self :Union[str, Any] , __magic_name__ :List[int] , __magic_name__ :Optional[List[int]] = None ): '''simple docstring''' a = self._add_eos_if_not_present(__magic_name__ ) if token_ids_a is None: return token_ids_a else: a = self._add_eos_if_not_present(__magic_name__ ) return token_ids_a + token_ids_a def lowerCamelCase__ ( self :List[str] , __magic_name__ :str ): '''simple docstring''' a = [chr(__magic_name__ ) for i in text.encode("""utf-8""" )] return tokens def lowerCamelCase__ ( self :Tuple , __magic_name__ :str ): '''simple docstring''' if token in self.special_tokens_encoder: a = self.special_tokens_encoder[token] elif token in self.added_tokens_encoder: a = self.added_tokens_encoder[token] elif len(__magic_name__ ) != 1: a = self.unk_token_id else: a = ord(__magic_name__ ) + self._num_special_tokens return token_id def lowerCamelCase__ ( self :List[str] , __magic_name__ :Dict ): '''simple docstring''' if index in self.special_tokens_decoder: a = self.special_tokens_decoder[index] else: a = chr(index - self._num_special_tokens ) return token def lowerCamelCase__ ( self :Tuple , __magic_name__ :Optional[int] ): '''simple docstring''' a = b"""""" for token in tokens: if token in self.special_tokens_decoder: a = self.special_tokens_decoder[token].encode("""utf-8""" ) elif token in self.added_tokens_decoder: a = self.special_tokens_decoder[token].encode("""utf-8""" ) elif token in self.special_tokens_encoder: a = token.encode("""utf-8""" ) elif token in self.added_tokens_encoder: a = token.encode("""utf-8""" ) else: a = bytes([ord(__magic_name__ )] ) bstring += tok_string a = bstring.decode("""utf-8""" , errors="""ignore""" ) return string def lowerCamelCase__ ( self :Optional[Any] , __magic_name__ :str , __magic_name__ :Optional[str] = None ): '''simple docstring''' return ()
347
1
from dataclasses import dataclass, field from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union import pyarrow as pa if TYPE_CHECKING: from .features import FeatureType @dataclass class __lowerCAmelCase : UpperCamelCase__ = 42 UpperCamelCase__ = None # Automatically constructed UpperCamelCase__ = "dict" UpperCamelCase__ = None UpperCamelCase__ = field(default='''Translation''' , init=__magic_name__ , repr=__magic_name__ ) def __call__( self :int ): '''simple docstring''' return pa.struct({lang: pa.string() for lang in sorted(self.languages )} ) def lowerCamelCase__ ( self :Tuple ): '''simple docstring''' from .features import Value return {k: Value("""string""" ) for k in sorted(self.languages )} @dataclass class __lowerCAmelCase : UpperCamelCase__ = None UpperCamelCase__ = None UpperCamelCase__ = None # Automatically constructed UpperCamelCase__ = "dict" UpperCamelCase__ = None UpperCamelCase__ = field(default='''TranslationVariableLanguages''' , init=__magic_name__ , repr=__magic_name__ ) def lowerCamelCase__ ( self :List[Any] ): '''simple docstring''' a = sorted(set(self.languages ) ) if self.languages else None a = len(self.languages ) if self.languages else None def __call__( self :Tuple ): '''simple docstring''' return pa.struct({"""language""": pa.list_(pa.string() ), """translation""": pa.list_(pa.string() )} ) def lowerCamelCase__ ( self :int , __magic_name__ :str ): '''simple docstring''' a = set(self.languages ) if self.languages and set(__magic_name__ ) - lang_set: raise ValueError( F'Some languages in example ({", ".join(sorted(set(__magic_name__ ) - lang_set ) )}) are not in valid set ({", ".join(__magic_name__ )}).' ) # Convert dictionary into tuples, splitting out cases where there are # multiple translations for a single language. a = [] for lang, text in translation_dict.items(): if isinstance(__magic_name__ , __magic_name__ ): translation_tuples.append((lang, text) ) else: translation_tuples.extend([(lang, el) for el in text] ) # Ensure translations are in ascending order by language code. a , a = zip(*sorted(__magic_name__ ) ) return {"language": languages, "translation": translations} def lowerCamelCase__ ( self :Dict ): '''simple docstring''' from .features import Sequence, Value return { "language": Sequence(Value("""string""" ) ), "translation": Sequence(Value("""string""" ) ), }
347
import copy import unittest from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_MULTIPLE_CHOICE_MAPPING, MODEL_FOR_QUESTION_ANSWERING_MAPPING, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, LayoutLMvaConfig, LayoutLMvaForQuestionAnswering, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaModel, ) from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class __lowerCAmelCase : def __init__( self :Optional[int] , __magic_name__ :str , __magic_name__ :int=2 , __magic_name__ :List[str]=3 , __magic_name__ :Optional[int]=4 , __magic_name__ :str=2 , __magic_name__ :Any=7 , __magic_name__ :Optional[Any]=True , __magic_name__ :List[Any]=True , __magic_name__ :Optional[int]=True , __magic_name__ :Dict=True , __magic_name__ :List[Any]=99 , __magic_name__ :Dict=36 , __magic_name__ :Optional[Any]=3 , __magic_name__ :str=4 , __magic_name__ :Optional[Any]=37 , __magic_name__ :Dict="gelu" , __magic_name__ :Any=0.1 , __magic_name__ :Union[str, Any]=0.1 , __magic_name__ :Dict=512 , __magic_name__ :str=16 , __magic_name__ :List[Any]=2 , __magic_name__ :Tuple=0.02 , __magic_name__ :Any=6 , __magic_name__ :Optional[int]=6 , __magic_name__ :Tuple=3 , __magic_name__ :str=4 , __magic_name__ :List[str]=None , __magic_name__ :str=1000 , ): '''simple docstring''' a = parent a = batch_size a = num_channels a = image_size a = patch_size a = text_seq_length a = is_training a = use_input_mask a = use_token_type_ids a = use_labels a = vocab_size a = hidden_size a = num_hidden_layers a = num_attention_heads a = intermediate_size a = hidden_act a = hidden_dropout_prob a = attention_probs_dropout_prob a = max_position_embeddings a = type_vocab_size a = type_sequence_label_size a = initializer_range a = coordinate_size a = shape_size a = num_labels a = num_choices a = scope a = range_bbox # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token) a = text_seq_length a = (image_size // patch_size) ** 2 + 1 a = self.text_seq_length + self.image_seq_length def lowerCamelCase__ ( self :List[str] ): '''simple docstring''' a = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size ) a = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox ) # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: a = bbox[i, j, 3] a = bbox[i, j, 1] a = t if bbox[i, j, 2] < bbox[i, j, 0]: a = bbox[i, j, 2] a = bbox[i, j, 0] a = t a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) a = None if self.use_input_mask: a = random_attention_mask([self.batch_size, self.text_seq_length] ) a = None if self.use_token_type_ids: a = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size ) a = None a = None if self.use_labels: a = ids_tensor([self.batch_size] , self.type_sequence_label_size ) a = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels ) a = LayoutLMvaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , ) return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels def lowerCamelCase__ ( self :int , __magic_name__ :Union[str, Any] , __magic_name__ :List[str] , __magic_name__ :List[str] , __magic_name__ :int , __magic_name__ :Tuple , __magic_name__ :int , __magic_name__ :Tuple , __magic_name__ :int ): '''simple docstring''' a = LayoutLMvaModel(config=__magic_name__ ) model.to(__magic_name__ ) model.eval() # text + image a = model(__magic_name__ , pixel_values=__magic_name__ ) a = model( __magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ ) a = model(__magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , token_type_ids=__magic_name__ ) a = model(__magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) # text only a = model(__magic_name__ ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) ) # image only a = model(pixel_values=__magic_name__ ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) ) def lowerCamelCase__ ( self :Optional[Any] , __magic_name__ :List[Any] , __magic_name__ :Optional[int] , __magic_name__ :Tuple , __magic_name__ :Union[str, Any] , __magic_name__ :int , __magic_name__ :Optional[int] , __magic_name__ :List[str] , __magic_name__ :List[str] ): '''simple docstring''' a = self.num_labels a = LayoutLMvaForSequenceClassification(__magic_name__ ) model.to(__magic_name__ ) model.eval() a = model( __magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , labels=__magic_name__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase__ ( self :Tuple , __magic_name__ :Dict , __magic_name__ :Optional[Any] , __magic_name__ :Optional[Any] , __magic_name__ :Dict , __magic_name__ :Dict , __magic_name__ :int , __magic_name__ :List[str] , __magic_name__ :Tuple ): '''simple docstring''' a = self.num_labels a = LayoutLMvaForTokenClassification(config=__magic_name__ ) model.to(__magic_name__ ) model.eval() a = model( __magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , labels=__magic_name__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) ) def lowerCamelCase__ ( self :Optional[int] , __magic_name__ :Tuple , __magic_name__ :str , __magic_name__ :int , __magic_name__ :Optional[int] , __magic_name__ :str , __magic_name__ :List[str] , __magic_name__ :Optional[int] , __magic_name__ :Optional[Any] ): '''simple docstring''' a = LayoutLMvaForQuestionAnswering(config=__magic_name__ ) model.to(__magic_name__ ) model.eval() a = model( __magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , start_positions=__magic_name__ , end_positions=__magic_name__ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCamelCase__ ( self :Union[str, Any] ): '''simple docstring''' a = self.prepare_config_and_inputs() ( ( a ) , ( a ) , ( a ) , ( a ) , ( a ) , ( a ) , ( a ) , ( a ) , ) = config_and_inputs a = { """input_ids""": input_ids, """bbox""": bbox, """pixel_values""": pixel_values, """token_type_ids""": token_type_ids, """attention_mask""": input_mask, } return config, inputs_dict @require_torch class __lowerCAmelCase ( __magic_name__ , __magic_name__ , unittest.TestCase ): UpperCamelCase__ = False UpperCamelCase__ = False UpperCamelCase__ = False UpperCamelCase__ = ( ( LayoutLMvaModel, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaForQuestionAnswering, ) if is_torch_available() else () ) UpperCamelCase__ = ( {'''document-question-answering''': LayoutLMvaForQuestionAnswering, '''feature-extraction''': LayoutLMvaModel} if is_torch_available() else {} ) def lowerCamelCase__ ( self :List[str] , __magic_name__ :Union[str, Any] , __magic_name__ :List[str] , __magic_name__ :Optional[Any] , __magic_name__ :List[str] , __magic_name__ :List[Any] ): '''simple docstring''' return True def lowerCamelCase__ ( self :int ): '''simple docstring''' a = LayoutLMvaModelTester(self ) a = ConfigTester(self , config_class=__magic_name__ , hidden_size=37 ) def lowerCamelCase__ ( self :Optional[Any] , __magic_name__ :Dict , __magic_name__ :Dict , __magic_name__ :Any=False ): '''simple docstring''' a = copy.deepcopy(__magic_name__ ) if model_class in get_values(__magic_name__ ): a = { k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous() if isinstance(__magic_name__ , torch.Tensor ) and v.ndim > 1 else v for k, v in inputs_dict.items() } if return_labels: if model_class in get_values(__magic_name__ ): a = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=__magic_name__ ) elif model_class in get_values(__magic_name__ ): a = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__magic_name__ ) a = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__magic_name__ ) elif model_class in [ *get_values(__magic_name__ ), ]: a = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__magic_name__ ) elif model_class in [ *get_values(__magic_name__ ), ]: a = torch.zeros( (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=__magic_name__ , ) return inputs_dict def lowerCamelCase__ ( self :str ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCamelCase__ ( self :Union[str, Any] ): '''simple docstring''' a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__magic_name__ ) def lowerCamelCase__ ( self :Tuple ): '''simple docstring''' a = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: a = type self.model_tester.create_and_check_model(*__magic_name__ ) def lowerCamelCase__ ( self :Tuple ): '''simple docstring''' a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*__magic_name__ ) def lowerCamelCase__ ( self :Union[str, Any] ): '''simple docstring''' a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__magic_name__ ) def lowerCamelCase__ ( self :int ): '''simple docstring''' a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__magic_name__ ) @slow def lowerCamelCase__ ( self :int ): '''simple docstring''' for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a = LayoutLMvaModel.from_pretrained(__magic_name__ ) self.assertIsNotNone(__magic_name__ ) def __A ( ) -> str: a = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch class __lowerCAmelCase ( unittest.TestCase ): @cached_property def lowerCamelCase__ ( self :Optional[Any] ): '''simple docstring''' return LayoutLMvaImageProcessor(apply_ocr=__magic_name__ ) if is_vision_available() else None @slow def lowerCamelCase__ ( self :Optional[Any] ): '''simple docstring''' a = LayoutLMvaModel.from_pretrained("""microsoft/layoutlmv3-base""" ).to(__magic_name__ ) a = self.default_image_processor a = prepare_img() a = image_processor(images=__magic_name__ , return_tensors="""pt""" ).pixel_values.to(__magic_name__ ) a = torch.tensor([[1, 2]] ) a = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 ) # forward pass a = model( input_ids=input_ids.to(__magic_name__ ) , bbox=bbox.to(__magic_name__ ) , pixel_values=pixel_values.to(__magic_name__ ) , ) # verify the logits a = torch.Size((1, 199, 768) ) self.assertEqual(outputs.last_hidden_state.shape , __magic_name__ ) a = torch.tensor( [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] ).to(__magic_name__ ) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , __magic_name__ , atol=1E-4 ) )
347
1
import tempfile import torch from diffusers import IPNDMScheduler from .test_schedulers import SchedulerCommonTest class __lowerCAmelCase ( __magic_name__ ): UpperCamelCase__ = (IPNDMScheduler,) UpperCamelCase__ = (('''num_inference_steps''', 50),) def lowerCamelCase__ ( self :Any , **__magic_name__ :Optional[Any] ): '''simple docstring''' a = {"""num_train_timesteps""": 1000} config.update(**__magic_name__ ) return config def lowerCamelCase__ ( self :Optional[int] , __magic_name__ :Tuple=0 , **__magic_name__ :Optional[int] ): '''simple docstring''' a = dict(self.forward_default_kwargs ) a = kwargs.pop("""num_inference_steps""" , __magic_name__ ) a = self.dummy_sample a = 0.1 * sample a = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: a = self.get_scheduler_config(**__magic_name__ ) a = scheduler_class(**__magic_name__ ) scheduler.set_timesteps(__magic_name__ ) # copy over dummy past residuals a = dummy_past_residuals[:] if time_step is None: a = scheduler.timesteps[len(scheduler.timesteps ) // 2] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__magic_name__ ) a = scheduler_class.from_pretrained(__magic_name__ ) new_scheduler.set_timesteps(__magic_name__ ) # copy over dummy past residuals a = dummy_past_residuals[:] a = scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample a = new_scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" a = scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample a = new_scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def lowerCamelCase__ ( self :Union[str, Any] ): '''simple docstring''' pass def lowerCamelCase__ ( self :List[Any] , __magic_name__ :List[Any]=0 , **__magic_name__ :Any ): '''simple docstring''' a = dict(self.forward_default_kwargs ) a = kwargs.pop("""num_inference_steps""" , __magic_name__ ) a = self.dummy_sample a = 0.1 * sample a = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: a = self.get_scheduler_config() a = scheduler_class(**__magic_name__ ) scheduler.set_timesteps(__magic_name__ ) # copy over dummy past residuals (must be after setting timesteps) a = dummy_past_residuals[:] if time_step is None: a = scheduler.timesteps[len(scheduler.timesteps ) // 2] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__magic_name__ ) a = scheduler_class.from_pretrained(__magic_name__ ) # copy over dummy past residuals new_scheduler.set_timesteps(__magic_name__ ) # copy over dummy past residual (must be after setting timesteps) a = dummy_past_residuals[:] a = scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample a = new_scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" a = scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample a = new_scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def lowerCamelCase__ ( self :Optional[Any] , **__magic_name__ :Optional[int] ): '''simple docstring''' a = self.scheduler_classes[0] a = self.get_scheduler_config(**__magic_name__ ) a = scheduler_class(**__magic_name__ ) a = 10 a = self.dummy_model() a = self.dummy_sample_deter scheduler.set_timesteps(__magic_name__ ) for i, t in enumerate(scheduler.timesteps ): a = model(__magic_name__ , __magic_name__ ) a = scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ ).prev_sample for i, t in enumerate(scheduler.timesteps ): a = model(__magic_name__ , __magic_name__ ) a = scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ ).prev_sample return sample def lowerCamelCase__ ( self :str ): '''simple docstring''' a = dict(self.forward_default_kwargs ) a = kwargs.pop("""num_inference_steps""" , __magic_name__ ) for scheduler_class in self.scheduler_classes: a = self.get_scheduler_config() a = scheduler_class(**__magic_name__ ) a = self.dummy_sample a = 0.1 * sample if num_inference_steps is not None and hasattr(__magic_name__ , """set_timesteps""" ): scheduler.set_timesteps(__magic_name__ ) elif num_inference_steps is not None and not hasattr(__magic_name__ , """set_timesteps""" ): a = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) a = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] a = dummy_past_residuals[:] a = scheduler.timesteps[5] a = scheduler.timesteps[6] a = scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample a = scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) a = scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample a = scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def lowerCamelCase__ ( self :List[Any] ): '''simple docstring''' for timesteps in [100, 1000]: self.check_over_configs(num_train_timesteps=__magic_name__ , time_step=__magic_name__ ) def lowerCamelCase__ ( self :Dict ): '''simple docstring''' for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ): self.check_over_forward(num_inference_steps=__magic_name__ , time_step=__magic_name__ ) def lowerCamelCase__ ( self :Tuple ): '''simple docstring''' a = self.full_loop() a = torch.mean(torch.abs(__magic_name__ ) ) assert abs(result_mean.item() - 254_0529 ) < 10
347
from copy import deepcopy class __lowerCAmelCase : def __init__( self :Union[str, Any] , __magic_name__ :list[int] | None = None , __magic_name__ :int | None = None ): '''simple docstring''' if arr is None and size is not None: a = size a = [0] * size elif arr is not None: self.init(__magic_name__ ) else: raise ValueError("""Either arr or size must be specified""" ) def lowerCamelCase__ ( self :Dict , __magic_name__ :list[int] ): '''simple docstring''' a = len(__magic_name__ ) a = deepcopy(__magic_name__ ) for i in range(1 , self.size ): a = self.next_(__magic_name__ ) if j < self.size: self.tree[j] += self.tree[i] def lowerCamelCase__ ( self :Tuple ): '''simple docstring''' a = self.tree[:] for i in range(self.size - 1 , 0 , -1 ): a = self.next_(__magic_name__ ) if j < self.size: arr[j] -= arr[i] return arr @staticmethod def lowerCamelCase__ ( __magic_name__ :int ): '''simple docstring''' return index + (index & (-index)) @staticmethod def lowerCamelCase__ ( __magic_name__ :int ): '''simple docstring''' return index - (index & (-index)) def lowerCamelCase__ ( self :Any , __magic_name__ :int , __magic_name__ :int ): '''simple docstring''' if index == 0: self.tree[0] += value return while index < self.size: self.tree[index] += value a = self.next_(__magic_name__ ) def lowerCamelCase__ ( self :Optional[Any] , __magic_name__ :int , __magic_name__ :int ): '''simple docstring''' self.add(__magic_name__ , value - self.get(__magic_name__ ) ) def lowerCamelCase__ ( self :int , __magic_name__ :int ): '''simple docstring''' if right == 0: return 0 a = self.tree[0] right -= 1 # make right inclusive while right > 0: result += self.tree[right] a = self.prev(__magic_name__ ) return result def lowerCamelCase__ ( self :int , __magic_name__ :int , __magic_name__ :int ): '''simple docstring''' return self.prefix(__magic_name__ ) - self.prefix(__magic_name__ ) def lowerCamelCase__ ( self :Tuple , __magic_name__ :int ): '''simple docstring''' return self.query(__magic_name__ , index + 1 ) def lowerCamelCase__ ( self :Dict , __magic_name__ :int ): '''simple docstring''' value -= self.tree[0] if value < 0: return -1 a = 1 # Largest power of 2 <= size while j * 2 < self.size: j *= 2 a = 0 while j > 0: if i + j < self.size and self.tree[i + j] <= value: value -= self.tree[i + j] i += j j //= 2 return i if __name__ == "__main__": import doctest doctest.testmod()
347
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) __UpperCamelCase : Optional[Any] = { "configuration_wav2vec2": ["WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Wav2Vec2Config"], "feature_extraction_wav2vec2": ["Wav2Vec2FeatureExtractor"], "processing_wav2vec2": ["Wav2Vec2Processor"], "tokenization_wav2vec2": ["Wav2Vec2CTCTokenizer", "Wav2Vec2Tokenizer"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : str = [ "WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST", "Wav2Vec2ForAudioFrameClassification", "Wav2Vec2ForCTC", "Wav2Vec2ForMaskedLM", "Wav2Vec2ForPreTraining", "Wav2Vec2ForSequenceClassification", "Wav2Vec2ForXVector", "Wav2Vec2Model", "Wav2Vec2PreTrainedModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : Dict = [ "TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST", "TFWav2Vec2ForCTC", "TFWav2Vec2Model", "TFWav2Vec2PreTrainedModel", "TFWav2Vec2ForSequenceClassification", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : Union[str, Any] = [ "FlaxWav2Vec2ForCTC", "FlaxWav2Vec2ForPreTraining", "FlaxWav2Vec2Model", "FlaxWav2Vec2PreTrainedModel", ] if TYPE_CHECKING: from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig from .feature_extraction_wavaveca import WavaVecaFeatureExtractor from .processing_wavaveca import WavaVecaProcessor from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_wavaveca import ( WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, WavaVecaForAudioFrameClassification, WavaVecaForCTC, WavaVecaForMaskedLM, WavaVecaForPreTraining, WavaVecaForSequenceClassification, WavaVecaForXVector, WavaVecaModel, WavaVecaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_wavaveca import ( TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, TFWavaVecaForCTC, TFWavaVecaForSequenceClassification, TFWavaVecaModel, TFWavaVecaPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_wavaveca import ( FlaxWavaVecaForCTC, FlaxWavaVecaForPreTraining, FlaxWavaVecaModel, FlaxWavaVecaPreTrainedModel, ) else: import sys __UpperCamelCase : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
347
from __future__ import annotations from typing import Generic, TypeVar __UpperCamelCase : Union[str, Any] = TypeVar("T") class __lowerCAmelCase ( Generic[T] ): def __init__( self :Tuple , __magic_name__ :T ): '''simple docstring''' a = data a = self a = 0 class __lowerCAmelCase ( Generic[T] ): def __init__( self :Tuple ): '''simple docstring''' a = {} def lowerCamelCase__ ( self :Optional[int] , __magic_name__ :T ): '''simple docstring''' a = DisjointSetTreeNode(__magic_name__ ) def lowerCamelCase__ ( self :List[Any] , __magic_name__ :T ): '''simple docstring''' a = self.map[data] if elem_ref != elem_ref.parent: a = self.find_set(elem_ref.parent.data ) return elem_ref.parent def lowerCamelCase__ ( self :List[Any] , __magic_name__ :DisjointSetTreeNode[T] , __magic_name__ :DisjointSetTreeNode[T] ): '''simple docstring''' if nodea.rank > nodea.rank: a = nodea else: a = nodea if nodea.rank == nodea.rank: nodea.rank += 1 def lowerCamelCase__ ( self :Optional[int] , __magic_name__ :T , __magic_name__ :T ): '''simple docstring''' self.link(self.find_set(__magic_name__ ) , self.find_set(__magic_name__ ) ) class __lowerCAmelCase ( Generic[T] ): def __init__( self :Union[str, Any] ): '''simple docstring''' a = {} def lowerCamelCase__ ( self :Union[str, Any] , __magic_name__ :T ): '''simple docstring''' if node not in self.connections: a = {} def lowerCamelCase__ ( self :Any , __magic_name__ :T , __magic_name__ :T , __magic_name__ :int ): '''simple docstring''' self.add_node(__magic_name__ ) self.add_node(__magic_name__ ) a = weight a = weight def lowerCamelCase__ ( self :int ): '''simple docstring''' a = [] a = set() for start in self.connections: for end in self.connections[start]: if (start, end) not in seen: seen.add((end, start) ) edges.append((start, end, self.connections[start][end]) ) edges.sort(key=lambda __magic_name__ : x[2] ) # creating the disjoint set a = DisjointSetTree[T]() for node in self.connections: disjoint_set.make_set(__magic_name__ ) # MST generation a = 0 a = 0 a = GraphUndirectedWeighted[T]() while num_edges < len(self.connections ) - 1: a , a , a = edges[index] index += 1 a = disjoint_set.find_set(__magic_name__ ) a = disjoint_set.find_set(__magic_name__ ) if parent_u != parent_v: num_edges += 1 graph.add_edge(__magic_name__ , __magic_name__ , __magic_name__ ) disjoint_set.union(__magic_name__ , __magic_name__ ) return graph
347
1
from ..utils import DummyObject, requires_backends class __lowerCAmelCase ( metaclass=__magic_name__ ): UpperCamelCase__ = ['''sentencepiece'''] def __init__( self :Tuple , *__magic_name__ :List[str] , **__magic_name__ :List[Any] ): '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): UpperCamelCase__ = ['''sentencepiece'''] def __init__( self :List[Any] , *__magic_name__ :Optional[int] , **__magic_name__ :List[str] ): '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): UpperCamelCase__ = ['''sentencepiece'''] def __init__( self :List[str] , *__magic_name__ :Optional[Any] , **__magic_name__ :Tuple ): '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): UpperCamelCase__ = ['''sentencepiece'''] def __init__( self :int , *__magic_name__ :Dict , **__magic_name__ :str ): '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): UpperCamelCase__ = ['''sentencepiece'''] def __init__( self :Dict , *__magic_name__ :Optional[Any] , **__magic_name__ :Optional[int] ): '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): UpperCamelCase__ = ['''sentencepiece'''] def __init__( self :Any , *__magic_name__ :str , **__magic_name__ :List[Any] ): '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): UpperCamelCase__ = ['''sentencepiece'''] def __init__( self :Optional[Any] , *__magic_name__ :Union[str, Any] , **__magic_name__ :Tuple ): '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): UpperCamelCase__ = ['''sentencepiece'''] def __init__( self :Tuple , *__magic_name__ :Any , **__magic_name__ :str ): '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): UpperCamelCase__ = ['''sentencepiece'''] def __init__( self :Tuple , *__magic_name__ :Optional[Any] , **__magic_name__ :List[str] ): '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): UpperCamelCase__ = ['''sentencepiece'''] def __init__( self :Dict , *__magic_name__ :Optional[int] , **__magic_name__ :Dict ): '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): UpperCamelCase__ = ['''sentencepiece'''] def __init__( self :Union[str, Any] , *__magic_name__ :Optional[Any] , **__magic_name__ :Union[str, Any] ): '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): UpperCamelCase__ = ['''sentencepiece'''] def __init__( self :List[Any] , *__magic_name__ :List[Any] , **__magic_name__ :Any ): '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): UpperCamelCase__ = ['''sentencepiece'''] def __init__( self :Optional[Any] , *__magic_name__ :List[str] , **__magic_name__ :Optional[int] ): '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): UpperCamelCase__ = ['''sentencepiece'''] def __init__( self :Dict , *__magic_name__ :List[str] , **__magic_name__ :str ): '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): UpperCamelCase__ = ['''sentencepiece'''] def __init__( self :str , *__magic_name__ :Any , **__magic_name__ :str ): '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): UpperCamelCase__ = ['''sentencepiece'''] def __init__( self :List[Any] , *__magic_name__ :Any , **__magic_name__ :Optional[Any] ): '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): UpperCamelCase__ = ['''sentencepiece'''] def __init__( self :List[str] , *__magic_name__ :Union[str, Any] , **__magic_name__ :Dict ): '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): UpperCamelCase__ = ['''sentencepiece'''] def __init__( self :List[Any] , *__magic_name__ :Optional[int] , **__magic_name__ :Any ): '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): UpperCamelCase__ = ['''sentencepiece'''] def __init__( self :List[str] , *__magic_name__ :str , **__magic_name__ :int ): '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): UpperCamelCase__ = ['''sentencepiece'''] def __init__( self :List[Any] , *__magic_name__ :Optional[Any] , **__magic_name__ :Optional[Any] ): '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): UpperCamelCase__ = ['''sentencepiece'''] def __init__( self :Dict , *__magic_name__ :Union[str, Any] , **__magic_name__ :Tuple ): '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): UpperCamelCase__ = ['''sentencepiece'''] def __init__( self :List[Any] , *__magic_name__ :Optional[int] , **__magic_name__ :Optional[int] ): '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): UpperCamelCase__ = ['''sentencepiece'''] def __init__( self :Tuple , *__magic_name__ :Optional[Any] , **__magic_name__ :List[str] ): '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): UpperCamelCase__ = ['''sentencepiece'''] def __init__( self :int , *__magic_name__ :Optional[Any] , **__magic_name__ :int ): '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): UpperCamelCase__ = ['''sentencepiece'''] def __init__( self :int , *__magic_name__ :Dict , **__magic_name__ :List[str] ): '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): UpperCamelCase__ = ['''sentencepiece'''] def __init__( self :Union[str, Any] , *__magic_name__ :Union[str, Any] , **__magic_name__ :List[Any] ): '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): UpperCamelCase__ = ['''sentencepiece'''] def __init__( self :Dict , *__magic_name__ :Dict , **__magic_name__ :List[str] ): '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): UpperCamelCase__ = ['''sentencepiece'''] def __init__( self :Any , *__magic_name__ :List[Any] , **__magic_name__ :str ): '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): UpperCamelCase__ = ['''sentencepiece'''] def __init__( self :Optional[Any] , *__magic_name__ :Tuple , **__magic_name__ :Any ): '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): UpperCamelCase__ = ['''sentencepiece'''] def __init__( self :Any , *__magic_name__ :Optional[Any] , **__magic_name__ :Optional[int] ): '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): UpperCamelCase__ = ['''sentencepiece'''] def __init__( self :Any , *__magic_name__ :Union[str, Any] , **__magic_name__ :Any ): '''simple docstring''' requires_backends(self , ["""sentencepiece"""] )
347
import shutil import tempfile import unittest import numpy as np import pytest from transformers.testing_utils import require_vision from transformers.utils import is_vision_available if is_vision_available(): from PIL import Image from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast @require_vision class __lowerCAmelCase ( unittest.TestCase ): def lowerCamelCase__ ( self :Any ): '''simple docstring''' a = tempfile.mkdtemp() a = BlipImageProcessor() a = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-BertModel""" ) a = BlipProcessor(__magic_name__ , __magic_name__ ) processor.save_pretrained(self.tmpdirname ) def lowerCamelCase__ ( self :List[Any] , **__magic_name__ :Union[str, Any] ): '''simple docstring''' return AutoProcessor.from_pretrained(self.tmpdirname , **__magic_name__ ).tokenizer def lowerCamelCase__ ( self :str , **__magic_name__ :List[str] ): '''simple docstring''' return AutoProcessor.from_pretrained(self.tmpdirname , **__magic_name__ ).image_processor def lowerCamelCase__ ( self :int ): '''simple docstring''' shutil.rmtree(self.tmpdirname ) def lowerCamelCase__ ( self :Any ): '''simple docstring''' a = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] a = [Image.fromarray(np.moveaxis(__magic_name__ , 0 , -1 ) ) for x in image_inputs] return image_inputs def lowerCamelCase__ ( self :Optional[int] ): '''simple docstring''' a = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) a = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" ) a = self.get_image_processor(do_normalize=__magic_name__ , padding_value=1.0 ) a = BlipProcessor.from_pretrained( self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=__magic_name__ , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , __magic_name__ ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , __magic_name__ ) def lowerCamelCase__ ( self :Any ): '''simple docstring''' a = self.get_image_processor() a = self.get_tokenizer() a = BlipProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ ) a = self.prepare_image_inputs() a = image_processor(__magic_name__ , return_tensors="""np""" ) a = processor(images=__magic_name__ , return_tensors="""np""" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) def lowerCamelCase__ ( self :Any ): '''simple docstring''' a = self.get_image_processor() a = self.get_tokenizer() a = BlipProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ ) a = """lower newer""" a = processor(text=__magic_name__ ) a = tokenizer(__magic_name__ , return_token_type_ids=__magic_name__ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def lowerCamelCase__ ( self :List[Any] ): '''simple docstring''' a = self.get_image_processor() a = self.get_tokenizer() a = BlipProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ ) a = """lower newer""" a = self.prepare_image_inputs() a = processor(text=__magic_name__ , images=__magic_name__ ) self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """input_ids""", """attention_mask"""] ) # test if it raises when no input is passed with pytest.raises(__magic_name__ ): processor() def lowerCamelCase__ ( self :List[str] ): '''simple docstring''' a = self.get_image_processor() a = self.get_tokenizer() a = BlipProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ ) a = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] a = processor.batch_decode(__magic_name__ ) a = tokenizer.batch_decode(__magic_name__ ) self.assertListEqual(__magic_name__ , __magic_name__ ) def lowerCamelCase__ ( self :Optional[int] ): '''simple docstring''' a = self.get_image_processor() a = self.get_tokenizer() a = BlipProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ ) a = """lower newer""" a = self.prepare_image_inputs() a = processor(text=__magic_name__ , images=__magic_name__ ) # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask'] self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """input_ids""", """attention_mask"""] )
347
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_speech_available, is_torch_available, ) __UpperCamelCase : Union[str, Any] = { "configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"], "processing_trocr": ["TrOCRProcessor"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : Union[str, Any] = [ "TROCR_PRETRAINED_MODEL_ARCHIVE_LIST", "TrOCRForCausalLM", "TrOCRPreTrainedModel", ] if TYPE_CHECKING: from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig from .processing_trocr import TrOCRProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel else: import sys __UpperCamelCase : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
347
from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices __UpperCamelCase : Optional[Any] = logging.get_logger(__name__) __UpperCamelCase : int = { "shi-labs/nat-mini-in1k-224": "https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json", # See all Nat models at https://huggingface.co/models?filter=nat } class __lowerCAmelCase ( __magic_name__ , __magic_name__ ): UpperCamelCase__ = '''nat''' UpperCamelCase__ = { '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers''', } def __init__( self :Any , __magic_name__ :int=4 , __magic_name__ :Dict=3 , __magic_name__ :List[str]=64 , __magic_name__ :Optional[int]=[3, 4, 6, 5] , __magic_name__ :int=[2, 4, 8, 16] , __magic_name__ :str=7 , __magic_name__ :Tuple=3.0 , __magic_name__ :Dict=True , __magic_name__ :List[Any]=0.0 , __magic_name__ :List[Any]=0.0 , __magic_name__ :List[Any]=0.1 , __magic_name__ :Optional[Any]="gelu" , __magic_name__ :Optional[Any]=0.02 , __magic_name__ :Tuple=1E-5 , __magic_name__ :Union[str, Any]=0.0 , __magic_name__ :int=None , __magic_name__ :Any=None , **__magic_name__ :Dict , ): '''simple docstring''' super().__init__(**__magic_name__ ) a = patch_size a = num_channels a = embed_dim a = depths a = len(__magic_name__ ) a = num_heads a = kernel_size a = mlp_ratio a = qkv_bias a = hidden_dropout_prob a = attention_probs_dropout_prob a = drop_path_rate a = hidden_act a = layer_norm_eps a = initializer_range # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model a = int(embed_dim * 2 ** (len(__magic_name__ ) - 1) ) a = layer_scale_init_value a = ["""stem"""] + [F'stage{idx}' for idx in range(1 , len(__magic_name__ ) + 1 )] a , a = get_aligned_output_features_output_indices( out_features=__magic_name__ , out_indices=__magic_name__ , stage_names=self.stage_names )
347
1
import copy import os import tempfile from unittest import TestCase from unittest.mock import patch import numpy as np import pyarrow as pa import pyarrow.parquet as pq import pytest from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence from datasets.features import ArrayaD, ClassLabel, Features, Image, Value from datasets.features.features import ArrayaDExtensionType, cast_to_python_objects from datasets.keyhash import DuplicatedKeysError, InvalidKeyError from .utils import require_pil class __lowerCAmelCase ( __magic_name__ ): def lowerCamelCase__ ( self :Optional[int] ): '''simple docstring''' a = pa.array(TypedSequence([1, 2, 3] ) ) self.assertEqual(arr.type , pa.intaa() ) def lowerCamelCase__ ( self :Optional[int] ): '''simple docstring''' with self.assertRaises(__magic_name__ ): a = pa.array(TypedSequence([1, 2, 3] ) , type=pa.intaa() ) def lowerCamelCase__ ( self :Optional[int] ): '''simple docstring''' with self.assertRaises(__magic_name__ ): a = pa.array(TypedSequence([1, 2, 3] , try_type=Value("""bool""" ) , type=Value("""int64""" ) ) ) def lowerCamelCase__ ( self :Tuple ): '''simple docstring''' a = pa.array(TypedSequence([1, 2, 3] , type=Value("""int32""" ) ) ) self.assertEqual(arr.type , pa.intaa() ) def lowerCamelCase__ ( self :Any ): '''simple docstring''' with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ): a = pa.array(TypedSequence(["""foo""", """bar"""] , type=Value("""int64""" ) ) ) def lowerCamelCase__ ( self :Optional[int] ): '''simple docstring''' a = pa.array(TypedSequence([1, 2, 3] , try_type=Value("""int32""" ) ) ) self.assertEqual(arr.type , pa.intaa() ) def lowerCamelCase__ ( self :List[Any] ): '''simple docstring''' a = pa.array(TypedSequence(["""foo""", """bar"""] , try_type=Value("""int64""" ) ) ) self.assertEqual(arr.type , pa.string() ) def lowerCamelCase__ ( self :Dict ): '''simple docstring''' a = pa.array(TypedSequence([[[1, 2, 3]]] , type=ArrayaD((1, 3) , """int64""" ) ) ) self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , """int64""" ) ) def lowerCamelCase__ ( self :Dict ): '''simple docstring''' with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ): a = pa.array(TypedSequence(["""foo""", """bar"""] , type=ArrayaD((1, 3) , """int64""" ) ) ) def lowerCamelCase__ ( self :List[str] ): '''simple docstring''' a = pa.array(TypedSequence([[[1, 2, 3]]] , try_type=ArrayaD((1, 3) , """int64""" ) ) ) self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , """int64""" ) ) def lowerCamelCase__ ( self :List[str] ): '''simple docstring''' a = pa.array(TypedSequence(["""foo""", """bar"""] , try_type=ArrayaD((1, 3) , """int64""" ) ) ) self.assertEqual(arr.type , pa.string() ) @require_pil def lowerCamelCase__ ( self :Any ): '''simple docstring''' import PIL.Image a = PIL.Image.fromarray(np.arange(10 , dtype=np.uinta ).reshape(2 , 5 ) ) with patch( """datasets.arrow_writer.cast_to_python_objects""" , side_effect=__magic_name__ ) as mock_cast_to_python_objects: a = pa.array(TypedSequence([{"""path""": None, """bytes""": b"""image_bytes"""}, pil_image] , type=Image() ) ) a , a = mock_cast_to_python_objects.call_args_list[-1] self.assertIn("""optimize_list_casting""" , __magic_name__ ) self.assertFalse(kwargs["""optimize_list_casting"""] ) def __A ( __lowerCamelCase , __lowerCamelCase ) -> List[str]: a = pa.BufferReader(__lowerCamelCase ) if isinstance(__lowerCamelCase , pa.Buffer ) else pa.memory_map(__lowerCamelCase ) a = pa.ipc.open_stream(__lowerCamelCase ) a = f.read_all() assert len(pa_table.to_batches() ) == expected_num_chunks assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]} del pa_table @pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 10] ) @pytest.mark.parametrize( """fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] ) def __A ( __lowerCamelCase , __lowerCamelCase ) -> Optional[Any]: a = pa.BufferOutputStream() a = pa.schema(__lowerCamelCase ) if fields else None with ArrowWriter(stream=__lowerCamelCase , schema=__lowerCamelCase , writer_batch_size=__lowerCamelCase ) as writer: writer.write({"""col_1""": """foo""", """col_2""": 1} ) writer.write({"""col_1""": """bar""", """col_2""": 2} ) a , a = writer.finalize() assert num_examples == 2 assert num_bytes > 0 if not fields: a = {"""col_1""": pa.string(), """col_2""": pa.intaa()} assert writer._schema == pa.schema(__lowerCamelCase , metadata=writer._schema.metadata ) _check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 ) def __A ( ) -> List[str]: a = pa.BufferOutputStream() a = Features({"""labels""": ClassLabel(names=["""neg""", """pos"""] )} ) with ArrowWriter(stream=__lowerCamelCase , features=__lowerCamelCase ) as writer: writer.write({"""labels""": 0} ) writer.write({"""labels""": 1} ) a , a = writer.finalize() assert num_examples == 2 assert num_bytes > 0 assert writer._schema == features.arrow_schema assert writer._schema.metadata == features.arrow_schema.metadata a = pa.BufferReader(output.getvalue() ) a = pa.ipc.open_stream(__lowerCamelCase ) a = f.read_all() a = pa_table.schema assert pa_table.num_rows == 2 assert schema == features.arrow_schema assert schema.metadata == features.arrow_schema.metadata assert features == Features.from_arrow_schema(__lowerCamelCase ) @pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 10] ) def __A ( __lowerCamelCase ) -> Union[str, Any]: a = pa.BufferOutputStream() with ArrowWriter( stream=__lowerCamelCase , writer_batch_size=__lowerCamelCase , hash_salt="""split_name""" , check_duplicates=__lowerCamelCase , ) as writer: with pytest.raises(__lowerCamelCase ): writer.write({"""col_1""": """foo""", """col_2""": 1} , key=[1, 2] ) a , a = writer.finalize() @pytest.mark.parametrize("""writer_batch_size""" , [None, 2, 10] ) def __A ( __lowerCamelCase ) -> Any: a = pa.BufferOutputStream() with ArrowWriter( stream=__lowerCamelCase , writer_batch_size=__lowerCamelCase , hash_salt="""split_name""" , check_duplicates=__lowerCamelCase , ) as writer: with pytest.raises(__lowerCamelCase ): writer.write({"""col_1""": """foo""", """col_2""": 1} , key=10 ) writer.write({"""col_1""": """bar""", """col_2""": 2} , key=10 ) a , a = writer.finalize() @pytest.mark.parametrize("""writer_batch_size""" , [None, 2, 10] ) def __A ( __lowerCamelCase ) -> str: a = pa.BufferOutputStream() with ArrowWriter( stream=__lowerCamelCase , writer_batch_size=__lowerCamelCase , hash_salt="""split_name""" , check_duplicates=__lowerCamelCase , ) as writer: writer.write({"""col_1""": """foo""", """col_2""": 1} , key=1 ) writer.write({"""col_1""": """bar""", """col_2""": 2} , key=2 ) a , a = writer.finalize() assert num_examples == 2 assert num_bytes > 0 _check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 ) @pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 10] ) @pytest.mark.parametrize( """fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] ) def __A ( __lowerCamelCase , __lowerCamelCase ) -> Optional[Any]: a = pa.BufferOutputStream() a = pa.schema(__lowerCamelCase ) if fields else None with ArrowWriter(stream=__lowerCamelCase , schema=__lowerCamelCase , writer_batch_size=__lowerCamelCase ) as writer: writer.write_batch({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} ) writer.write_batch({"""col_1""": [], """col_2""": []} ) a , a = writer.finalize() assert num_examples == 2 assert num_bytes > 0 if not fields: a = {"""col_1""": pa.string(), """col_2""": pa.intaa()} assert writer._schema == pa.schema(__lowerCamelCase , metadata=writer._schema.metadata ) _check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 ) @pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 10] ) @pytest.mark.parametrize( """fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] ) def __A ( __lowerCamelCase , __lowerCamelCase ) -> Union[str, Any]: a = pa.BufferOutputStream() a = pa.schema(__lowerCamelCase ) if fields else None with ArrowWriter(stream=__lowerCamelCase , schema=__lowerCamelCase , writer_batch_size=__lowerCamelCase ) as writer: writer.write_table(pa.Table.from_pydict({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} ) ) a , a = writer.finalize() assert num_examples == 2 assert num_bytes > 0 if not fields: a = {"""col_1""": pa.string(), """col_2""": pa.intaa()} assert writer._schema == pa.schema(__lowerCamelCase , metadata=writer._schema.metadata ) _check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 ) @pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 10] ) @pytest.mark.parametrize( """fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] ) def __A ( __lowerCamelCase , __lowerCamelCase ) -> Union[str, Any]: a = pa.BufferOutputStream() a = pa.schema(__lowerCamelCase ) if fields else None with ArrowWriter(stream=__lowerCamelCase , schema=__lowerCamelCase , writer_batch_size=__lowerCamelCase ) as writer: writer.write_row(pa.Table.from_pydict({"""col_1""": ["""foo"""], """col_2""": [1]} ) ) writer.write_row(pa.Table.from_pydict({"""col_1""": ["""bar"""], """col_2""": [2]} ) ) a , a = writer.finalize() assert num_examples == 2 assert num_bytes > 0 if not fields: a = {"""col_1""": pa.string(), """col_2""": pa.intaa()} assert writer._schema == pa.schema(__lowerCamelCase , metadata=writer._schema.metadata ) _check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 ) def __A ( ) -> Optional[int]: with tempfile.TemporaryDirectory() as tmp_dir: a = {"""col_1""": pa.string(), """col_2""": pa.intaa()} a = os.path.join(__lowerCamelCase , """test.arrow""" ) with ArrowWriter(path=__lowerCamelCase , schema=pa.schema(__lowerCamelCase ) ) as writer: writer.write_batch({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} ) a , a = writer.finalize() assert num_examples == 2 assert num_bytes > 0 assert writer._schema == pa.schema(__lowerCamelCase , metadata=writer._schema.metadata ) _check_output(__lowerCamelCase , 1 ) def __A ( __lowerCamelCase ) -> Any: if pa.types.is_list(__lowerCamelCase ): return get_base_dtype(arr_type.value_type ) else: return arr_type def __A ( __lowerCamelCase , __lowerCamelCase ) -> Any: if isinstance(lst[0] , __lowerCamelCase ): change_first_primitive_element_in_list(lst[0] , __lowerCamelCase ) else: a = value @pytest.mark.parametrize("""optimized_int_type, expected_dtype""" , [(None, pa.intaa()), (Value("""int32""" ), pa.intaa())] ) @pytest.mark.parametrize("""sequence""" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] ) def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Any: a = pa.array(TypedSequence(__lowerCamelCase , optimized_int_type=__lowerCamelCase ) ) assert get_base_dtype(arr.type ) == expected_dtype @pytest.mark.parametrize( """col, expected_dtype""" , [ ("""attention_mask""", pa.inta()), ("""special_tokens_mask""", pa.inta()), ("""token_type_ids""", pa.inta()), ("""input_ids""", pa.intaa()), ("""other""", pa.intaa()), ] , ) @pytest.mark.parametrize("""sequence""" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] ) def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> List[Any]: # in range a = pa.array(OptimizedTypedSequence(__lowerCamelCase , col=__lowerCamelCase ) ) assert get_base_dtype(arr.type ) == expected_dtype # not in range if col != "other": # avoids errors due to in-place modifications a = copy.deepcopy(__lowerCamelCase ) a = np.iinfo(expected_dtype.to_pandas_dtype() ).max + 1 change_first_primitive_element_in_list(__lowerCamelCase , __lowerCamelCase ) a = pa.array(OptimizedTypedSequence(__lowerCamelCase , col=__lowerCamelCase ) ) assert get_base_dtype(arr.type ) == pa.intaa() @pytest.mark.parametrize("""raise_exception""" , [False, True] ) def __A ( __lowerCamelCase , __lowerCamelCase ) -> Any: a = str(tmp_path / """dataset-train.arrow""" ) try: with ArrowWriter(path=__lowerCamelCase ) as writer: if raise_exception: raise pa.lib.ArrowInvalid() else: writer.stream.close() except pa.lib.ArrowInvalid: pass finally: assert writer.stream.closed def __A ( __lowerCamelCase ) -> Optional[Any]: a = """mock://dataset-train.arrow""" with ArrowWriter(path=__lowerCamelCase , storage_options=mockfs.storage_options ) as writer: assert isinstance(writer._fs , type(__lowerCamelCase ) ) assert writer._fs.storage_options == mockfs.storage_options writer.write({"""col_1""": """foo""", """col_2""": 1} ) writer.write({"""col_1""": """bar""", """col_2""": 2} ) a , a = writer.finalize() assert num_examples == 2 assert num_bytes > 0 assert mockfs.exists(__lowerCamelCase ) def __A ( ) -> List[str]: a = pa.BufferOutputStream() with ParquetWriter(stream=__lowerCamelCase ) as writer: writer.write({"""col_1""": """foo""", """col_2""": 1} ) writer.write({"""col_1""": """bar""", """col_2""": 2} ) a , a = writer.finalize() assert num_examples == 2 assert num_bytes > 0 a = pa.BufferReader(output.getvalue() ) a = pq.read_table(__lowerCamelCase ) assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]} @require_pil @pytest.mark.parametrize("""embed_local_files""" , [False, True] ) def __A ( __lowerCamelCase , __lowerCamelCase ) -> Dict: import PIL.Image a = str(tmp_path / """test_image_rgb.jpg""" ) PIL.Image.fromarray(np.zeros((5, 5) , dtype=np.uinta ) ).save(__lowerCamelCase , format="""png""" ) a = pa.BufferOutputStream() with ParquetWriter( stream=__lowerCamelCase , features=Features({"""image""": Image()} ) , embed_local_files=__lowerCamelCase ) as writer: writer.write({"""image""": image_path} ) writer.finalize() a = pa.BufferReader(output.getvalue() ) a = pq.read_table(__lowerCamelCase ) a = pa_table.to_pydict() if embed_local_files: assert isinstance(out["""image"""][0]["""path"""] , __lowerCamelCase ) with open(__lowerCamelCase , """rb""" ) as f: assert out["image"][0]["bytes"] == f.read() else: assert out["image"][0]["path"] == image_path assert out["image"][0]["bytes"] is None def __A ( ) -> Tuple: a = pa.schema([pa.field("""col_1""" , pa.string() , nullable=__lowerCamelCase )] ) a = pa.BufferOutputStream() with ArrowWriter(stream=__lowerCamelCase ) as writer: writer._build_writer(inferred_schema=__lowerCamelCase ) assert writer._schema == pa.schema([pa.field("""col_1""" , pa.string() )] )
347
import argparse import json import os import tensorstore as ts import torch from flax import serialization from flax.traverse_util import flatten_dict, unflatten_dict from tensorflow.io import gfile from transformers.modeling_utils import dtype_byte_size from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import ( rename_keys, ) from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME from transformers.utils.hub import convert_file_size_to_int def __A ( __lowerCamelCase , __lowerCamelCase ) -> Optional[int]: if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3: # expert layer a = flax_key_tuple[:-1] + ("""weight""",) a = torch.permute(__lowerCamelCase , (0, 2, 1) ) elif flax_key_tuple[-1] == "kernel" and ".".join(__lowerCamelCase ): # linear layer a = flax_key_tuple[:-1] + ("""weight""",) a = flax_tensor.T elif flax_key_tuple[-1] in ["scale", "embedding"]: a = flax_key_tuple[:-1] + ("""weight""",) return flax_key_tuple, flax_tensor def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> List[Any]: if "metadata" in layer: a = layer.split("""metadata""" ) a = """""".join(split_layer[0] )[:-1] a = [tuple(("""metadata""" + split_layer[1]).split("""/""" ) )] elif "kvstore" in layer: a = layer.split("""kvstore""" ) a = """""".join(split_layer[0] )[:-1] a = [tuple(("""kvstore""" + split_layer[1]).split("""/""" ) )] else: a = layer.split("""/""" ) a = """/""".join(split_layer[:-1] ) a = (split_layer[-1],) if "kvstore/path" in layer: a = f'{switch_checkpoint_path}/{checkpoint_info[layer]}' elif "kvstore/driver" in layer: a = """file""" else: a = checkpoint_info[layer] return curr_real_layer_name, split_layer, content def __A ( __lowerCamelCase , __lowerCamelCase ) -> Union[str, Any]: a = rename_keys(__lowerCamelCase ) a = {} for k, v in current_block.items(): a = v a = new_current_block torch.save(__lowerCamelCase , __lowerCamelCase ) def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = WEIGHTS_NAME ) -> List[str]: a = convert_file_size_to_int(__lowerCamelCase ) a = [] a = {} a = 0 a = 0 os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase ) with gfile.GFile(switch_checkpoint_path + """/checkpoint""" , """rb""" ) as fp: a = serialization.msgpack_restore(fp.read() )["""optimizer"""]["""target"""] a = flatten_dict(__lowerCamelCase , sep="""/""" ) a = {} for layer in checkpoint_info.keys(): a , a , a = get_key_and_tensorstore_dict( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) if curr_real_layer_name in all_layers: a = content else: a = {split_layer[-1]: content} for key in all_layers.keys(): # open tensorstore file a = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result() a = torch.tensor(__lowerCamelCase ) a = raw_weights.numel() * dtype_byte_size(raw_weights.dtype ) # use the renaming pattern from the small conversion scripts a , a = rename_base_flax_keys(tuple(key.split("""/""" ) ) , __lowerCamelCase ) a = """/""".join(__lowerCamelCase ) # If this weight is going to tip up over the maximal size, we split. if current_block_size + weight_size > max_shard_size: a = os.path.join( __lowerCamelCase , weights_name.replace(""".bin""" , f'-{len(__lowerCamelCase )+1:05d}-of-???.bin' ) ) rename_and_save_block(__lowerCamelCase , __lowerCamelCase ) sharded_state_dicts.append(current_block.keys() ) del current_block a = {} a = 0 a = raw_weights.to(getattr(__lowerCamelCase , __lowerCamelCase ) ) current_block_size += weight_size total_size += weight_size # Add the last block a = os.path.join(__lowerCamelCase , weights_name.replace(""".bin""" , f'-{len(__lowerCamelCase )+1:05d}-of-???.bin' ) ) rename_and_save_block(__lowerCamelCase , __lowerCamelCase ) sharded_state_dicts.append(current_block.keys() ) # If we only have one shard, we return it if len(__lowerCamelCase ) == 1: return {weights_name: sharded_state_dicts[0]}, None # Otherwise, let's build the index a = {} a = {} for idx, shard in enumerate(__lowerCamelCase ): a = weights_name.replace( """.bin""" , f'-{idx+1:05d}-of-{len(__lowerCamelCase ):05d}.bin' ) # len(sharded_state_dicts):05d} a = os.path.join(__lowerCamelCase , weights_name.replace(""".bin""" , f'-{idx+1:05d}-of-???.bin' ) ) os.rename(__lowerCamelCase , os.path.join(__lowerCamelCase , __lowerCamelCase ) ) a = shard for key in shard: a = shard_file # Add the metadata a = {"""total_size""": total_size} a = {"""metadata""": metadata, """weight_map""": weight_map} with open(os.path.join(__lowerCamelCase , __lowerCamelCase ) , """w""" , encoding="""utf-8""" ) as f: a = json.dumps(__lowerCamelCase , indent=2 , sort_keys=__lowerCamelCase ) + """\n""" f.write(__lowerCamelCase ) return metadata, index if __name__ == "__main__": __UpperCamelCase : Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--switch_t5x_checkpoint_path", default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600", type=str, required=False, help="Path to a directory containing a folder per layer. Follows the original Google format.", ) parser.add_argument("--max_shard_size", default="10GB", required=False, help="Max shard size") parser.add_argument("--dtype", default="bfloat16", type=str, required=False, help="dtype of the saved model") parser.add_argument( "--pytorch_dump_folder_path", default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted", type=str, required=False, help="Path to the output pytorch model.", ) __UpperCamelCase : Any = parser.parse_args() shard_on_the_fly( args.switch_tax_checkpoint_path, args.pytorch_dump_folder_path, args.max_shard_size, args.dtype, ) def __A ( ) -> Tuple: from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer a = SwitchTransformersConfig.from_pretrained("""google/switch-base-8""" ) config.save_pretrained("""/home/arthur_huggingface_co/transformers/switch_converted""" ) a = SwitchTransformersForConditionalGeneration.from_pretrained( """/home/arthur_huggingface_co/transformers/switch_converted""" , device_map="""auto""" ) a = TaTokenizer.from_pretrained("""t5-small""" ) a = """A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.""" a = tokenizer(__lowerCamelCase , return_tensors="""pt""" ).input_ids a = model.generate(__lowerCamelCase , decoder_start_token_id=0 ) print(tokenizer.decode(out[0] ) )
347
1
from typing import Any def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , ) -> list: _validation( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , ) # Creates data structures and fill initial step a = {} a = {} for state in states_space: a = observations_space[0] a = ( initial_probabilities[state] * emission_probabilities[state][observation] ) a = None # Fills the data structure with the probabilities of # different transitions and pointers to previous states for o in range(1 , len(__lowerCamelCase ) ): a = observations_space[o] a = observations_space[o - 1] for state in states_space: # Calculates the argmax for probability function a = """""" a = -1 for k_state in states_space: a = ( probabilities[(k_state, prior_observation)] * transition_probabilities[k_state][state] * emission_probabilities[state][observation] ) if probability > max_probability: a = probability a = k_state # Update probabilities and pointers dicts a = ( probabilities[(arg_max, prior_observation)] * transition_probabilities[arg_max][state] * emission_probabilities[state][observation] ) a = arg_max # The final observation a = observations_space[len(__lowerCamelCase ) - 1] # argmax for given final observation a = """""" a = -1 for k_state in states_space: a = probabilities[(k_state, final_observation)] if probability > max_probability: a = probability a = k_state a = arg_max # Process pointers backwards a = last_state a = [] for o in range(len(__lowerCamelCase ) - 1 , -1 , -1 ): result.append(__lowerCamelCase ) a = pointers[previous, observations_space[o]] result.reverse() return result def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , ) -> None: _validate_not_empty( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , ) _validate_lists(__lowerCamelCase , __lowerCamelCase ) _validate_dicts( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , ) -> None: if not all( [ observations_space, states_space, initial_probabilities, transition_probabilities, emission_probabilities, ] ): raise ValueError("""There's an empty parameter""" ) def __A ( __lowerCamelCase , __lowerCamelCase ) -> None: _validate_list(__lowerCamelCase , """observations_space""" ) _validate_list(__lowerCamelCase , """states_space""" ) def __A ( __lowerCamelCase , __lowerCamelCase ) -> None: if not isinstance(_object , __lowerCamelCase ): a = f'{var_name} must be a list' raise ValueError(__lowerCamelCase ) else: for x in _object: if not isinstance(__lowerCamelCase , __lowerCamelCase ): a = f'{var_name} must be a list of strings' raise ValueError(__lowerCamelCase ) def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , ) -> None: _validate_dict(__lowerCamelCase , """initial_probabilities""" , __lowerCamelCase ) _validate_nested_dict(__lowerCamelCase , """transition_probabilities""" ) _validate_nested_dict(__lowerCamelCase , """emission_probabilities""" ) def __A ( __lowerCamelCase , __lowerCamelCase ) -> None: _validate_dict(_object , __lowerCamelCase , __lowerCamelCase ) for x in _object.values(): _validate_dict(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = False ) -> None: if not isinstance(_object , __lowerCamelCase ): a = f'{var_name} must be a dict' raise ValueError(__lowerCamelCase ) if not all(isinstance(__lowerCamelCase , __lowerCamelCase ) for x in _object ): a = f'{var_name} all keys must be strings' raise ValueError(__lowerCamelCase ) if not all(isinstance(__lowerCamelCase , __lowerCamelCase ) for x in _object.values() ): a = """nested dictionary """ if nested else """""" a = f'{var_name} {nested_text}all values must be {value_type.__name__}' raise ValueError(__lowerCamelCase ) if __name__ == "__main__": from doctest import testmod testmod()
347
import glob import os import random from string import ascii_lowercase, digits import cva import numpy as np # Parrameters __UpperCamelCase : Union[str, Any] = (720, 1_280) # Height, Width __UpperCamelCase : Any = (0.4, 0.6) # if height or width lower than this scale, drop it. __UpperCamelCase : str = 1 / 100 __UpperCamelCase : Optional[int] = "" __UpperCamelCase : List[Any] = "" __UpperCamelCase : Union[str, Any] = "" __UpperCamelCase : Tuple = 250 def __A ( ) -> None: a , a = get_dataset(__lowerCamelCase , __lowerCamelCase ) for index in range(__lowerCamelCase ): a = random.sample(range(len(__lowerCamelCase ) ) , 4 ) a , a , a = update_image_and_anno( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , filter_scale=__lowerCamelCase , ) # Get random string code: '7b7ad245cdff75241935e4dd860f3bad' a = random_chars(32 ) a = path.split(os.sep )[-1].rsplit(""".""" , 1 )[0] a = f'{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}' cva.imwrite(f'{file_root}.jpg' , __lowerCamelCase , [cva.IMWRITE_JPEG_QUALITY, 85] ) print(f'Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}' ) a = [] for anno in new_annos: a = anno[3] - anno[1] a = anno[4] - anno[2] a = anno[1] + width / 2 a = anno[2] + height / 2 a = f'{anno[0]} {x_center} {y_center} {width} {height}' annos_list.append(__lowerCamelCase ) with open(f'{file_root}.txt' , """w""" ) as outfile: outfile.write("""\n""".join(line for line in annos_list ) ) def __A ( __lowerCamelCase , __lowerCamelCase ) -> tuple[list, list]: a = [] a = [] for label_file in glob.glob(os.path.join(__lowerCamelCase , """*.txt""" ) ): a = label_file.split(os.sep )[-1].rsplit(""".""" , 1 )[0] with open(__lowerCamelCase ) as in_file: a = in_file.readlines() a = os.path.join(__lowerCamelCase , f'{label_name}.jpg' ) a = [] for obj_list in obj_lists: a = obj_list.rstrip("""\n""" ).split(""" """ ) a = float(obj[1] ) - float(obj[3] ) / 2 a = float(obj[2] ) - float(obj[4] ) / 2 a = float(obj[1] ) + float(obj[3] ) / 2 a = float(obj[2] ) + float(obj[4] ) / 2 boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] ) if not boxes: continue img_paths.append(__lowerCamelCase ) labels.append(__lowerCamelCase ) return img_paths, labels def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = 0.0 , ) -> tuple[list, list, str]: a = np.zeros([output_size[0], output_size[1], 3] , dtype=np.uinta ) a = scale_range[0] + random.random() * (scale_range[1] - scale_range[0]) a = scale_range[0] + random.random() * (scale_range[1] - scale_range[0]) a = int(scale_x * output_size[1] ) a = int(scale_y * output_size[0] ) a = [] a = [] for i, index in enumerate(__lowerCamelCase ): a = all_img_list[index] path_list.append(__lowerCamelCase ) a = all_annos[index] a = cva.imread(__lowerCamelCase ) if i == 0: # top-left a = cva.resize(__lowerCamelCase , (divid_point_x, divid_point_y) ) a = img for bbox in img_annos: a = bbox[1] * scale_x a = bbox[2] * scale_y a = bbox[3] * scale_x a = bbox[4] * scale_y new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) elif i == 1: # top-right a = cva.resize(__lowerCamelCase , (output_size[1] - divid_point_x, divid_point_y) ) a = img for bbox in img_annos: a = scale_x + bbox[1] * (1 - scale_x) a = bbox[2] * scale_y a = scale_x + bbox[3] * (1 - scale_x) a = bbox[4] * scale_y new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) elif i == 2: # bottom-left a = cva.resize(__lowerCamelCase , (divid_point_x, output_size[0] - divid_point_y) ) a = img for bbox in img_annos: a = bbox[1] * scale_x a = scale_y + bbox[2] * (1 - scale_y) a = bbox[3] * scale_x a = scale_y + bbox[4] * (1 - scale_y) new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) else: # bottom-right a = cva.resize( __lowerCamelCase , (output_size[1] - divid_point_x, output_size[0] - divid_point_y) ) a = img for bbox in img_annos: a = scale_x + bbox[1] * (1 - scale_x) a = scale_y + bbox[2] * (1 - scale_y) a = scale_x + bbox[3] * (1 - scale_x) a = scale_y + bbox[4] * (1 - scale_y) new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) # Remove bounding box small than scale of filter if filter_scale > 0: a = [ anno for anno in new_anno if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2]) ] return output_img, new_anno, path_list[0] def __A ( __lowerCamelCase ) -> str: assert number_char > 1, "The number of character should greater than 1" a = ascii_lowercase + digits return "".join(random.choice(__lowerCamelCase ) for _ in range(__lowerCamelCase ) ) if __name__ == "__main__": main() print("DONE ✅")
347
1
import json import os import shutil import tempfile import unittest from transformers import BatchEncoding, CanineTokenizer from transformers.testing_utils import require_tokenizers, require_torch from transformers.tokenization_utils import AddedToken from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ): UpperCamelCase__ = CanineTokenizer UpperCamelCase__ = False def lowerCamelCase__ ( self :Tuple ): '''simple docstring''' super().setUp() a = CanineTokenizer() tokenizer.save_pretrained(self.tmpdirname ) @cached_property def lowerCamelCase__ ( self :Dict ): '''simple docstring''' return CanineTokenizer.from_pretrained("""google/canine-s""" ) def lowerCamelCase__ ( self :Tuple , **__magic_name__ :Dict ): '''simple docstring''' a = self.tokenizer_class.from_pretrained(self.tmpdirname , **__magic_name__ ) a = 1024 return tokenizer @require_torch def lowerCamelCase__ ( self :int ): '''simple docstring''' a = self.canine_tokenizer a = ["""Life is like a box of chocolates.""", """You never know what you're gonna get."""] # fmt: off a = [5_7344, 76, 105, 102, 101, 32, 105, 115, 32, 108, 105, 107, 101, 32, 97, 32, 98, 111, 120, 32, 111, 102, 32, 99, 104, 111, 99, 111, 108, 97, 116, 101, 115, 46, 5_7345, 0, 0, 0, 0] # fmt: on a = tokenizer(__magic_name__ , padding=__magic_name__ , return_tensors="""pt""" ) self.assertIsInstance(__magic_name__ , __magic_name__ ) a = list(batch.input_ids.numpy()[0] ) self.assertListEqual(__magic_name__ , __magic_name__ ) self.assertEqual((2, 39) , batch.input_ids.shape ) self.assertEqual((2, 39) , batch.attention_mask.shape ) @require_torch def lowerCamelCase__ ( self :Any ): '''simple docstring''' a = self.canine_tokenizer a = ["""Once there was a man.""", """He wrote a test in HuggingFace Tranformers."""] a = tokenizer(__magic_name__ , padding=__magic_name__ , return_tensors="""pt""" ) # check if input_ids, attention_mask and token_type_ids are returned self.assertIn("""input_ids""" , __magic_name__ ) self.assertIn("""attention_mask""" , __magic_name__ ) self.assertIn("""token_type_ids""" , __magic_name__ ) @require_torch def lowerCamelCase__ ( self :Any ): '''simple docstring''' a = self.canine_tokenizer a = [ """What's the weater?""", """It's about 25 degrees.""", ] a = tokenizer( text_target=__magic_name__ , max_length=32 , padding="""max_length""" , truncation=__magic_name__ , return_tensors="""pt""" ) self.assertEqual(32 , targets["""input_ids"""].shape[1] ) def lowerCamelCase__ ( self :Union[str, Any] ): '''simple docstring''' a = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): self.assertNotEqual(tokenizer.model_max_length , 42 ) # Now let's start the test a = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): # Isolate this from the other tests because we save additional tokens/etc a = tempfile.mkdtemp() a = """ He is very happy, UNwant\u00E9d,running""" a = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) tokenizer.save_pretrained(__magic_name__ ) a = tokenizer.__class__.from_pretrained(__magic_name__ ) a = after_tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) self.assertListEqual(__magic_name__ , __magic_name__ ) shutil.rmtree(__magic_name__ ) a = self.get_tokenizers(model_max_length=42 ) for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): # Isolate this from the other tests because we save additional tokens/etc a = tempfile.mkdtemp() a = """ He is very happy, UNwant\u00E9d,running""" a = tokenizer.additional_special_tokens # We can add a new special token for Canine as follows: a = chr(0Xe_0_0_7 ) additional_special_tokens.append(__magic_name__ ) tokenizer.add_special_tokens({"""additional_special_tokens""": additional_special_tokens} ) a = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) tokenizer.save_pretrained(__magic_name__ ) a = tokenizer.__class__.from_pretrained(__magic_name__ ) a = after_tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) self.assertListEqual(__magic_name__ , __magic_name__ ) self.assertIn(__magic_name__ , after_tokenizer.additional_special_tokens ) self.assertEqual(after_tokenizer.model_max_length , 42 ) a = tokenizer.__class__.from_pretrained(__magic_name__ , model_max_length=43 ) self.assertEqual(tokenizer.model_max_length , 43 ) shutil.rmtree(__magic_name__ ) def lowerCamelCase__ ( self :List[Any] ): '''simple docstring''' a = self.get_tokenizers(do_lower_case=__magic_name__ ) for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): a , a = self.get_clean_sequence(__magic_name__ ) # a special token for Canine can be defined as follows: a = 0Xe_0_0_5 a = chr(__magic_name__ ) tokenizer.add_special_tokens({"""cls_token""": special_token} ) a = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) self.assertEqual(len(__magic_name__ ) , 1 ) a = tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=__magic_name__ ) a = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) a = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) a = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) self.assertEqual(__magic_name__ , input_encoded + special_token_id ) a = tokenizer.decode(__magic_name__ , skip_special_tokens=__magic_name__ ) self.assertTrue(special_token not in decoded ) def lowerCamelCase__ ( self :int ): '''simple docstring''' a = self.get_tokenizers(do_lower_case=__magic_name__ ) for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): a = chr(0Xe_0_0_5 ) a = chr(0Xe_0_0_6 ) # `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py) tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=__magic_name__ ) # `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`, # which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py) tokenizer.add_special_tokens({"""additional_special_tokens""": [SPECIAL_TOKEN_2]} ) a = tokenizer.tokenize(__magic_name__ ) a = tokenizer.tokenize(__magic_name__ ) self.assertEqual(len(__magic_name__ ) , 1 ) self.assertEqual(len(__magic_name__ ) , 1 ) self.assertEqual(token_a[0] , __magic_name__ ) self.assertEqual(token_a[0] , __magic_name__ ) @require_tokenizers def lowerCamelCase__ ( self :Dict ): '''simple docstring''' a = self.get_tokenizers(do_lower_case=__magic_name__ ) for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): # a special token for Canine can be defined as follows: a = 0Xe_0_0_6 a = chr(__magic_name__ ) a = AddedToken(__magic_name__ , lstrip=__magic_name__ ) tokenizer.add_special_tokens({"""additional_special_tokens""": [new_token]} ) with tempfile.TemporaryDirectory() as tmp_dir_name: tokenizer.save_pretrained(__magic_name__ ) tokenizer.from_pretrained(__magic_name__ ) def lowerCamelCase__ ( self :int ): '''simple docstring''' a = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(__magic_name__ ) with open(os.path.join(__magic_name__ , """special_tokens_map.json""" ) , encoding="""utf-8""" ) as json_file: a = json.load(__magic_name__ ) with open(os.path.join(__magic_name__ , """tokenizer_config.json""" ) , encoding="""utf-8""" ) as json_file: a = json.load(__magic_name__ ) # a special token for Canine can be defined as follows: a = 0Xe_0_0_6 a = chr(__magic_name__ ) a = [new_token_a] a = [new_token_a] with open(os.path.join(__magic_name__ , """special_tokens_map.json""" ) , """w""" , encoding="""utf-8""" ) as outfile: json.dump(__magic_name__ , __magic_name__ ) with open(os.path.join(__magic_name__ , """tokenizer_config.json""" ) , """w""" , encoding="""utf-8""" ) as outfile: json.dump(__magic_name__ , __magic_name__ ) # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files a = tokenizer_class.from_pretrained(__magic_name__ , extra_ids=0 ) self.assertIn(__magic_name__ , tokenizer_without_change_in_init.additional_special_tokens ) # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab self.assertEqual( [new_token_a] , tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , ) a = 0Xe_0_0_7 a = chr(__magic_name__ ) # Now we test that we can change the value of additional_special_tokens in the from_pretrained a = [AddedToken(__magic_name__ , lstrip=__magic_name__ )] a = tokenizer_class.from_pretrained( __magic_name__ , additional_special_tokens=__magic_name__ , extra_ids=0 ) self.assertIn(__magic_name__ , tokenizer.additional_special_tokens ) # self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab self.assertEqual( [new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) ) @require_tokenizers def lowerCamelCase__ ( self :Union[str, Any] ): '''simple docstring''' a = self.get_tokenizers(do_lower_case=__magic_name__ ) for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): a = """hello world""" if self.space_between_special_tokens: a = """[CLS] hello world [SEP]""" else: a = input a = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) a = tokenizer.decode(__magic_name__ , spaces_between_special_tokens=self.space_between_special_tokens ) self.assertIn(__magic_name__ , [output, output.lower()] ) def lowerCamelCase__ ( self :List[str] ): '''simple docstring''' a = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): a = [ """bos_token""", """eos_token""", """unk_token""", """sep_token""", """pad_token""", """cls_token""", """mask_token""", ] a = """a""" a = ord(__magic_name__ ) for attr in attributes_list: setattr(__magic_name__ , attr + """_id""" , __magic_name__ ) self.assertEqual(getattr(__magic_name__ , __magic_name__ ) , __magic_name__ ) self.assertEqual(getattr(__magic_name__ , attr + """_id""" ) , __magic_name__ ) setattr(__magic_name__ , attr + """_id""" , __magic_name__ ) self.assertEqual(getattr(__magic_name__ , __magic_name__ ) , __magic_name__ ) self.assertEqual(getattr(__magic_name__ , attr + """_id""" ) , __magic_name__ ) setattr(__magic_name__ , """additional_special_tokens_ids""" , [] ) self.assertListEqual(getattr(__magic_name__ , """additional_special_tokens""" ) , [] ) self.assertListEqual(getattr(__magic_name__ , """additional_special_tokens_ids""" ) , [] ) a = 0Xe_0_0_6 a = chr(__magic_name__ ) setattr(__magic_name__ , """additional_special_tokens_ids""" , [additional_special_token_id] ) self.assertListEqual(getattr(__magic_name__ , """additional_special_tokens""" ) , [additional_special_token] ) self.assertListEqual(getattr(__magic_name__ , """additional_special_tokens_ids""" ) , [additional_special_token_id] ) def lowerCamelCase__ ( self :Any ): '''simple docstring''' pass def lowerCamelCase__ ( self :str ): '''simple docstring''' pass def lowerCamelCase__ ( self :Optional[int] ): '''simple docstring''' pass def lowerCamelCase__ ( self :List[Any] ): '''simple docstring''' pass def lowerCamelCase__ ( self :List[str] ): '''simple docstring''' pass def lowerCamelCase__ ( self :List[str] ): '''simple docstring''' pass def lowerCamelCase__ ( self :Any ): '''simple docstring''' pass def lowerCamelCase__ ( self :List[str] ): '''simple docstring''' pass
347
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available __UpperCamelCase : Optional[Any] = { "configuration_mobilenet_v2": [ "MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileNetV2Config", "MobileNetV2OnnxConfig", ], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : int = ["MobileNetV2FeatureExtractor"] __UpperCamelCase : Tuple = ["MobileNetV2ImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : int = [ "MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST", "MobileNetV2ForImageClassification", "MobileNetV2ForSemanticSegmentation", "MobileNetV2Model", "MobileNetV2PreTrainedModel", "load_tf_weights_in_mobilenet_v2", ] if TYPE_CHECKING: from .configuration_mobilenet_va import ( MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileNetVaConfig, MobileNetVaOnnxConfig, ) try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_mobilenet_va import MobileNetVaFeatureExtractor from .image_processing_mobilenet_va import MobileNetVaImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mobilenet_va import ( MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel, MobileNetVaPreTrainedModel, load_tf_weights_in_mobilenet_va, ) else: import sys __UpperCamelCase : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
347
1
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available __UpperCamelCase : Optional[int] = { "configuration_xlm": ["XLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMConfig", "XLMOnnxConfig"], "tokenization_xlm": ["XLMTokenizer"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : List[Any] = [ "XLM_PRETRAINED_MODEL_ARCHIVE_LIST", "XLMForMultipleChoice", "XLMForQuestionAnswering", "XLMForQuestionAnsweringSimple", "XLMForSequenceClassification", "XLMForTokenClassification", "XLMModel", "XLMPreTrainedModel", "XLMWithLMHeadModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : Union[str, Any] = [ "TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST", "TFXLMForMultipleChoice", "TFXLMForQuestionAnsweringSimple", "TFXLMForSequenceClassification", "TFXLMForTokenClassification", "TFXLMMainLayer", "TFXLMModel", "TFXLMPreTrainedModel", "TFXLMWithLMHeadModel", ] if TYPE_CHECKING: from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig from .tokenization_xlm import XLMTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xlm import ( XLM_PRETRAINED_MODEL_ARCHIVE_LIST, XLMForMultipleChoice, XLMForQuestionAnswering, XLMForQuestionAnsweringSimple, XLMForSequenceClassification, XLMForTokenClassification, XLMModel, XLMPreTrainedModel, XLMWithLMHeadModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xlm import ( TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXLMForMultipleChoice, TFXLMForQuestionAnsweringSimple, TFXLMForSequenceClassification, TFXLMForTokenClassification, TFXLMMainLayer, TFXLMModel, TFXLMPreTrainedModel, TFXLMWithLMHeadModel, ) else: import sys __UpperCamelCase : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
347
def __A ( __lowerCamelCase ) -> bool: if num < 0: return False a = num a = 0 while num > 0: a = rev_num * 10 + (num % 10) num //= 10 return num_copy == rev_num if __name__ == "__main__": import doctest doctest.testmod()
347
1
import dataclasses import json import warnings from dataclasses import dataclass, field from time import time from typing import List from ..utils import logging __UpperCamelCase : int = logging.get_logger(__name__) def __A ( __lowerCamelCase=None , __lowerCamelCase=None ) -> Any: return field(default_factory=lambda: default , metadata=__lowerCamelCase ) @dataclass class __lowerCAmelCase : UpperCamelCase__ = list_field( default=[] , metadata={ '''help''': ( '''Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version''' ''' of all available models''' ) } , ) UpperCamelCase__ = list_field( default=[8] , metadata={'''help''': '''List of batch sizes for which memory and time performance will be evaluated'''} ) UpperCamelCase__ = list_field( default=[8, 32, 128, 512] , metadata={'''help''': '''List of sequence lengths for which memory and time performance will be evaluated'''} , ) UpperCamelCase__ = field( default=__magic_name__ , metadata={'''help''': '''Whether to benchmark inference of model. Inference can be disabled via --no-inference.'''} , ) UpperCamelCase__ = field( default=__magic_name__ , metadata={'''help''': '''Whether to run on available cuda devices. Cuda can be disabled via --no-cuda.'''} , ) UpperCamelCase__ = field( default=__magic_name__ , metadata={'''help''': '''Whether to run on available tpu devices. TPU can be disabled via --no-tpu.'''} ) UpperCamelCase__ = field(default=__magic_name__ , metadata={'''help''': '''Use FP16 to accelerate inference.'''} ) UpperCamelCase__ = field(default=__magic_name__ , metadata={'''help''': '''Benchmark training of model'''} ) UpperCamelCase__ = field(default=__magic_name__ , metadata={'''help''': '''Verbose memory tracing'''} ) UpperCamelCase__ = field( default=__magic_name__ , metadata={'''help''': '''Whether to perform speed measurements. Speed measurements can be disabled via --no-speed.'''} , ) UpperCamelCase__ = field( default=__magic_name__ , metadata={ '''help''': '''Whether to perform memory measurements. Memory measurements can be disabled via --no-memory''' } , ) UpperCamelCase__ = field(default=__magic_name__ , metadata={'''help''': '''Trace memory line by line'''} ) UpperCamelCase__ = field(default=__magic_name__ , metadata={'''help''': '''Save result to a CSV file'''} ) UpperCamelCase__ = field(default=__magic_name__ , metadata={'''help''': '''Save all print statements in a log file'''} ) UpperCamelCase__ = field(default=__magic_name__ , metadata={'''help''': '''Whether to print environment information'''} ) UpperCamelCase__ = field( default=__magic_name__ , metadata={ '''help''': ( '''Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use''' ''' multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled''' ''' for debugging / testing and on TPU.''' ) } , ) UpperCamelCase__ = field( default=F"""inference_time_{round(time() )}.csv""" , metadata={'''help''': '''CSV filename used if saving time results to csv.'''} , ) UpperCamelCase__ = field( default=F"""inference_memory_{round(time() )}.csv""" , metadata={'''help''': '''CSV filename used if saving memory results to csv.'''} , ) UpperCamelCase__ = field( default=F"""train_time_{round(time() )}.csv""" , metadata={'''help''': '''CSV filename used if saving time results to csv for training.'''} , ) UpperCamelCase__ = field( default=F"""train_memory_{round(time() )}.csv""" , metadata={'''help''': '''CSV filename used if saving memory results to csv for training.'''} , ) UpperCamelCase__ = field( default=F"""env_info_{round(time() )}.csv""" , metadata={'''help''': '''CSV filename used if saving environment information.'''} , ) UpperCamelCase__ = field( default=F"""log_{round(time() )}.csv""" , metadata={'''help''': '''Log filename used if print statements are saved in log.'''} , ) UpperCamelCase__ = field(default=3 , metadata={'''help''': '''Times an experiment will be run.'''} ) UpperCamelCase__ = field( default=__magic_name__ , metadata={ '''help''': ( '''Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain''' ''' model weights.''' ) } , ) def lowerCamelCase__ ( self :int ): '''simple docstring''' warnings.warn( F'The class {self.__class__} is deprecated. Hugging Face Benchmarking utils' """ are deprecated in general and it is advised to use external Benchmarking libraries """ """ to benchmark Transformer models.""" , __magic_name__ , ) def lowerCamelCase__ ( self :Optional[Any] ): '''simple docstring''' return json.dumps(dataclasses.asdict(self ) , indent=2 ) @property def lowerCamelCase__ ( self :str ): '''simple docstring''' if len(self.models ) <= 0: raise ValueError( """Please make sure you provide at least one model name / model identifier, *e.g.* `--models""" """ bert-base-cased` or `args.models = ['bert-base-cased'].""" ) return self.models @property def lowerCamelCase__ ( self :Tuple ): '''simple docstring''' if not self.multi_process: return False elif self.is_tpu: logger.info("""Multiprocessing is currently not possible on TPU.""" ) return False else: return True
347
import json import os import shutil import tempfile import unittest from transformers import BatchEncoding, CanineTokenizer from transformers.testing_utils import require_tokenizers, require_torch from transformers.tokenization_utils import AddedToken from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ): UpperCamelCase__ = CanineTokenizer UpperCamelCase__ = False def lowerCamelCase__ ( self :Tuple ): '''simple docstring''' super().setUp() a = CanineTokenizer() tokenizer.save_pretrained(self.tmpdirname ) @cached_property def lowerCamelCase__ ( self :Dict ): '''simple docstring''' return CanineTokenizer.from_pretrained("""google/canine-s""" ) def lowerCamelCase__ ( self :Tuple , **__magic_name__ :Dict ): '''simple docstring''' a = self.tokenizer_class.from_pretrained(self.tmpdirname , **__magic_name__ ) a = 1024 return tokenizer @require_torch def lowerCamelCase__ ( self :int ): '''simple docstring''' a = self.canine_tokenizer a = ["""Life is like a box of chocolates.""", """You never know what you're gonna get."""] # fmt: off a = [5_7344, 76, 105, 102, 101, 32, 105, 115, 32, 108, 105, 107, 101, 32, 97, 32, 98, 111, 120, 32, 111, 102, 32, 99, 104, 111, 99, 111, 108, 97, 116, 101, 115, 46, 5_7345, 0, 0, 0, 0] # fmt: on a = tokenizer(__magic_name__ , padding=__magic_name__ , return_tensors="""pt""" ) self.assertIsInstance(__magic_name__ , __magic_name__ ) a = list(batch.input_ids.numpy()[0] ) self.assertListEqual(__magic_name__ , __magic_name__ ) self.assertEqual((2, 39) , batch.input_ids.shape ) self.assertEqual((2, 39) , batch.attention_mask.shape ) @require_torch def lowerCamelCase__ ( self :Any ): '''simple docstring''' a = self.canine_tokenizer a = ["""Once there was a man.""", """He wrote a test in HuggingFace Tranformers."""] a = tokenizer(__magic_name__ , padding=__magic_name__ , return_tensors="""pt""" ) # check if input_ids, attention_mask and token_type_ids are returned self.assertIn("""input_ids""" , __magic_name__ ) self.assertIn("""attention_mask""" , __magic_name__ ) self.assertIn("""token_type_ids""" , __magic_name__ ) @require_torch def lowerCamelCase__ ( self :Any ): '''simple docstring''' a = self.canine_tokenizer a = [ """What's the weater?""", """It's about 25 degrees.""", ] a = tokenizer( text_target=__magic_name__ , max_length=32 , padding="""max_length""" , truncation=__magic_name__ , return_tensors="""pt""" ) self.assertEqual(32 , targets["""input_ids"""].shape[1] ) def lowerCamelCase__ ( self :Union[str, Any] ): '''simple docstring''' a = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): self.assertNotEqual(tokenizer.model_max_length , 42 ) # Now let's start the test a = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): # Isolate this from the other tests because we save additional tokens/etc a = tempfile.mkdtemp() a = """ He is very happy, UNwant\u00E9d,running""" a = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) tokenizer.save_pretrained(__magic_name__ ) a = tokenizer.__class__.from_pretrained(__magic_name__ ) a = after_tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) self.assertListEqual(__magic_name__ , __magic_name__ ) shutil.rmtree(__magic_name__ ) a = self.get_tokenizers(model_max_length=42 ) for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): # Isolate this from the other tests because we save additional tokens/etc a = tempfile.mkdtemp() a = """ He is very happy, UNwant\u00E9d,running""" a = tokenizer.additional_special_tokens # We can add a new special token for Canine as follows: a = chr(0Xe_0_0_7 ) additional_special_tokens.append(__magic_name__ ) tokenizer.add_special_tokens({"""additional_special_tokens""": additional_special_tokens} ) a = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) tokenizer.save_pretrained(__magic_name__ ) a = tokenizer.__class__.from_pretrained(__magic_name__ ) a = after_tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) self.assertListEqual(__magic_name__ , __magic_name__ ) self.assertIn(__magic_name__ , after_tokenizer.additional_special_tokens ) self.assertEqual(after_tokenizer.model_max_length , 42 ) a = tokenizer.__class__.from_pretrained(__magic_name__ , model_max_length=43 ) self.assertEqual(tokenizer.model_max_length , 43 ) shutil.rmtree(__magic_name__ ) def lowerCamelCase__ ( self :List[Any] ): '''simple docstring''' a = self.get_tokenizers(do_lower_case=__magic_name__ ) for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): a , a = self.get_clean_sequence(__magic_name__ ) # a special token for Canine can be defined as follows: a = 0Xe_0_0_5 a = chr(__magic_name__ ) tokenizer.add_special_tokens({"""cls_token""": special_token} ) a = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) self.assertEqual(len(__magic_name__ ) , 1 ) a = tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=__magic_name__ ) a = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) a = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) a = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) self.assertEqual(__magic_name__ , input_encoded + special_token_id ) a = tokenizer.decode(__magic_name__ , skip_special_tokens=__magic_name__ ) self.assertTrue(special_token not in decoded ) def lowerCamelCase__ ( self :int ): '''simple docstring''' a = self.get_tokenizers(do_lower_case=__magic_name__ ) for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): a = chr(0Xe_0_0_5 ) a = chr(0Xe_0_0_6 ) # `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py) tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=__magic_name__ ) # `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`, # which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py) tokenizer.add_special_tokens({"""additional_special_tokens""": [SPECIAL_TOKEN_2]} ) a = tokenizer.tokenize(__magic_name__ ) a = tokenizer.tokenize(__magic_name__ ) self.assertEqual(len(__magic_name__ ) , 1 ) self.assertEqual(len(__magic_name__ ) , 1 ) self.assertEqual(token_a[0] , __magic_name__ ) self.assertEqual(token_a[0] , __magic_name__ ) @require_tokenizers def lowerCamelCase__ ( self :Dict ): '''simple docstring''' a = self.get_tokenizers(do_lower_case=__magic_name__ ) for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): # a special token for Canine can be defined as follows: a = 0Xe_0_0_6 a = chr(__magic_name__ ) a = AddedToken(__magic_name__ , lstrip=__magic_name__ ) tokenizer.add_special_tokens({"""additional_special_tokens""": [new_token]} ) with tempfile.TemporaryDirectory() as tmp_dir_name: tokenizer.save_pretrained(__magic_name__ ) tokenizer.from_pretrained(__magic_name__ ) def lowerCamelCase__ ( self :int ): '''simple docstring''' a = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(__magic_name__ ) with open(os.path.join(__magic_name__ , """special_tokens_map.json""" ) , encoding="""utf-8""" ) as json_file: a = json.load(__magic_name__ ) with open(os.path.join(__magic_name__ , """tokenizer_config.json""" ) , encoding="""utf-8""" ) as json_file: a = json.load(__magic_name__ ) # a special token for Canine can be defined as follows: a = 0Xe_0_0_6 a = chr(__magic_name__ ) a = [new_token_a] a = [new_token_a] with open(os.path.join(__magic_name__ , """special_tokens_map.json""" ) , """w""" , encoding="""utf-8""" ) as outfile: json.dump(__magic_name__ , __magic_name__ ) with open(os.path.join(__magic_name__ , """tokenizer_config.json""" ) , """w""" , encoding="""utf-8""" ) as outfile: json.dump(__magic_name__ , __magic_name__ ) # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files a = tokenizer_class.from_pretrained(__magic_name__ , extra_ids=0 ) self.assertIn(__magic_name__ , tokenizer_without_change_in_init.additional_special_tokens ) # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab self.assertEqual( [new_token_a] , tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , ) a = 0Xe_0_0_7 a = chr(__magic_name__ ) # Now we test that we can change the value of additional_special_tokens in the from_pretrained a = [AddedToken(__magic_name__ , lstrip=__magic_name__ )] a = tokenizer_class.from_pretrained( __magic_name__ , additional_special_tokens=__magic_name__ , extra_ids=0 ) self.assertIn(__magic_name__ , tokenizer.additional_special_tokens ) # self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab self.assertEqual( [new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) ) @require_tokenizers def lowerCamelCase__ ( self :Union[str, Any] ): '''simple docstring''' a = self.get_tokenizers(do_lower_case=__magic_name__ ) for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): a = """hello world""" if self.space_between_special_tokens: a = """[CLS] hello world [SEP]""" else: a = input a = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) a = tokenizer.decode(__magic_name__ , spaces_between_special_tokens=self.space_between_special_tokens ) self.assertIn(__magic_name__ , [output, output.lower()] ) def lowerCamelCase__ ( self :List[str] ): '''simple docstring''' a = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): a = [ """bos_token""", """eos_token""", """unk_token""", """sep_token""", """pad_token""", """cls_token""", """mask_token""", ] a = """a""" a = ord(__magic_name__ ) for attr in attributes_list: setattr(__magic_name__ , attr + """_id""" , __magic_name__ ) self.assertEqual(getattr(__magic_name__ , __magic_name__ ) , __magic_name__ ) self.assertEqual(getattr(__magic_name__ , attr + """_id""" ) , __magic_name__ ) setattr(__magic_name__ , attr + """_id""" , __magic_name__ ) self.assertEqual(getattr(__magic_name__ , __magic_name__ ) , __magic_name__ ) self.assertEqual(getattr(__magic_name__ , attr + """_id""" ) , __magic_name__ ) setattr(__magic_name__ , """additional_special_tokens_ids""" , [] ) self.assertListEqual(getattr(__magic_name__ , """additional_special_tokens""" ) , [] ) self.assertListEqual(getattr(__magic_name__ , """additional_special_tokens_ids""" ) , [] ) a = 0Xe_0_0_6 a = chr(__magic_name__ ) setattr(__magic_name__ , """additional_special_tokens_ids""" , [additional_special_token_id] ) self.assertListEqual(getattr(__magic_name__ , """additional_special_tokens""" ) , [additional_special_token] ) self.assertListEqual(getattr(__magic_name__ , """additional_special_tokens_ids""" ) , [additional_special_token_id] ) def lowerCamelCase__ ( self :Any ): '''simple docstring''' pass def lowerCamelCase__ ( self :str ): '''simple docstring''' pass def lowerCamelCase__ ( self :Optional[int] ): '''simple docstring''' pass def lowerCamelCase__ ( self :List[Any] ): '''simple docstring''' pass def lowerCamelCase__ ( self :List[str] ): '''simple docstring''' pass def lowerCamelCase__ ( self :List[str] ): '''simple docstring''' pass def lowerCamelCase__ ( self :Any ): '''simple docstring''' pass def lowerCamelCase__ ( self :List[str] ): '''simple docstring''' pass
347
1
import argparse import gdown import numpy as np import torch from huggingface_hub import hf_hub_download from transformers import ( CLIPTokenizer, CLIPTokenizerFast, VideoMAEImageProcessor, XCLIPConfig, XCLIPModel, XCLIPProcessor, XCLIPTextConfig, XCLIPVisionConfig, ) def __A ( __lowerCamelCase , __lowerCamelCase ) -> Optional[int]: a = XCLIPTextConfig() # derive patch size from model name a = model_name.find("""patch""" ) a = int(model_name[start_idx + len("""patch""" ) : start_idx + len("""patch""" ) + 2] ) a = XCLIPVisionConfig(patch_size=__lowerCamelCase , num_frames=__lowerCamelCase ) if "large" in model_name: a = 768 a = 3072 a = 12 a = 1024 a = 4096 a = 16 a = 24 a = 768 a = 3072 if model_name == "xclip-large-patch14-16-frames": a = 336 a = XCLIPConfig.from_text_vision_configs(__lowerCamelCase , __lowerCamelCase ) if "large" in model_name: a = 768 return config def __A ( __lowerCamelCase ) -> Dict: # text encoder if name == "token_embedding.weight": a = name.replace("""token_embedding.weight""" , """text_model.embeddings.token_embedding.weight""" ) if name == "positional_embedding": a = name.replace("""positional_embedding""" , """text_model.embeddings.position_embedding.weight""" ) if "ln_1" in name: a = name.replace("""ln_1""" , """layer_norm1""" ) if "ln_2" in name: a = name.replace("""ln_2""" , """layer_norm2""" ) if "c_fc" in name: a = name.replace("""c_fc""" , """fc1""" ) if "c_proj" in name: a = name.replace("""c_proj""" , """fc2""" ) if name.startswith("""transformer.resblocks""" ): a = name.replace("""transformer.resblocks""" , """text_model.encoder.layers""" ) if "attn.out_proj" in name and "message" not in name: a = name.replace("""attn.out_proj""" , """self_attn.out_proj""" ) if "ln_final" in name: a = name.replace("""ln_final""" , """text_model.final_layer_norm""" ) # visual encoder if name == "visual.class_embedding": a = name.replace("""visual.class_embedding""" , """vision_model.embeddings.class_embedding""" ) if name == "visual.positional_embedding": a = name.replace("""visual.positional_embedding""" , """vision_model.embeddings.position_embedding.weight""" ) if name.startswith("""visual.transformer.resblocks""" ): a = name.replace("""visual.transformer.resblocks""" , """vision_model.encoder.layers""" ) if "visual.conv1" in name: a = name.replace("""visual.conv1""" , """vision_model.embeddings.patch_embedding""" ) if "visual.ln_pre" in name: a = name.replace("""visual.ln_pre""" , """vision_model.pre_layernorm""" ) if "visual.ln_post" in name: a = name.replace("""visual.ln_post""" , """vision_model.post_layernorm""" ) if "visual.proj" in name: a = name.replace("""visual.proj""" , """visual_projection.weight""" ) if "text_projection" in name: a = name.replace("""text_projection""" , """text_projection.weight""" ) # things on top if "prompts_visual_proj" in name: a = name.replace("""prompts_visual_proj""" , """prompts_visual_projection""" ) if "prompts_visual_ln" in name: a = name.replace("""prompts_visual_ln""" , """prompts_visual_layernorm""" ) # mit if name == "mit.positional_embedding": a = name.replace("""positional""" , """position""" ) if name.startswith("""mit.resblocks""" ): a = name.replace("""mit.resblocks""" , """mit.encoder.layers""" ) # prompts generator if name.startswith("""prompts_generator.norm""" ): a = name.replace("""prompts_generator.norm""" , """prompts_generator.layernorm""" ) return name def __A ( __lowerCamelCase , __lowerCamelCase ) -> List[str]: for key in orig_state_dict.copy().keys(): a = orig_state_dict.pop(__lowerCamelCase ) if "attn.in_proj" in key: a = key.split(""".""" ) if key.startswith("""visual""" ): a = key_split[3] a = config.vision_config.hidden_size if "message_attn" in key: if "weight" in key: a = val[ :dim, : ] a = val[ dim : dim * 2, : ] a = val[ -dim:, : ] else: a = val[ :dim ] a = val[ dim : dim * 2 ] a = val[ -dim: ] else: if "weight" in key: a = val[ :dim, : ] a = val[ dim : dim * 2, : ] a = val[ -dim:, : ] else: a = val[:dim] a = val[ dim : dim * 2 ] a = val[-dim:] elif key.startswith("""mit""" ): a = key_split[2] a = config.vision_config.mit_hidden_size if "weight" in key: a = val[:dim, :] a = val[dim : dim * 2, :] a = val[-dim:, :] else: a = val[:dim] a = val[dim : dim * 2] a = val[-dim:] else: a = key_split[2] a = config.text_config.hidden_size if "weight" in key: a = val[:dim, :] a = val[ dim : dim * 2, : ] a = val[-dim:, :] else: a = val[:dim] a = val[ dim : dim * 2 ] a = val[-dim:] else: a = rename_key(__lowerCamelCase ) if new_key_name in ["visual_projection.weight", "text_projection.weight"]: a = val.T a = val return orig_state_dict def __A ( __lowerCamelCase ) -> List[str]: if num_frames == 8: a = """eating_spaghetti_8_frames.npy""" elif num_frames == 16: a = """eating_spaghetti.npy""" elif num_frames == 32: a = """eating_spaghetti_32_frames.npy""" a = hf_hub_download( repo_id="""hf-internal-testing/spaghetti-video""" , filename=__lowerCamelCase , repo_type="""dataset""" , ) a = np.load(__lowerCamelCase ) return list(__lowerCamelCase ) def __A ( __lowerCamelCase , __lowerCamelCase=None , __lowerCamelCase=False ) -> Dict: a = { # fully supervised kinetics-400 checkpoints """xclip-base-patch32""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth""", """xclip-base-patch32-16-frames""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth""" ), """xclip-base-patch16""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth""", """xclip-base-patch16-16-frames""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth""" ), """xclip-large-patch14""": """https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&amp;export=download&amp;confirm=t&amp;uuid=b26caedc-88e2-473e-830a-9d158b653cdb""", """xclip-large-patch14-16-frames""": """https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&amp;export=download&amp;confirm=t&amp;uuid=538fa810-e671-4050-b385-9a623f89804f""", # fully supervised kinetics-600 checkpoints """xclip-base-patch16-kinetics-600""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth""" ), """xclip-base-patch16-kinetics-600-16-frames""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth""" ), """xclip-large-patch14-kinetics-600""": """https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&amp;export=download&amp;confirm=t&amp;uuid=141d4977-4a65-44ae-864f-4b0c19f838be""", # few shot """xclip-base-patch16-hmdb-2-shot""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth""" ), """xclip-base-patch16-hmdb-4-shot""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth""" ), """xclip-base-patch16-hmdb-8-shot""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth""" ), """xclip-base-patch16-hmdb-16-shot""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth""" ), """xclip-base-patch16-ucf-2-shot""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth""" ), """xclip-base-patch16-ucf-4-shot""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth""" ), """xclip-base-patch16-ucf-8-shot""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth""" ), """xclip-base-patch16-ucf-16-shot""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth""" ), # zero shot """xclip-base-patch16-zero-shot""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth""", } a = model_to_url[model_name] a = 8 if "16-frames" in model_name: a = 16 elif "shot" in model_name: a = 32 a = get_xclip_config(__lowerCamelCase , __lowerCamelCase ) a = XCLIPModel(__lowerCamelCase ) model.eval() if "drive" in checkpoint_url: a = """pytorch_model.bin""" gdown.cached_download(__lowerCamelCase , __lowerCamelCase , quiet=__lowerCamelCase ) a = torch.load(__lowerCamelCase , map_location="""cpu""" )["""model"""] else: a = torch.hub.load_state_dict_from_url(__lowerCamelCase )["""model"""] a = convert_state_dict(__lowerCamelCase , __lowerCamelCase ) a = XCLIPModel(__lowerCamelCase ) a , a = model.load_state_dict(__lowerCamelCase , strict=__lowerCamelCase ) assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"] model.eval() a = 336 if model_name == """xclip-large-patch14-16-frames""" else 224 a = VideoMAEImageProcessor(size=__lowerCamelCase ) a = CLIPTokenizer.from_pretrained("""openai/clip-vit-base-patch32""" ) a = CLIPTokenizerFast.from_pretrained("""openai/clip-vit-base-patch32""" ) a = XCLIPProcessor(image_processor=__lowerCamelCase , tokenizer=__lowerCamelCase ) a = prepare_video(__lowerCamelCase ) a = processor( text=["""playing sports""", """eating spaghetti""", """go shopping"""] , videos=__lowerCamelCase , return_tensors="""pt""" , padding=__lowerCamelCase ) print("""Shape of pixel values:""" , inputs.pixel_values.shape ) with torch.no_grad(): a = model(**__lowerCamelCase ) # Verify outputs a = outputs.logits_per_video a = logits_per_video.softmax(dim=1 ) print("""Probs:""" , __lowerCamelCase ) # kinetics-400 if model_name == "xclip-base-patch32": a = torch.tensor([[0.0019, 0.9951, 0.0030]] ) elif model_name == "xclip-base-patch32-16-frames": a = torch.tensor([[7.0_9_9_9E-0_4, 9.9_8_8_3E-0_1, 4.5_5_8_0E-0_4]] ) elif model_name == "xclip-base-patch16": a = torch.tensor([[0.0083, 0.9681, 0.0236]] ) elif model_name == "xclip-base-patch16-16-frames": a = torch.tensor([[7.6_9_3_7E-0_4, 9.9_7_2_8E-0_1, 1.9_4_7_3E-0_3]] ) elif model_name == "xclip-large-patch14": a = torch.tensor([[0.0062, 0.9864, 0.0075]] ) elif model_name == "xclip-large-patch14-16-frames": a = torch.tensor([[3.3_8_7_7E-0_4, 9.9_9_3_7E-0_1, 2.8_8_8_8E-0_4]] ) # kinetics-600 elif model_name == "xclip-base-patch16-kinetics-600": a = torch.tensor([[0.0555, 0.8914, 0.0531]] ) elif model_name == "xclip-base-patch16-kinetics-600-16-frames": a = torch.tensor([[3.8_5_5_4E-0_4, 9.9_9_2_9E-0_1, 3.2_7_5_4E-0_4]] ) elif model_name == "xclip-large-patch14-kinetics-600": a = torch.tensor([[0.0036, 0.9920, 0.0045]] ) # few shot elif model_name == "xclip-base-patch16-hmdb-2-shot": a = torch.tensor([[7.1_8_9_0E-0_6, 9.9_9_9_4E-0_1, 5.6_5_5_9E-0_5]] ) elif model_name == "xclip-base-patch16-hmdb-4-shot": a = torch.tensor([[1.0_3_2_0E-0_5, 9.9_9_9_3E-0_1, 6.2_4_3_5E-0_5]] ) elif model_name == "xclip-base-patch16-hmdb-8-shot": a = torch.tensor([[4.1_3_7_7E-0_6, 9.9_9_9_0E-0_1, 9.8_3_8_6E-0_5]] ) elif model_name == "xclip-base-patch16-hmdb-16-shot": a = torch.tensor([[4.1_3_4_7E-0_5, 9.9_9_6_2E-0_1, 3.3_4_1_1E-0_4]] ) elif model_name == "xclip-base-patch16-ucf-2-shot": a = torch.tensor([[8.5_8_5_7E-0_5, 9.9_9_2_8E-0_1, 6.3_2_9_1E-0_4]] ) elif model_name == "xclip-base-patch16-ucf-4-shot": a = torch.tensor([[8.5_8_5_7E-0_5, 9.9_9_2_8E-0_1, 6.3_2_9_1E-0_4]] ) elif model_name == "xclip-base-patch16-ucf-8-shot": a = torch.tensor([[0.0027, 0.9904, 0.0070]] ) elif model_name == "xclip-base-patch16-ucf-16-shot": a = torch.tensor([[9.8_2_1_9E-0_4, 9.9_5_9_3E-0_1, 3.0_8_6_3E-0_3]] ) # zero shot elif model_name == "xclip-base-patch16-zero-shot": a = torch.tensor([[3.5_0_8_2E-0_4, 9.9_7_8_5E-0_1, 1.7_9_6_6E-0_3]] ) else: raise ValueError(f'Model name {model_name} not supported' ) assert torch.allclose(__lowerCamelCase , __lowerCamelCase , atol=1E-3 ) print("""Looks ok!""" ) if pytorch_dump_folder_path is not None: print(f'Saving model {model_name} to {pytorch_dump_folder_path}' ) model.save_pretrained(__lowerCamelCase ) if push_to_hub: print("""Pushing model, processor and slow tokenizer files to the hub...""" ) model.push_to_hub(__lowerCamelCase , organization="""nielsr""" ) processor.push_to_hub(__lowerCamelCase , organization="""nielsr""" ) slow_tokenizer.push_to_hub(__lowerCamelCase , organization="""nielsr""" ) if __name__ == "__main__": __UpperCamelCase : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_name", default="xclip-base-patch32", type=str, help="Name of the model.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." ) __UpperCamelCase : List[Any] = parser.parse_args() convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
347
def __A ( __lowerCamelCase ) -> bool: return number & 1 == 0 if __name__ == "__main__": import doctest doctest.testmod()
347
1
import os import re import unicodedata from shutil import copyfile from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import is_torch_available, logging if is_torch_available(): import torch if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation __UpperCamelCase : int = logging.get_logger(__name__) __UpperCamelCase : Optional[int] = {"vocab_file": "spiece.model"} __UpperCamelCase : Union[str, Any] = { "vocab_file": { "AI-Sweden/gpt-sw3-126m": "https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model", "AI-Sweden/gpt-sw3-350m": "https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model", "AI-Sweden/gpt-sw3-1.6b": "https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model", "AI-Sweden/gpt-sw3-6.7b": "https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model", "AI-Sweden/gpt-sw3-20b": "https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model", } } __UpperCamelCase : Dict = { "AI-Sweden/gpt-sw3-126m": 2_048, "AI-Sweden/gpt-sw3-350m": 2_048, "AI-Sweden/gpt-sw3-1.6b": 2_048, "AI-Sweden/gpt-sw3-6.7b": 2_048, "AI-Sweden/gpt-sw3-20b": 2_048, } class __lowerCAmelCase ( __magic_name__ ): UpperCamelCase__ = VOCAB_FILES_NAMES UpperCamelCase__ = PRETRAINED_VOCAB_FILES_MAP UpperCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase__ = ['''input_ids''', '''attention_mask'''] def __init__( self :Dict , __magic_name__ :Any , __magic_name__ :Dict=False , __magic_name__ :Optional[int]=False , __magic_name__ :Any=False , __magic_name__ :Optional[int]=None , __magic_name__ :Dict=None , __magic_name__ :str=None , __magic_name__ :List[str]=None , __magic_name__ :Optional[Dict[str, Any]] = None , **__magic_name__ :Union[str, Any] , ): '''simple docstring''' a = {} if sp_model_kwargs is None else sp_model_kwargs a = kwargs.get("""name_or_path""" ) if name_or_path is None: logger.warning( """name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,""" """ you are testing the model, this can safely be ignored""" ) a = """None""" # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing a = """<|endoftext|>""" if eos_token is None else eos_token a = """<unk>""" if unk_token is None else unk_token if "gpt-sw3-7b" in name_or_path: a = unk_token if pad_token is None else pad_token a = eos_token if bos_token is None else bos_token else: a = """<pad>""" if pad_token is None else pad_token a = """<s>""" if bos_token is None else bos_token super().__init__( do_lower_case=__magic_name__ , remove_space=__magic_name__ , keep_accents=__magic_name__ , bos_token=__magic_name__ , eos_token=__magic_name__ , unk_token=__magic_name__ , pad_token=__magic_name__ , sp_model_kwargs=self.sp_model_kwargs , **__magic_name__ , ) a = do_lower_case a = remove_space a = keep_accents a = vocab_file a = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(__magic_name__ ) # Used for whitespace normalization in input texts # fmt : off a = {""" """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """""", """„"""} # fmt : on # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing a = re.compile( F'[{"".join(map(__magic_name__ , list(range(0 , 9 ) ) + list(range(11 , 32 ) ) + list(range(127 , 160 ) ) + [160, 173, 8203] ) )}]' ) def __getstate__( self :Any ): '''simple docstring''' a = self.__dict__.copy() a = None return state def __setstate__( self :int , __magic_name__ :Any ): '''simple docstring''' a = d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): a = {} a = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) @property # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size def lowerCamelCase__ ( self :int ): '''simple docstring''' return len(self.sp_model ) def lowerCamelCase__ ( self :List[str] , __magic_name__ :str ): '''simple docstring''' a = self.non_printing_characters_re.sub("""""" , __magic_name__ ) # Normalize whitespaces a = """""".join([char if char not in self.whitespaces else """ """ for char in text] ) # NFC Unicode normalization a = unicodedata.normalize("""NFC""" , __magic_name__ ) return text def lowerCamelCase__ ( self :Any , __magic_name__ :str , **__magic_name__ :Any ): '''simple docstring''' a = self.preprocess_text(__magic_name__ ) return self.sp_model.encode(__magic_name__ , out_type=__magic_name__ ) def lowerCamelCase__ ( self :Dict , __magic_name__ :str ): '''simple docstring''' return self.sp_model.PieceToId(__magic_name__ ) def lowerCamelCase__ ( self :List[Any] , __magic_name__ :int ): '''simple docstring''' return self.sp_model.IdToPiece(__magic_name__ ) @staticmethod def lowerCamelCase__ ( __magic_name__ :str ): '''simple docstring''' return out_string def lowerCamelCase__ ( self :Tuple , __magic_name__ :List[str] ): '''simple docstring''' a = [] a = """""" a = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document if not prev_is_special: out_string += " " out_string += self.sp_model.decode(__magic_name__ ) + token a = True a = [] else: current_sub_tokens.append(__magic_name__ ) a = False out_string += self.sp_model.decode(__magic_name__ ) return out_string def lowerCamelCase__ ( self :Tuple ): '''simple docstring''' a = {self.convert_ids_to_tokens(__magic_name__ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def lowerCamelCase__ ( self :str , __magic_name__ :str , __magic_name__ :Optional[str] = None ): '''simple docstring''' if not os.path.isdir(__magic_name__ ): logger.error(F'Vocabulary path ({save_directory}) should be a directory' ) return a = os.path.join( __magic_name__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__magic_name__ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __magic_name__ ) elif not os.path.isfile(self.vocab_file ): with open(__magic_name__ , """wb""" ) as fi: a = self.sp_model.serialized_model_proto() fi.write(__magic_name__ ) return (out_vocab_file,) def lowerCamelCase__ ( self :List[Any] , __magic_name__ :Union[str, List[str]] , __magic_name__ :Union[str, bool] = False ): '''simple docstring''' if isinstance(__magic_name__ , __magic_name__ ): a = self.preprocess_text(__magic_name__ ) a = self.sp_model.encode(__magic_name__ ) else: a = [self.preprocess_text(__magic_name__ ) for t in text] a = self.sp_model.encode(__magic_name__ ) if return_tensors is True or return_tensors == "pt": a = torch.tensor(__magic_name__ ) return token_ids def lowerCamelCase__ ( self :Optional[int] , __magic_name__ :Union[int, List[int]] ): '''simple docstring''' return self.sp_model.decode(__magic_name__ ) def lowerCamelCase__ ( self :int , __magic_name__ :"Conversation" ): '''simple docstring''' a = [F'User: {text}' if is_user else F'Bot: {text}' for is_user, text in conversation.iter_texts()] a = ( F'{self.eos_token}{self.bos_token}' + F'{self.bos_token}'.join(__magic_name__ ) + F'{self.bos_token}Bot:' ) return self.encode(text=__magic_name__ )
347
def __A ( __lowerCamelCase ) -> int: if not numbers: return 0 if not isinstance(__lowerCamelCase , (list, tuple) ) or not all( isinstance(__lowerCamelCase , __lowerCamelCase ) for number in numbers ): raise ValueError("""numbers must be an iterable of integers""" ) a = a = a = numbers[0] for i in range(1 , len(__lowerCamelCase ) ): # update the maximum and minimum subarray products a = numbers[i] if number < 0: a , a = min_till_now, max_till_now a = max(__lowerCamelCase , max_till_now * number ) a = min(__lowerCamelCase , min_till_now * number ) # update the maximum product found till now a = max(__lowerCamelCase , __lowerCamelCase ) return max_prod
347
1
import itertools from dataclasses import dataclass from typing import List, Optional import pyarrow as pa import pyarrow.parquet as pq import datasets from datasets.table import table_cast __UpperCamelCase : str = datasets.utils.logging.get_logger(__name__) @dataclass class __lowerCAmelCase ( datasets.BuilderConfig ): UpperCamelCase__ = 10000 UpperCamelCase__ = None UpperCamelCase__ = None class __lowerCAmelCase ( datasets.ArrowBasedBuilder ): UpperCamelCase__ = ParquetConfig def lowerCamelCase__ ( self :Dict ): '''simple docstring''' return datasets.DatasetInfo(features=self.config.features ) def lowerCamelCase__ ( self :List[Any] , __magic_name__ :List[str] ): '''simple docstring''' if not self.config.data_files: raise ValueError(F'At least one data file must be specified, but got data_files={self.config.data_files}' ) a = dl_manager.download_and_extract(self.config.data_files ) if isinstance(__magic_name__ , (str, list, tuple) ): a = data_files if isinstance(__magic_name__ , __magic_name__ ): a = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive a = [dl_manager.iter_files(__magic_name__ ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files} )] a = [] for split_name, files in data_files.items(): if isinstance(__magic_name__ , __magic_name__ ): a = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive a = [dl_manager.iter_files(__magic_name__ ) for file in files] # Infer features is they are stoed in the arrow schema if self.info.features is None: for file in itertools.chain.from_iterable(__magic_name__ ): with open(__magic_name__ , """rb""" ) as f: a = datasets.Features.from_arrow_schema(pq.read_schema(__magic_name__ ) ) break splits.append(datasets.SplitGenerator(name=__magic_name__ , gen_kwargs={"""files""": files} ) ) return splits def lowerCamelCase__ ( self :Dict , __magic_name__ :pa.Table ): '''simple docstring''' if self.info.features is not None: # more expensive cast to support nested features with keys in a different order # allows str <-> int/float or str to Audio for example a = table_cast(__magic_name__ , self.info.features.arrow_schema ) return pa_table def lowerCamelCase__ ( self :str , __magic_name__ :List[str] ): '''simple docstring''' a = self.info.features.arrow_schema if self.info.features is not None else None if self.info.features is not None and self.config.columns is not None: if sorted(field.name for field in schema ) != sorted(self.config.columns ): raise ValueError( F'Tried to load parquet data with columns \'{self.config.columns}\' with mismatching features \'{self.info.features}\'' ) for file_idx, file in enumerate(itertools.chain.from_iterable(__magic_name__ ) ): with open(__magic_name__ , """rb""" ) as f: a = pq.ParquetFile(__magic_name__ ) try: for batch_idx, record_batch in enumerate( parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ): a = pa.Table.from_batches([record_batch] ) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield F'{file_idx}_{batch_idx}', self._cast_table(__magic_name__ ) except ValueError as e: logger.error(F'Failed to read file \'{file}\' with error {type(__magic_name__ )}: {e}' ) raise
347
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) __UpperCamelCase : Optional[Any] = { "configuration_wav2vec2": ["WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Wav2Vec2Config"], "feature_extraction_wav2vec2": ["Wav2Vec2FeatureExtractor"], "processing_wav2vec2": ["Wav2Vec2Processor"], "tokenization_wav2vec2": ["Wav2Vec2CTCTokenizer", "Wav2Vec2Tokenizer"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : str = [ "WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST", "Wav2Vec2ForAudioFrameClassification", "Wav2Vec2ForCTC", "Wav2Vec2ForMaskedLM", "Wav2Vec2ForPreTraining", "Wav2Vec2ForSequenceClassification", "Wav2Vec2ForXVector", "Wav2Vec2Model", "Wav2Vec2PreTrainedModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : Dict = [ "TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST", "TFWav2Vec2ForCTC", "TFWav2Vec2Model", "TFWav2Vec2PreTrainedModel", "TFWav2Vec2ForSequenceClassification", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : Union[str, Any] = [ "FlaxWav2Vec2ForCTC", "FlaxWav2Vec2ForPreTraining", "FlaxWav2Vec2Model", "FlaxWav2Vec2PreTrainedModel", ] if TYPE_CHECKING: from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig from .feature_extraction_wavaveca import WavaVecaFeatureExtractor from .processing_wavaveca import WavaVecaProcessor from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_wavaveca import ( WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, WavaVecaForAudioFrameClassification, WavaVecaForCTC, WavaVecaForMaskedLM, WavaVecaForPreTraining, WavaVecaForSequenceClassification, WavaVecaForXVector, WavaVecaModel, WavaVecaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_wavaveca import ( TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, TFWavaVecaForCTC, TFWavaVecaForSequenceClassification, TFWavaVecaModel, TFWavaVecaPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_wavaveca import ( FlaxWavaVecaForCTC, FlaxWavaVecaForPreTraining, FlaxWavaVecaModel, FlaxWavaVecaPreTrainedModel, ) else: import sys __UpperCamelCase : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
347
1
import warnings from typing import Dict, List, Optional, Tuple from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging __UpperCamelCase : Dict = logging.get_logger(__name__) class __lowerCAmelCase ( __magic_name__ ): UpperCamelCase__ = ['''input_ids''', '''attention_mask'''] def __init__( self :List[str] , __magic_name__ :int="</s>" , __magic_name__ :List[Any]="<unk>" , __magic_name__ :Optional[Any]="<pad>" , __magic_name__ :Optional[int]=125 , __magic_name__ :List[str]=None , **__magic_name__ :List[str] , ): '''simple docstring''' if extra_ids > 0 and additional_special_tokens is None: a = [F'<extra_id_{i}>' for i in range(__magic_name__ )] elif extra_ids > 0 and additional_special_tokens is not None: # Check that we have the right number of extra_id special tokens a = len(set(filter(lambda __magic_name__ : bool("""extra_id""" in str(__magic_name__ ) ) , __magic_name__ ) ) ) if extra_tokens != extra_ids: raise ValueError( F'Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are' """ provided to ByT5Tokenizer. In this case the additional_special_tokens must include the""" """ extra_ids tokens""" ) a = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else pad_token a = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else eos_token a = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else unk_token super().__init__( eos_token=__magic_name__ , unk_token=__magic_name__ , pad_token=__magic_name__ , extra_ids=__magic_name__ , additional_special_tokens=__magic_name__ , **__magic_name__ , ) a = extra_ids a = 2**8 # utf is 8 bits # define special tokens dict a = { self.pad_token: 0, self.eos_token: 1, self.unk_token: 2, } a = len(self.special_tokens_encoder ) a = len(__magic_name__ ) for i, token in enumerate(__magic_name__ ): a = self.vocab_size + i - n a = {v: k for k, v in self.special_tokens_encoder.items()} @property def lowerCamelCase__ ( self :List[Any] ): '''simple docstring''' return self._utf_vocab_size + self._num_special_tokens + self._extra_ids def lowerCamelCase__ ( self :Any , __magic_name__ :List[int] , __magic_name__ :Optional[List[int]] = None , __magic_name__ :bool = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__magic_name__ , token_ids_a=__magic_name__ , already_has_special_tokens=__magic_name__ ) # normal case: some special tokens if token_ids_a is None: return ([0] * len(__magic_name__ )) + [1] return ([0] * len(__magic_name__ )) + [1] + ([0] * len(__magic_name__ )) + [1] def lowerCamelCase__ ( self :str , __magic_name__ :List[int] ): '''simple docstring''' if len(__magic_name__ ) > 0 and token_ids[-1] == self.eos_token_id: warnings.warn( F'This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated' """ eos tokens being added.""" ) return token_ids else: return token_ids + [self.eos_token_id] def lowerCamelCase__ ( self :Union[str, Any] , __magic_name__ :List[int] , __magic_name__ :Optional[List[int]] = None ): '''simple docstring''' a = [self.eos_token_id] if token_ids_a is None: return len(token_ids_a + eos ) * [0] return len(token_ids_a + eos + token_ids_a + eos ) * [0] def lowerCamelCase__ ( self :Union[str, Any] , __magic_name__ :List[int] , __magic_name__ :Optional[List[int]] = None ): '''simple docstring''' a = self._add_eos_if_not_present(__magic_name__ ) if token_ids_a is None: return token_ids_a else: a = self._add_eos_if_not_present(__magic_name__ ) return token_ids_a + token_ids_a def lowerCamelCase__ ( self :List[str] , __magic_name__ :str ): '''simple docstring''' a = [chr(__magic_name__ ) for i in text.encode("""utf-8""" )] return tokens def lowerCamelCase__ ( self :Tuple , __magic_name__ :str ): '''simple docstring''' if token in self.special_tokens_encoder: a = self.special_tokens_encoder[token] elif token in self.added_tokens_encoder: a = self.added_tokens_encoder[token] elif len(__magic_name__ ) != 1: a = self.unk_token_id else: a = ord(__magic_name__ ) + self._num_special_tokens return token_id def lowerCamelCase__ ( self :List[str] , __magic_name__ :Dict ): '''simple docstring''' if index in self.special_tokens_decoder: a = self.special_tokens_decoder[index] else: a = chr(index - self._num_special_tokens ) return token def lowerCamelCase__ ( self :Tuple , __magic_name__ :Optional[int] ): '''simple docstring''' a = b"""""" for token in tokens: if token in self.special_tokens_decoder: a = self.special_tokens_decoder[token].encode("""utf-8""" ) elif token in self.added_tokens_decoder: a = self.special_tokens_decoder[token].encode("""utf-8""" ) elif token in self.special_tokens_encoder: a = token.encode("""utf-8""" ) elif token in self.added_tokens_encoder: a = token.encode("""utf-8""" ) else: a = bytes([ord(__magic_name__ )] ) bstring += tok_string a = bstring.decode("""utf-8""" , errors="""ignore""" ) return string def lowerCamelCase__ ( self :Optional[Any] , __magic_name__ :str , __magic_name__ :Optional[str] = None ): '''simple docstring''' return ()
347
import pytest import datasets.config from datasets.utils.info_utils import is_small_dataset @pytest.mark.parametrize("""dataset_size""" , [None, 400 * 2**20, 600 * 2**20] ) @pytest.mark.parametrize("""input_in_memory_max_size""" , ["""default""", 0, 100 * 2**20, 900 * 2**20] ) def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Any: if input_in_memory_max_size != "default": monkeypatch.setattr(datasets.config , """IN_MEMORY_MAX_SIZE""" , __lowerCamelCase ) a = datasets.config.IN_MEMORY_MAX_SIZE if input_in_memory_max_size == "default": assert in_memory_max_size == 0 else: assert in_memory_max_size == input_in_memory_max_size if dataset_size and in_memory_max_size: a = dataset_size < in_memory_max_size else: a = False a = is_small_dataset(__lowerCamelCase ) assert result == expected
347
1
import itertools import os import random import tempfile import unittest import numpy as np from datasets import load_dataset from transformers import is_speech_available from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_speech_available(): from transformers import WhisperFeatureExtractor if is_torch_available(): import torch __UpperCamelCase : int = random.Random() def __A ( __lowerCamelCase , __lowerCamelCase=1.0 , __lowerCamelCase=None , __lowerCamelCase=None ) -> Dict: if rng is None: a = global_rng a = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values @require_torch @require_torchaudio class __lowerCAmelCase ( unittest.TestCase ): def __init__( self :Optional[int] , __magic_name__ :Tuple , __magic_name__ :str=7 , __magic_name__ :str=400 , __magic_name__ :Any=2000 , __magic_name__ :Optional[Any]=10 , __magic_name__ :Tuple=160 , __magic_name__ :Tuple=8 , __magic_name__ :Union[str, Any]=0.0 , __magic_name__ :Optional[Any]=4000 , __magic_name__ :Dict=False , __magic_name__ :int=True , ): '''simple docstring''' a = parent a = batch_size a = min_seq_length a = max_seq_length a = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) a = padding_value a = sampling_rate a = return_attention_mask a = do_normalize a = feature_size a = chunk_length a = hop_length def lowerCamelCase__ ( self :Optional[int] ): '''simple docstring''' return { "feature_size": self.feature_size, "hop_length": self.hop_length, "chunk_length": self.chunk_length, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "return_attention_mask": self.return_attention_mask, "do_normalize": self.do_normalize, } def lowerCamelCase__ ( self :Union[str, Any] , __magic_name__ :Tuple=False , __magic_name__ :Dict=False ): '''simple docstring''' def _flatten(__magic_name__ :List[Any] ): return list(itertools.chain(*__magic_name__ ) ) if equal_length: a = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )] else: # make sure that inputs increase in size a = [ floats_list((x, self.feature_size) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: a = [np.asarray(__magic_name__ ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ): UpperCamelCase__ = WhisperFeatureExtractor if is_speech_available() else None def lowerCamelCase__ ( self :Tuple ): '''simple docstring''' a = WhisperFeatureExtractionTester(self ) def lowerCamelCase__ ( self :Dict ): '''simple docstring''' a = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: a = feat_extract_first.save_pretrained(__magic_name__ )[0] check_json_file_has_correct_format(__magic_name__ ) a = self.feature_extraction_class.from_pretrained(__magic_name__ ) a = feat_extract_first.to_dict() a = feat_extract_second.to_dict() a = feat_extract_first.mel_filters a = feat_extract_second.mel_filters self.assertTrue(np.allclose(__magic_name__ , __magic_name__ ) ) self.assertEqual(__magic_name__ , __magic_name__ ) def lowerCamelCase__ ( self :Optional[int] ): '''simple docstring''' a = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: a = os.path.join(__magic_name__ , """feat_extract.json""" ) feat_extract_first.to_json_file(__magic_name__ ) a = self.feature_extraction_class.from_json_file(__magic_name__ ) a = feat_extract_first.to_dict() a = feat_extract_second.to_dict() a = feat_extract_first.mel_filters a = feat_extract_second.mel_filters self.assertTrue(np.allclose(__magic_name__ , __magic_name__ ) ) self.assertEqual(__magic_name__ , __magic_name__ ) def lowerCamelCase__ ( self :Optional[Any] ): '''simple docstring''' a = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 a = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] a = [np.asarray(__magic_name__ ) for speech_input in speech_inputs] # Test feature size a = feature_extractor(__magic_name__ , padding="""max_length""" , return_tensors="""np""" ).input_features self.assertTrue(input_features.ndim == 3 ) self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames ) self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size ) # Test not batched input a = feature_extractor(speech_inputs[0] , return_tensors="""np""" ).input_features a = feature_extractor(np_speech_inputs[0] , return_tensors="""np""" ).input_features self.assertTrue(np.allclose(__magic_name__ , __magic_name__ , atol=1E-3 ) ) # Test batched a = feature_extractor(__magic_name__ , return_tensors="""np""" ).input_features a = feature_extractor(__magic_name__ , return_tensors="""np""" ).input_features for enc_seq_a, enc_seq_a in zip(__magic_name__ , __magic_name__ ): self.assertTrue(np.allclose(__magic_name__ , __magic_name__ , atol=1E-3 ) ) # Test 2-D numpy arrays are batched. a = [floats_list((1, x) )[0] for x in (800, 800, 800)] a = np.asarray(__magic_name__ ) a = feature_extractor(__magic_name__ , return_tensors="""np""" ).input_features a = feature_extractor(__magic_name__ , return_tensors="""np""" ).input_features for enc_seq_a, enc_seq_a in zip(__magic_name__ , __magic_name__ ): self.assertTrue(np.allclose(__magic_name__ , __magic_name__ , atol=1E-3 ) ) # Test truncation required a = [floats_list((1, x) )[0] for x in range(200 , (feature_extractor.n_samples + 500) , 200 )] a = [np.asarray(__magic_name__ ) for speech_input in speech_inputs] a = [x[: feature_extractor.n_samples] for x in speech_inputs] a = [np.asarray(__magic_name__ ) for speech_input in speech_inputs_truncated] a = feature_extractor(__magic_name__ , return_tensors="""np""" ).input_features a = feature_extractor(__magic_name__ , return_tensors="""np""" ).input_features for enc_seq_a, enc_seq_a in zip(__magic_name__ , __magic_name__ ): self.assertTrue(np.allclose(__magic_name__ , __magic_name__ , atol=1E-3 ) ) def lowerCamelCase__ ( self :Dict ): '''simple docstring''' import torch a = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) a = np.random.rand(100 , 32 ).astype(np.floataa ) a = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: a = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""np""" ) self.assertTrue(np_processed.input_features.dtype == np.floataa ) a = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""pt""" ) self.assertTrue(pt_processed.input_features.dtype == torch.floataa ) def lowerCamelCase__ ( self :List[str] , __magic_name__ :Any ): '''simple docstring''' a = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" ) # automatic decoding with librispeech a = ds.sort("""id""" ).select(range(__magic_name__ ) )[:num_samples]["""audio"""] return [x["array"] for x in speech_samples] def lowerCamelCase__ ( self :Dict ): '''simple docstring''' a = torch.tensor( [ 0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951, 0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678, 0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554, -0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854 ] ) # fmt: on a = self._load_datasamples(1 ) a = WhisperFeatureExtractor() a = feature_extractor(__magic_name__ , return_tensors="""pt""" ).input_features self.assertEqual(input_features.shape , (1, 80, 3000) ) self.assertTrue(torch.allclose(input_features[0, 0, :30] , __magic_name__ , atol=1E-4 ) ) def lowerCamelCase__ ( self :Union[str, Any] ): '''simple docstring''' a = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) a = self._load_datasamples(1 )[0] a = ((audio - audio.min()) / (audio.max() - audio.min())) * 6_5535 # Rescale to [0, 65535] to show issue a = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=__magic_name__ )[0] self.assertTrue(np.all(np.mean(__magic_name__ ) < 1E-3 ) ) self.assertTrue(np.all(np.abs(np.var(__magic_name__ ) - 1 ) < 1E-3 ) )
347
from __future__ import annotations from fractions import Fraction from math import gcd, sqrt def __A ( __lowerCamelCase ) -> bool: a = int(number**0.5 ) return number == sq * sq def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> tuple[int, int]: a = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den a = x_den * y_den * z_den a = gcd(__lowerCamelCase , __lowerCamelCase ) top //= hcf bottom //= hcf return top, bottom def __A ( __lowerCamelCase = 35 ) -> int: a = set() a = 42 a = Fraction(0 ) a = 42 for x_num in range(1 , order + 1 ): for x_den in range(x_num + 1 , order + 1 ): for y_num in range(1 , order + 1 ): for y_den in range(y_num + 1 , order + 1 ): # n=1 a = x_num * y_den + x_den * y_num a = x_den * y_den a = gcd(__lowerCamelCase , __lowerCamelCase ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: a = add_three( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) unique_s.add(__lowerCamelCase ) # n=2 a = ( x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num ) a = x_den * x_den * y_den * y_den if is_sq(__lowerCamelCase ) and is_sq(__lowerCamelCase ): a = int(sqrt(__lowerCamelCase ) ) a = int(sqrt(__lowerCamelCase ) ) a = gcd(__lowerCamelCase , __lowerCamelCase ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: a = add_three( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) unique_s.add(__lowerCamelCase ) # n=-1 a = x_num * y_num a = x_den * y_num + x_num * y_den a = gcd(__lowerCamelCase , __lowerCamelCase ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: a = add_three( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) unique_s.add(__lowerCamelCase ) # n=2 a = x_num * x_num * y_num * y_num a = ( x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den ) if is_sq(__lowerCamelCase ) and is_sq(__lowerCamelCase ): a = int(sqrt(__lowerCamelCase ) ) a = int(sqrt(__lowerCamelCase ) ) a = gcd(__lowerCamelCase , __lowerCamelCase ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: a = add_three( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) unique_s.add(__lowerCamelCase ) for num, den in unique_s: total += Fraction(__lowerCamelCase , __lowerCamelCase ) return total.denominator + total.numerator if __name__ == "__main__": print(F'{solution() = }')
347
1
class __lowerCAmelCase : def __init__( self :str , __magic_name__ :str = "" , __magic_name__ :bool = False ): '''simple docstring''' a = {} # A node will be a leaf if the tree contains its word a = is_leaf a = prefix def lowerCamelCase__ ( self :str , __magic_name__ :str ): '''simple docstring''' a = 0 for q, w in zip(self.prefix , __magic_name__ ): if q != w: break x += 1 return self.prefix[:x], self.prefix[x:], word[x:] def lowerCamelCase__ ( self :Optional[Any] , __magic_name__ :list[str] ): '''simple docstring''' for word in words: self.insert(__magic_name__ ) def lowerCamelCase__ ( self :int , __magic_name__ :str ): '''simple docstring''' if self.prefix == word: a = True # Case 2: The node has no edges that have a prefix to the word # Solution: We create an edge from the current node to a new one # containing the word elif word[0] not in self.nodes: a = RadixNode(prefix=__magic_name__ , is_leaf=__magic_name__ ) else: a = self.nodes[word[0]] a , a , a = incoming_node.match( __magic_name__ ) # Case 3: The node prefix is equal to the matching # Solution: We insert remaining word on the next node if remaining_prefix == "": self.nodes[matching_string[0]].insert(__magic_name__ ) # Case 4: The word is greater equal to the matching # Solution: Create a node in between both nodes, change # prefixes and add the new node for the remaining word else: a = remaining_prefix a = self.nodes[matching_string[0]] a = RadixNode(__magic_name__ , __magic_name__ ) a = aux_node if remaining_word == "": a = True else: self.nodes[matching_string[0]].insert(__magic_name__ ) def lowerCamelCase__ ( self :List[Any] , __magic_name__ :str ): '''simple docstring''' a = self.nodes.get(word[0] , __magic_name__ ) if not incoming_node: return False else: a , a , a = incoming_node.match( __magic_name__ ) # If there is remaining prefix, the word can't be on the tree if remaining_prefix != "": return False # This applies when the word and the prefix are equal elif remaining_word == "": return incoming_node.is_leaf # We have word remaining so we check the next node else: return incoming_node.find(__magic_name__ ) def lowerCamelCase__ ( self :Optional[Any] , __magic_name__ :str ): '''simple docstring''' a = self.nodes.get(word[0] , __magic_name__ ) if not incoming_node: return False else: a , a , a = incoming_node.match( __magic_name__ ) # If there is remaining prefix, the word can't be on the tree if remaining_prefix != "": return False # We have word remaining so we check the next node elif remaining_word != "": return incoming_node.delete(__magic_name__ ) else: # If it is not a leaf, we don't have to delete if not incoming_node.is_leaf: return False else: # We delete the nodes if no edges go from it if len(incoming_node.nodes ) == 0: del self.nodes[word[0]] # We merge the current node with its only child if len(self.nodes ) == 1 and not self.is_leaf: a = list(self.nodes.values() )[0] a = merging_node.is_leaf self.prefix += merging_node.prefix a = merging_node.nodes # If there is more than 1 edge, we just mark it as non-leaf elif len(incoming_node.nodes ) > 1: a = False # If there is 1 edge, we merge it with its child else: a = list(incoming_node.nodes.values() )[0] a = merging_node.is_leaf incoming_node.prefix += merging_node.prefix a = merging_node.nodes return True def lowerCamelCase__ ( self :List[Any] , __magic_name__ :int = 0 ): '''simple docstring''' if self.prefix != "": print("""-""" * height , self.prefix , """ (leaf)""" if self.is_leaf else """""" ) for value in self.nodes.values(): value.print_tree(height + 1 ) def __A ( ) -> bool: a = """banana bananas bandana band apple all beast""".split() a = RadixNode() root.insert_many(__lowerCamelCase ) assert all(root.find(__lowerCamelCase ) for word in words ) assert not root.find("""bandanas""" ) assert not root.find("""apps""" ) root.delete("""all""" ) assert not root.find("""all""" ) root.delete("""banana""" ) assert not root.find("""banana""" ) assert root.find("""bananas""" ) return True def __A ( ) -> None: assert test_trie() def __A ( ) -> None: a = RadixNode() a = """banana bananas bandanas bandana band apple all beast""".split() root.insert_many(__lowerCamelCase ) print("""Words:""" , __lowerCamelCase ) print("""Tree:""" ) root.print_tree() if __name__ == "__main__": main()
347
import unittest import numpy as np from transformers import RoFormerConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.roformer.modeling_flax_roformer import ( FlaxRoFormerForMaskedLM, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerModel, ) class __lowerCAmelCase ( unittest.TestCase ): def __init__( self :List[str] , __magic_name__ :List[str] , __magic_name__ :List[Any]=13 , __magic_name__ :Any=7 , __magic_name__ :Optional[int]=True , __magic_name__ :List[Any]=True , __magic_name__ :Optional[int]=True , __magic_name__ :Union[str, Any]=True , __magic_name__ :Any=99 , __magic_name__ :List[str]=32 , __magic_name__ :List[str]=5 , __magic_name__ :str=4 , __magic_name__ :str=37 , __magic_name__ :Optional[int]="gelu" , __magic_name__ :int=0.1 , __magic_name__ :Dict=0.1 , __magic_name__ :List[str]=512 , __magic_name__ :Tuple=16 , __magic_name__ :Tuple=2 , __magic_name__ :List[str]=0.02 , __magic_name__ :Any=4 , ): '''simple docstring''' a = parent a = batch_size a = seq_length a = is_training a = use_attention_mask a = use_token_type_ids a = use_labels a = vocab_size a = hidden_size a = num_hidden_layers a = num_attention_heads a = intermediate_size a = hidden_act a = hidden_dropout_prob a = attention_probs_dropout_prob a = max_position_embeddings a = type_vocab_size a = type_sequence_label_size a = initializer_range a = num_choices def lowerCamelCase__ ( self :Optional[int] ): '''simple docstring''' a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) a = None if self.use_attention_mask: a = random_attention_mask([self.batch_size, self.seq_length] ) a = None if self.use_token_type_ids: a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) a = RoFormerConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__magic_name__ , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def lowerCamelCase__ ( self :Optional[Any] ): '''simple docstring''' a = self.prepare_config_and_inputs() a , a , a , a = config_and_inputs a = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask} return config, inputs_dict @require_flax class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ): UpperCamelCase__ = True UpperCamelCase__ = ( ( FlaxRoFormerModel, FlaxRoFormerForMaskedLM, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, ) if is_flax_available() else () ) def lowerCamelCase__ ( self :List[Any] ): '''simple docstring''' a = FlaxRoFormerModelTester(self ) @slow def lowerCamelCase__ ( self :List[str] ): '''simple docstring''' for model_class_name in self.all_model_classes: a = model_class_name.from_pretrained("""junnyu/roformer_chinese_small""" , from_pt=__magic_name__ ) a = model(np.ones((1, 1) ) ) self.assertIsNotNone(__magic_name__ ) @require_flax class __lowerCAmelCase ( unittest.TestCase ): @slow def lowerCamelCase__ ( self :Union[str, Any] ): '''simple docstring''' a = FlaxRoFormerForMaskedLM.from_pretrained("""junnyu/roformer_chinese_base""" ) a = jnp.array([[0, 1, 2, 3, 4, 5]] ) a = model(__magic_name__ )[0] a = 5_0000 a = (1, 6, vocab_size) self.assertEqual(output.shape , __magic_name__ ) a = jnp.array( [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]] ) self.assertTrue(jnp.allclose(output[:, :3, :3] , __magic_name__ , atol=1E-4 ) )
347
1
import pandas as pd from matplotlib import pyplot as plt from sklearn.linear_model import LinearRegression # Splitting the dataset into the Training set and Test set from sklearn.model_selection import train_test_split # Fitting Polynomial Regression to the dataset from sklearn.preprocessing import PolynomialFeatures # Importing the dataset __UpperCamelCase : List[Any] = pd.read_csv( "https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/" "position_salaries.csv" ) __UpperCamelCase : Optional[Any] = dataset.iloc[:, 1:2].values __UpperCamelCase : Tuple = dataset.iloc[:, 2].values __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase : Any = train_test_split(X, y, test_size=0.2, random_state=0) __UpperCamelCase : int = PolynomialFeatures(degree=4) __UpperCamelCase : List[str] = poly_reg.fit_transform(X) __UpperCamelCase : List[Any] = LinearRegression() pol_reg.fit(X_poly, y) def __A ( ) -> List[str]: plt.scatter(__lowerCamelCase , __lowerCamelCase , color="""red""" ) plt.plot(__lowerCamelCase , pol_reg.predict(poly_reg.fit_transform(__lowerCamelCase ) ) , color="""blue""" ) plt.title("""Truth or Bluff (Linear Regression)""" ) plt.xlabel("""Position level""" ) plt.ylabel("""Salary""" ) plt.show() if __name__ == "__main__": viz_polymonial() # Predicting a new result with Polymonial Regression pol_reg.predict(poly_reg.fit_transform([[5.5]])) # output should be 132148.43750003
347
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __UpperCamelCase : Optional[int] = { "configuration_blenderbot": [ "BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BlenderbotConfig", "BlenderbotOnnxConfig", ], "tokenization_blenderbot": ["BlenderbotTokenizer"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : List[Any] = ["BlenderbotTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : Union[str, Any] = [ "BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST", "BlenderbotForCausalLM", "BlenderbotForConditionalGeneration", "BlenderbotModel", "BlenderbotPreTrainedModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : int = [ "TFBlenderbotForConditionalGeneration", "TFBlenderbotModel", "TFBlenderbotPreTrainedModel", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : Tuple = [ "FlaxBlenderbotForConditionalGeneration", "FlaxBlenderbotModel", "FlaxBlenderbotPreTrainedModel", ] if TYPE_CHECKING: from .configuration_blenderbot import ( BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP, BlenderbotConfig, BlenderbotOnnxConfig, ) from .tokenization_blenderbot import BlenderbotTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_blenderbot_fast import BlenderbotTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_blenderbot import ( BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST, BlenderbotForCausalLM, BlenderbotForConditionalGeneration, BlenderbotModel, BlenderbotPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_blenderbot import ( TFBlenderbotForConditionalGeneration, TFBlenderbotModel, TFBlenderbotPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_blenderbot import ( FlaxBlenderbotForConditionalGeneration, FlaxBlenderbotModel, FlaxBlenderbotPreTrainedModel, ) else: import sys __UpperCamelCase : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
347
1
from __future__ import annotations import math from collections import Counter from string import ascii_lowercase def __A ( __lowerCamelCase ) -> None: a , a = analyze_text(__lowerCamelCase ) a = list(""" """ + ascii_lowercase ) # what is our total sum of probabilities. a = sum(single_char_strings.values() ) # one length string a = 0 # for each alpha we go in our dict and if it is in it we calculate entropy for ch in my_alphas: if ch in single_char_strings: a = single_char_strings[ch] a = my_str / all_sum my_fir_sum += prob * math.loga(__lowerCamelCase ) # entropy formula. # print entropy print(f'{round(-1 * my_fir_sum ):.1f}' ) # two len string a = sum(two_char_strings.values() ) a = 0 # for each alpha (two in size) calculate entropy. for cha in my_alphas: for cha in my_alphas: a = cha + cha if sequence in two_char_strings: a = two_char_strings[sequence] a = int(__lowerCamelCase ) / all_sum my_sec_sum += prob * math.loga(__lowerCamelCase ) # print second entropy print(f'{round(-1 * my_sec_sum ):.1f}' ) # print the difference between them print(f'{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}' ) def __A ( __lowerCamelCase ) -> tuple[dict, dict]: a = Counter() # type: ignore a = Counter() # type: ignore single_char_strings[text[-1]] += 1 # first case when we have space at start. two_char_strings[" " + text[0]] += 1 for i in range(0 , len(__lowerCamelCase ) - 1 ): single_char_strings[text[i]] += 1 two_char_strings[text[i : i + 2]] += 1 return single_char_strings, two_char_strings def __A ( ) -> Optional[Any]: import doctest doctest.testmod() # text = ( # "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark " # "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest " # "jointure saw horrible. He private he on be imagine suppose. Fertile " # "beloved evident through no service elderly is. Blind there if every no so " # "at. Own neglected you preferred way sincerity delivered his attempted. To " # "of message cottage windows do besides against uncivil. Delightful " # "unreserved impossible few estimating men favourable see entreaties. She " # "propriety immediate was improving. He or entrance humoured likewise " # "moderate. Much nor game son say feel. Fat make met can must form into " # "gate. Me we offending prevailed discovery. " # ) # calculate_prob(text) if __name__ == "__main__": main()
347
import tempfile import torch from diffusers import IPNDMScheduler from .test_schedulers import SchedulerCommonTest class __lowerCAmelCase ( __magic_name__ ): UpperCamelCase__ = (IPNDMScheduler,) UpperCamelCase__ = (('''num_inference_steps''', 50),) def lowerCamelCase__ ( self :Any , **__magic_name__ :Optional[Any] ): '''simple docstring''' a = {"""num_train_timesteps""": 1000} config.update(**__magic_name__ ) return config def lowerCamelCase__ ( self :Optional[int] , __magic_name__ :Tuple=0 , **__magic_name__ :Optional[int] ): '''simple docstring''' a = dict(self.forward_default_kwargs ) a = kwargs.pop("""num_inference_steps""" , __magic_name__ ) a = self.dummy_sample a = 0.1 * sample a = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: a = self.get_scheduler_config(**__magic_name__ ) a = scheduler_class(**__magic_name__ ) scheduler.set_timesteps(__magic_name__ ) # copy over dummy past residuals a = dummy_past_residuals[:] if time_step is None: a = scheduler.timesteps[len(scheduler.timesteps ) // 2] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__magic_name__ ) a = scheduler_class.from_pretrained(__magic_name__ ) new_scheduler.set_timesteps(__magic_name__ ) # copy over dummy past residuals a = dummy_past_residuals[:] a = scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample a = new_scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" a = scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample a = new_scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def lowerCamelCase__ ( self :Union[str, Any] ): '''simple docstring''' pass def lowerCamelCase__ ( self :List[Any] , __magic_name__ :List[Any]=0 , **__magic_name__ :Any ): '''simple docstring''' a = dict(self.forward_default_kwargs ) a = kwargs.pop("""num_inference_steps""" , __magic_name__ ) a = self.dummy_sample a = 0.1 * sample a = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: a = self.get_scheduler_config() a = scheduler_class(**__magic_name__ ) scheduler.set_timesteps(__magic_name__ ) # copy over dummy past residuals (must be after setting timesteps) a = dummy_past_residuals[:] if time_step is None: a = scheduler.timesteps[len(scheduler.timesteps ) // 2] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__magic_name__ ) a = scheduler_class.from_pretrained(__magic_name__ ) # copy over dummy past residuals new_scheduler.set_timesteps(__magic_name__ ) # copy over dummy past residual (must be after setting timesteps) a = dummy_past_residuals[:] a = scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample a = new_scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" a = scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample a = new_scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def lowerCamelCase__ ( self :Optional[Any] , **__magic_name__ :Optional[int] ): '''simple docstring''' a = self.scheduler_classes[0] a = self.get_scheduler_config(**__magic_name__ ) a = scheduler_class(**__magic_name__ ) a = 10 a = self.dummy_model() a = self.dummy_sample_deter scheduler.set_timesteps(__magic_name__ ) for i, t in enumerate(scheduler.timesteps ): a = model(__magic_name__ , __magic_name__ ) a = scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ ).prev_sample for i, t in enumerate(scheduler.timesteps ): a = model(__magic_name__ , __magic_name__ ) a = scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ ).prev_sample return sample def lowerCamelCase__ ( self :str ): '''simple docstring''' a = dict(self.forward_default_kwargs ) a = kwargs.pop("""num_inference_steps""" , __magic_name__ ) for scheduler_class in self.scheduler_classes: a = self.get_scheduler_config() a = scheduler_class(**__magic_name__ ) a = self.dummy_sample a = 0.1 * sample if num_inference_steps is not None and hasattr(__magic_name__ , """set_timesteps""" ): scheduler.set_timesteps(__magic_name__ ) elif num_inference_steps is not None and not hasattr(__magic_name__ , """set_timesteps""" ): a = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) a = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] a = dummy_past_residuals[:] a = scheduler.timesteps[5] a = scheduler.timesteps[6] a = scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample a = scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) a = scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample a = scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def lowerCamelCase__ ( self :List[Any] ): '''simple docstring''' for timesteps in [100, 1000]: self.check_over_configs(num_train_timesteps=__magic_name__ , time_step=__magic_name__ ) def lowerCamelCase__ ( self :Dict ): '''simple docstring''' for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ): self.check_over_forward(num_inference_steps=__magic_name__ , time_step=__magic_name__ ) def lowerCamelCase__ ( self :Tuple ): '''simple docstring''' a = self.full_loop() a = torch.mean(torch.abs(__magic_name__ ) ) assert abs(result_mean.item() - 254_0529 ) < 10
347
1
import argparse import requests import torch # pip3 install salesforce-lavis # I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch) # also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml # same for Vicuna-13b from lavis.models import load_model_and_preprocess from PIL import Image from transformers import ( AutoTokenizer, BlipImageProcessor, InstructBlipConfig, InstructBlipForConditionalGeneration, InstructBlipProcessor, InstructBlipQFormerConfig, InstructBlipVisionConfig, LlamaConfig, LlamaTokenizerFast, TaConfig, TaTokenizerFast, ) from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD def __A ( ) -> Optional[Any]: a = """https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg""" a = Image.open(requests.get(__lowerCamelCase , stream=__lowerCamelCase ).raw ).convert("""RGB""" ) return image def __A ( __lowerCamelCase ) -> List[Any]: a = [] # fmt: off # vision encoder rename_keys.append(("""visual_encoder.cls_token""", """vision_model.embeddings.class_embedding""") ) rename_keys.append(("""visual_encoder.pos_embed""", """vision_model.embeddings.position_embedding""") ) rename_keys.append(("""visual_encoder.patch_embed.proj.weight""", """vision_model.embeddings.patch_embedding.weight""") ) rename_keys.append(("""visual_encoder.patch_embed.proj.bias""", """vision_model.embeddings.patch_embedding.bias""") ) rename_keys.append(("""ln_vision.weight""", """vision_model.post_layernorm.weight""") ) rename_keys.append(("""ln_vision.bias""", """vision_model.post_layernorm.bias""") ) for i in range(config.vision_config.num_hidden_layers ): rename_keys.append((f'visual_encoder.blocks.{i}.norm1.weight', f'vision_model.encoder.layers.{i}.layer_norm1.weight') ) rename_keys.append((f'visual_encoder.blocks.{i}.norm1.bias', f'vision_model.encoder.layers.{i}.layer_norm1.bias') ) rename_keys.append((f'visual_encoder.blocks.{i}.norm2.weight', f'vision_model.encoder.layers.{i}.layer_norm2.weight') ) rename_keys.append((f'visual_encoder.blocks.{i}.norm2.bias', f'vision_model.encoder.layers.{i}.layer_norm2.bias') ) rename_keys.append((f'visual_encoder.blocks.{i}.attn.qkv.weight', f'vision_model.encoder.layers.{i}.self_attn.qkv.weight') ) rename_keys.append((f'visual_encoder.blocks.{i}.attn.proj.weight', f'vision_model.encoder.layers.{i}.self_attn.projection.weight',) ) rename_keys.append((f'visual_encoder.blocks.{i}.attn.proj.bias', f'vision_model.encoder.layers.{i}.self_attn.projection.bias') ) rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc1.weight', f'vision_model.encoder.layers.{i}.mlp.fc1.weight') ) rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc1.bias', f'vision_model.encoder.layers.{i}.mlp.fc1.bias') ) rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc2.weight', f'vision_model.encoder.layers.{i}.mlp.fc2.weight') ) rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc2.bias', f'vision_model.encoder.layers.{i}.mlp.fc2.bias') ) # QFormer rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.weight""", """qformer.embeddings.layernorm.weight""") ) rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.bias""", """qformer.embeddings.layernorm.bias""") ) # fmt: on return rename_keys def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Optional[int]: a = dct.pop(__lowerCamelCase ) a = val def __A ( __lowerCamelCase , __lowerCamelCase ) -> List[Any]: for i in range(config.vision_config.num_hidden_layers ): # read in original q and v biases a = state_dict.pop(f'visual_encoder.blocks.{i}.attn.q_bias' ) a = state_dict.pop(f'visual_encoder.blocks.{i}.attn.v_bias' ) # next, set bias in the state dict a = torch.cat((q_bias, torch.zeros_like(__lowerCamelCase , requires_grad=__lowerCamelCase ), v_bias) ) a = qkv_bias def __A ( __lowerCamelCase ) -> Any: a = 364 if """coco""" in model_name else 224 a = InstructBlipVisionConfig(image_size=__lowerCamelCase ).to_dict() # make sure the models have proper bos_token_id and eos_token_id set (important for generation) # seems like flan-T5 models don't have bos_token_id properly set? if "t5-xl" in model_name: a = TaConfig.from_pretrained("""google/flan-t5-xl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict() elif "t5-xxl" in model_name: a = TaConfig.from_pretrained("""google/flan-t5-xxl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict() elif "vicuna-7b" in model_name: a = LlamaConfig.from_pretrained("""decapoda-research/llama-7b-hf""" , vocab_size=3_2001 ).to_dict() elif "vicuna-13b" in model_name: a = LlamaConfig.from_pretrained("""decapoda-research/llama-13b-hf""" , vocab_size=3_2001 ).to_dict() else: raise ValueError("""Model name not supported""" ) # the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1 a = InstructBlipQFormerConfig(vocab_size=3_0523 ).to_dict() a = InstructBlipConfig(vision_config=__lowerCamelCase , text_config=__lowerCamelCase , qformer_config=__lowerCamelCase ) return config, image_size @torch.no_grad() def __A ( __lowerCamelCase , __lowerCamelCase=None , __lowerCamelCase=False ) -> Dict: a = AutoTokenizer.from_pretrained("""bert-base-uncased""" , truncation_side="""left""" ) qformer_tokenizer.add_special_tokens({"""bos_token""": """[DEC]"""} ) if "t5" in model_name: a = TaTokenizerFast.from_pretrained("""google/flan-t5-xl""" , truncation_side="""left""" ) elif "vicuna" in model_name: # the following was used in the original implementation: # tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left") # tokenizer.add_special_tokens({"pad_token": "[PAD]"}) # tokenizer.add_special_tokens({"bos_token": "</s>"}) # tokenizer.add_special_tokens({"eos_token": "</s>"}) # tokenizer.add_special_tokens({"unk_token": "</s>"}) a = LlamaTokenizerFast.from_pretrained( """huggyllama/llama-7b""" , truncation_side="""left""" , bos_token="""</s>""" , unk_token="""</s>""" ) tokenizer.add_special_tokens({"""pad_token""": """[PAD]"""} ) a , a = get_blipa_config(__lowerCamelCase ) a = InstructBlipForConditionalGeneration(__lowerCamelCase ).eval() a = { """instructblip-vicuna-7b""": ("""blip2_vicuna_instruct""", """vicuna7b"""), """instructblip-vicuna-13b""": ("""blip2_vicuna_instruct""", """vicuna13b"""), """instructblip-flan-t5-xl""": ("""blip2_t5_instruct""", """flant5xl"""), """instructblip-flan-t5-xxl""": ("""blip2_t5_instruct""", """flant5xxl"""), } a , a = model_name_to_original[model_name] # load original model print("""Loading original model...""" ) a = """cuda:1""" if torch.cuda.is_available() else """cpu""" a = """cuda:2""" if torch.cuda.is_available() else """cpu""" a , a , a = load_model_and_preprocess( name=__lowerCamelCase , model_type=__lowerCamelCase , is_eval=__lowerCamelCase , device=__lowerCamelCase ) original_model.eval() print("""Done!""" ) # update state dict keys a = original_model.state_dict() a = create_rename_keys(__lowerCamelCase ) for src, dest in rename_keys: rename_key(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) # some keys can be renamed efficiently for key, val in state_dict.copy().items(): a = state_dict.pop(__lowerCamelCase ) if key.startswith("""Qformer.bert""" ): a = key.replace("""Qformer.bert""" , """qformer""" ) if "attention.self" in key: a = key.replace("""self""" , """attention""" ) if "llm_proj" in key: a = key.replace("""llm_proj""" , """language_projection""" ) if "t5_proj" in key: a = key.replace("""t5_proj""" , """language_projection""" ) if key.startswith("""llm_model""" ): a = key.replace("""llm_model""" , """language_model""" ) if key.startswith("""t5""" ): a = key.replace("""t5""" , """language""" ) a = val # read in qv biases read_in_q_v_bias(__lowerCamelCase , __lowerCamelCase ) # note: weights get loaded in torch.float32 by default hf_model.load_state_dict(__lowerCamelCase , strict=__lowerCamelCase ) a = load_demo_image() a = """What is unusual about this image?""" # create processor a = BlipImageProcessor( size={"""height""": image_size, """width""": image_size} , image_mean=__lowerCamelCase , image_std=__lowerCamelCase ) a = InstructBlipProcessor( image_processor=__lowerCamelCase , tokenizer=__lowerCamelCase , qformer_tokenizer=__lowerCamelCase , ) a = processor(images=__lowerCamelCase , text=__lowerCamelCase , return_tensors="""pt""" ).to(__lowerCamelCase ) # make sure processor creates exact same pixel values a = vis_processors["""eval"""](__lowerCamelCase ).unsqueeze(0 ).to(__lowerCamelCase ) a = inputs.pixel_values assert torch.allclose(original_pixel_values.to(pixel_values.device ) , __lowerCamelCase ) original_model.to(__lowerCamelCase ) hf_model.to(__lowerCamelCase ) with torch.no_grad(): if "vicuna" in model_name: a = original_model({"""image""": original_pixel_values, """text_input""": [prompt]} ).logits a = hf_model(**__lowerCamelCase ).logits else: a = original_model( {"""image""": original_pixel_values, """text_input""": [prompt], """text_output""": ["""\n"""]} ).logits a = tokenizer("""\n""" , return_tensors="""pt""" ).input_ids.to(__lowerCamelCase ) a = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id , -100 ) a = hf_model(**__lowerCamelCase , labels=__lowerCamelCase ).logits print("""First values of original logits:""" , original_logits[0, :3, :3] ) print("""First values of HF logits:""" , logits[0, :3, :3] ) # assert values assert original_logits.shape == logits.shape a = 1E-4 if """vicuna""" in model_name else 1E-5 assert torch.allclose(original_logits.to(logits.device ) , __lowerCamelCase , atol=__lowerCamelCase ) print("""Looks ok!""" ) print("""Generating with original model...""" ) a = original_model.generate({"""image""": original_pixel_values, """prompt""": prompt} , num_beams=5 ) # important: we need to cast the weights of the HF model to the appropriate type print("""Generating with HF model...""" ) a = hf_model.generate( **__lowerCamelCase , do_sample=__lowerCamelCase , num_beams=5 , max_length=256 , min_length=1 , top_p=0.9 , repetition_penalty=1.5 , length_penalty=1.0 , temperature=1 , ) if "vicuna" in model_name: # convert output id 0 to 2 (eos_token_id) # TODO add this in the generate method? a = 2 print("""Original generation:""" , __lowerCamelCase ) a = processor.batch_decode(__lowerCamelCase , skip_special_tokens=__lowerCamelCase ) a = [text.strip() for text in output_text] print("""HF generation:""" , __lowerCamelCase ) if pytorch_dump_folder_path is not None: processor.save_pretrained(__lowerCamelCase ) hf_model.save_pretrained(__lowerCamelCase ) if push_to_hub: processor.push_to_hub(f'Salesforce/{model_name}' ) hf_model.push_to_hub(f'Salesforce/{model_name}' ) if __name__ == "__main__": __UpperCamelCase : Optional[Any] = argparse.ArgumentParser() __UpperCamelCase : Optional[Any] = [ "instructblip-vicuna-7b", "instructblip-vicuna-13b", "instructblip-flan-t5-xl", "instructblip-flan-t5-xxl", ] parser.add_argument( "--model_name", default="instructblip-flan-t5-xl", choices=choices, type=str, help="Path to hf config.json of model to convert", ) parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument( "--push_to_hub", action="store_true", help="Whether to push the model and processor to the hub after converting", ) __UpperCamelCase : List[str] = parser.parse_args() convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
347
__UpperCamelCase : Dict = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" def __A ( ) -> None: a = input("""Enter message: """ ) a = input("""Enter key [alphanumeric]: """ ) a = input("""Encrypt/Decrypt [e/d]: """ ) if mode.lower().startswith("""e""" ): a = """encrypt""" a = encrypt_message(__lowerCamelCase , __lowerCamelCase ) elif mode.lower().startswith("""d""" ): a = """decrypt""" a = decrypt_message(__lowerCamelCase , __lowerCamelCase ) print(f'\n{mode.title()}ed message:' ) print(__lowerCamelCase ) def __A ( __lowerCamelCase , __lowerCamelCase ) -> str: return translate_message(__lowerCamelCase , __lowerCamelCase , """encrypt""" ) def __A ( __lowerCamelCase , __lowerCamelCase ) -> str: return translate_message(__lowerCamelCase , __lowerCamelCase , """decrypt""" ) def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> str: a = [] a = 0 a = key.upper() for symbol in message: a = LETTERS.find(symbol.upper() ) if num != -1: if mode == "encrypt": num += LETTERS.find(key[key_index] ) elif mode == "decrypt": num -= LETTERS.find(key[key_index] ) num %= len(__lowerCamelCase ) if symbol.isupper(): translated.append(LETTERS[num] ) elif symbol.islower(): translated.append(LETTERS[num].lower() ) key_index += 1 if key_index == len(__lowerCamelCase ): a = 0 else: translated.append(__lowerCamelCase ) return "".join(__lowerCamelCase ) if __name__ == "__main__": main()
347
1
from pathlib import Path import fire from tqdm import tqdm def __A ( __lowerCamelCase="ro" , __lowerCamelCase="en" , __lowerCamelCase="wmt16" , __lowerCamelCase=None ) -> None: try: import datasets except (ModuleNotFoundError, ImportError): raise ImportError("""run pip install datasets""" ) a = f'{src_lang}-{tgt_lang}' print(f'Converting {dataset}-{pair}' ) a = datasets.load_dataset(__lowerCamelCase , __lowerCamelCase ) if save_dir is None: a = f'{dataset}-{pair}' a = Path(__lowerCamelCase ) save_dir.mkdir(exist_ok=__lowerCamelCase ) for split in ds.keys(): print(f'Splitting {split} with {ds[split].num_rows} records' ) # to save to val.source, val.target like summary datasets a = """val""" if split == """validation""" else split a = save_dir.joinpath(f'{fn}.source' ) a = save_dir.joinpath(f'{fn}.target' ) a = src_path.open("""w+""" ) a = tgt_path.open("""w+""" ) # reader is the bottleneck so writing one record at a time doesn't slow things down for x in tqdm(ds[split] ): a = x["""translation"""] src_fp.write(ex[src_lang] + """\n""" ) tgt_fp.write(ex[tgt_lang] + """\n""" ) print(f'Saved {dataset} dataset to {save_dir}' ) if __name__ == "__main__": fire.Fire(download_wmt_dataset)
347
import unittest import numpy as np from transformers import RobertaConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_flax_available(): from transformers.models.roberta.modeling_flax_roberta import ( FlaxRobertaForCausalLM, FlaxRobertaForMaskedLM, FlaxRobertaForMultipleChoice, FlaxRobertaForQuestionAnswering, FlaxRobertaForSequenceClassification, FlaxRobertaForTokenClassification, FlaxRobertaModel, ) class __lowerCAmelCase ( unittest.TestCase ): def __init__( self :Optional[int] , __magic_name__ :Tuple , __magic_name__ :Tuple=13 , __magic_name__ :List[Any]=7 , __magic_name__ :Optional[Any]=True , __magic_name__ :List[Any]=True , __magic_name__ :Union[str, Any]=True , __magic_name__ :List[str]=True , __magic_name__ :str=99 , __magic_name__ :Optional[Any]=32 , __magic_name__ :Union[str, Any]=5 , __magic_name__ :Any=4 , __magic_name__ :int=37 , __magic_name__ :Tuple="gelu" , __magic_name__ :List[str]=0.1 , __magic_name__ :Dict=0.1 , __magic_name__ :Tuple=512 , __magic_name__ :Dict=16 , __magic_name__ :Optional[int]=2 , __magic_name__ :Optional[int]=0.02 , __magic_name__ :Optional[Any]=4 , ): '''simple docstring''' a = parent a = batch_size a = seq_length a = is_training a = use_attention_mask a = use_token_type_ids a = use_labels a = vocab_size a = hidden_size a = num_hidden_layers a = num_attention_heads a = intermediate_size a = hidden_act a = hidden_dropout_prob a = attention_probs_dropout_prob a = max_position_embeddings a = type_vocab_size a = type_sequence_label_size a = initializer_range a = num_choices def lowerCamelCase__ ( self :int ): '''simple docstring''' a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) a = None if self.use_attention_mask: a = random_attention_mask([self.batch_size, self.seq_length] ) a = None if self.use_token_type_ids: a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) a = RobertaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__magic_name__ , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def lowerCamelCase__ ( self :List[Any] ): '''simple docstring''' a = self.prepare_config_and_inputs() a , a , a , a = config_and_inputs a = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask} return config, inputs_dict def lowerCamelCase__ ( self :Dict ): '''simple docstring''' a = self.prepare_config_and_inputs() a , a , a , a = config_and_inputs a = True a = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) a = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, encoder_hidden_states, encoder_attention_mask, ) @require_flax class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ): UpperCamelCase__ = True UpperCamelCase__ = ( ( FlaxRobertaModel, FlaxRobertaForCausalLM, FlaxRobertaForMaskedLM, FlaxRobertaForSequenceClassification, FlaxRobertaForTokenClassification, FlaxRobertaForMultipleChoice, FlaxRobertaForQuestionAnswering, ) if is_flax_available() else () ) def lowerCamelCase__ ( self :Dict ): '''simple docstring''' a = FlaxRobertaModelTester(self ) @slow def lowerCamelCase__ ( self :Optional[Any] ): '''simple docstring''' for model_class_name in self.all_model_classes: a = model_class_name.from_pretrained("""roberta-base""" , from_pt=__magic_name__ ) a = model(np.ones((1, 1) ) ) self.assertIsNotNone(__magic_name__ )
347
1
import unittest import numpy as np from transformers import RoFormerConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.roformer.modeling_flax_roformer import ( FlaxRoFormerForMaskedLM, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerModel, ) class __lowerCAmelCase ( unittest.TestCase ): def __init__( self :List[str] , __magic_name__ :List[str] , __magic_name__ :List[Any]=13 , __magic_name__ :Any=7 , __magic_name__ :Optional[int]=True , __magic_name__ :List[Any]=True , __magic_name__ :Optional[int]=True , __magic_name__ :Union[str, Any]=True , __magic_name__ :Any=99 , __magic_name__ :List[str]=32 , __magic_name__ :List[str]=5 , __magic_name__ :str=4 , __magic_name__ :str=37 , __magic_name__ :Optional[int]="gelu" , __magic_name__ :int=0.1 , __magic_name__ :Dict=0.1 , __magic_name__ :List[str]=512 , __magic_name__ :Tuple=16 , __magic_name__ :Tuple=2 , __magic_name__ :List[str]=0.02 , __magic_name__ :Any=4 , ): '''simple docstring''' a = parent a = batch_size a = seq_length a = is_training a = use_attention_mask a = use_token_type_ids a = use_labels a = vocab_size a = hidden_size a = num_hidden_layers a = num_attention_heads a = intermediate_size a = hidden_act a = hidden_dropout_prob a = attention_probs_dropout_prob a = max_position_embeddings a = type_vocab_size a = type_sequence_label_size a = initializer_range a = num_choices def lowerCamelCase__ ( self :Optional[int] ): '''simple docstring''' a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) a = None if self.use_attention_mask: a = random_attention_mask([self.batch_size, self.seq_length] ) a = None if self.use_token_type_ids: a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) a = RoFormerConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__magic_name__ , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def lowerCamelCase__ ( self :Optional[Any] ): '''simple docstring''' a = self.prepare_config_and_inputs() a , a , a , a = config_and_inputs a = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask} return config, inputs_dict @require_flax class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ): UpperCamelCase__ = True UpperCamelCase__ = ( ( FlaxRoFormerModel, FlaxRoFormerForMaskedLM, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, ) if is_flax_available() else () ) def lowerCamelCase__ ( self :List[Any] ): '''simple docstring''' a = FlaxRoFormerModelTester(self ) @slow def lowerCamelCase__ ( self :List[str] ): '''simple docstring''' for model_class_name in self.all_model_classes: a = model_class_name.from_pretrained("""junnyu/roformer_chinese_small""" , from_pt=__magic_name__ ) a = model(np.ones((1, 1) ) ) self.assertIsNotNone(__magic_name__ ) @require_flax class __lowerCAmelCase ( unittest.TestCase ): @slow def lowerCamelCase__ ( self :Union[str, Any] ): '''simple docstring''' a = FlaxRoFormerForMaskedLM.from_pretrained("""junnyu/roformer_chinese_base""" ) a = jnp.array([[0, 1, 2, 3, 4, 5]] ) a = model(__magic_name__ )[0] a = 5_0000 a = (1, 6, vocab_size) self.assertEqual(output.shape , __magic_name__ ) a = jnp.array( [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]] ) self.assertTrue(jnp.allclose(output[:, :3, :3] , __magic_name__ , atol=1E-4 ) )
347
import io import itertools import json from dataclasses import dataclass from typing import Optional import pyarrow as pa import pyarrow.json as paj import datasets from datasets.table import table_cast from datasets.utils.file_utils import readline __UpperCamelCase : Any = datasets.utils.logging.get_logger(__name__) @dataclass class __lowerCAmelCase ( datasets.BuilderConfig ): UpperCamelCase__ = None UpperCamelCase__ = "utf-8" UpperCamelCase__ = None UpperCamelCase__ = None UpperCamelCase__ = True # deprecated UpperCamelCase__ = None # deprecated UpperCamelCase__ = 10 << 20 # 10MB UpperCamelCase__ = None class __lowerCAmelCase ( datasets.ArrowBasedBuilder ): UpperCamelCase__ = JsonConfig def lowerCamelCase__ ( self :str ): '''simple docstring''' if self.config.block_size is not None: logger.warning("""The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead""" ) a = self.config.block_size if self.config.use_threads is not True: logger.warning( """The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore.""" ) if self.config.newlines_in_values is not None: raise ValueError("""The JSON loader parameter `newlines_in_values` is no longer supported""" ) return datasets.DatasetInfo(features=self.config.features ) def lowerCamelCase__ ( self :Tuple , __magic_name__ :str ): '''simple docstring''' if not self.config.data_files: raise ValueError(F'At least one data file must be specified, but got data_files={self.config.data_files}' ) a = dl_manager.download_and_extract(self.config.data_files ) if isinstance(__magic_name__ , (str, list, tuple) ): a = data_files if isinstance(__magic_name__ , __magic_name__ ): a = [files] a = [dl_manager.iter_files(__magic_name__ ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files} )] a = [] for split_name, files in data_files.items(): if isinstance(__magic_name__ , __magic_name__ ): a = [files] a = [dl_manager.iter_files(__magic_name__ ) for file in files] splits.append(datasets.SplitGenerator(name=__magic_name__ , gen_kwargs={"""files""": files} ) ) return splits def lowerCamelCase__ ( self :List[str] , __magic_name__ :pa.Table ): '''simple docstring''' if self.config.features is not None: # adding missing columns for column_name in set(self.config.features ) - set(pa_table.column_names ): a = self.config.features.arrow_schema.field(__magic_name__ ).type a = pa_table.append_column(__magic_name__ , pa.array([None] * len(__magic_name__ ) , type=__magic_name__ ) ) # more expensive cast to support nested structures with keys in a different order # allows str <-> int/float or str to Audio for example a = table_cast(__magic_name__ , self.config.features.arrow_schema ) return pa_table def lowerCamelCase__ ( self :Optional[int] , __magic_name__ :Union[str, Any] ): '''simple docstring''' for file_idx, file in enumerate(itertools.chain.from_iterable(__magic_name__ ) ): # If the file is one json object and if we need to look at the list of items in one specific field if self.config.field is not None: with open(__magic_name__ , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f: a = json.load(__magic_name__ ) # We keep only the field we are interested in a = dataset[self.config.field] # We accept two format: a list of dicts or a dict of lists if isinstance(__magic_name__ , (list, tuple) ): a = set().union(*[row.keys() for row in dataset] ) a = {col: [row.get(__magic_name__ ) for row in dataset] for col in keys} else: a = dataset a = pa.Table.from_pydict(__magic_name__ ) yield file_idx, self._cast_table(__magic_name__ ) # If the file has one json object per line else: with open(__magic_name__ , """rb""" ) as f: a = 0 # Use block_size equal to the chunk size divided by 32 to leverage multithreading # Set a default minimum value of 16kB if the chunk size is really small a = max(self.config.chunksize // 32 , 16 << 10 ) a = ( self.config.encoding_errors if self.config.encoding_errors is not None else """strict""" ) while True: a = f.read(self.config.chunksize ) if not batch: break # Finish current line try: batch += f.readline() except (AttributeError, io.UnsupportedOperation): batch += readline(__magic_name__ ) # PyArrow only accepts utf-8 encoded bytes if self.config.encoding != "utf-8": a = batch.decode(self.config.encoding , errors=__magic_name__ ).encode("""utf-8""" ) try: while True: try: a = paj.read_json( io.BytesIO(__magic_name__ ) , read_options=paj.ReadOptions(block_size=__magic_name__ ) ) break except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e: if ( isinstance(__magic_name__ , pa.ArrowInvalid ) and "straddling" not in str(__magic_name__ ) or block_size > len(__magic_name__ ) ): raise else: # Increase the block size in case it was too small. # The block size will be reset for the next file. logger.debug( F'Batch of {len(__magic_name__ )} bytes couldn\'t be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.' ) block_size *= 2 except pa.ArrowInvalid as e: try: with open( __magic_name__ , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f: a = json.load(__magic_name__ ) except json.JSONDecodeError: logger.error(F'Failed to read file \'{file}\' with error {type(__magic_name__ )}: {e}' ) raise e # If possible, parse the file as a list of json objects and exit the loop if isinstance(__magic_name__ , __magic_name__ ): # list is the only sequence type supported in JSON try: a = set().union(*[row.keys() for row in dataset] ) a = {col: [row.get(__magic_name__ ) for row in dataset] for col in keys} a = pa.Table.from_pydict(__magic_name__ ) except (pa.ArrowInvalid, AttributeError) as e: logger.error(F'Failed to read file \'{file}\' with error {type(__magic_name__ )}: {e}' ) raise ValueError(F'Not able to read records in the JSON file at {file}.' ) from None yield file_idx, self._cast_table(__magic_name__ ) break else: logger.error(F'Failed to read file \'{file}\' with error {type(__magic_name__ )}: {e}' ) raise ValueError( F'Not able to read records in the JSON file at {file}. ' F'You should probably indicate the field of the JSON file containing your records. ' F'This JSON file contain the following fields: {str(list(dataset.keys() ) )}. ' F'Select the correct one and provide it as `field=\'XXX\'` to the dataset loading method. ' ) from None # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(__magic_name__ ) batch_idx += 1
347
1
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __UpperCamelCase : List[Any] = logging.get_logger(__name__) __UpperCamelCase : Optional[Any] = { "junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json", "junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json", "junnyu/roformer_chinese_char_small": ( "https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json" ), "junnyu/roformer_chinese_char_base": ( "https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json" ), "junnyu/roformer_small_discriminator": ( "https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json" ), "junnyu/roformer_small_generator": ( "https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json" ), # See all RoFormer models at https://huggingface.co/models?filter=roformer } class __lowerCAmelCase ( __magic_name__ ): UpperCamelCase__ = '''roformer''' def __init__( self :Optional[int] , __magic_name__ :Optional[int]=5_0000 , __magic_name__ :Optional[Any]=None , __magic_name__ :int=768 , __magic_name__ :List[Any]=12 , __magic_name__ :Any=12 , __magic_name__ :Any=3072 , __magic_name__ :Any="gelu" , __magic_name__ :int=0.1 , __magic_name__ :Any=0.1 , __magic_name__ :int=1536 , __magic_name__ :Optional[Any]=2 , __magic_name__ :Any=0.02 , __magic_name__ :Tuple=1E-1_2 , __magic_name__ :Tuple=0 , __magic_name__ :List[Any]=False , __magic_name__ :int=True , **__magic_name__ :List[str] , ): '''simple docstring''' super().__init__(pad_token_id=__magic_name__ , **__magic_name__ ) a = vocab_size a = hidden_size if embedding_size is None else embedding_size a = hidden_size a = num_hidden_layers a = num_attention_heads a = hidden_act a = intermediate_size a = hidden_dropout_prob a = attention_probs_dropout_prob a = max_position_embeddings a = type_vocab_size a = initializer_range a = layer_norm_eps a = rotary_value a = use_cache class __lowerCAmelCase ( __magic_name__ ): @property def lowerCamelCase__ ( self :List[str] ): '''simple docstring''' if self.task == "multiple-choice": a = {0: """batch""", 1: """choice""", 2: """sequence"""} else: a = {0: """batch""", 1: """sequence"""} a = {0: """batch""", 1: """sequence"""} return OrderedDict( [ ("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ("""token_type_ids""", dynamic_axis), ] )
347
import warnings from typing import Dict, List, Optional, Tuple from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging __UpperCamelCase : Dict = logging.get_logger(__name__) class __lowerCAmelCase ( __magic_name__ ): UpperCamelCase__ = ['''input_ids''', '''attention_mask'''] def __init__( self :List[str] , __magic_name__ :int="</s>" , __magic_name__ :List[Any]="<unk>" , __magic_name__ :Optional[Any]="<pad>" , __magic_name__ :Optional[int]=125 , __magic_name__ :List[str]=None , **__magic_name__ :List[str] , ): '''simple docstring''' if extra_ids > 0 and additional_special_tokens is None: a = [F'<extra_id_{i}>' for i in range(__magic_name__ )] elif extra_ids > 0 and additional_special_tokens is not None: # Check that we have the right number of extra_id special tokens a = len(set(filter(lambda __magic_name__ : bool("""extra_id""" in str(__magic_name__ ) ) , __magic_name__ ) ) ) if extra_tokens != extra_ids: raise ValueError( F'Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are' """ provided to ByT5Tokenizer. In this case the additional_special_tokens must include the""" """ extra_ids tokens""" ) a = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else pad_token a = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else eos_token a = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else unk_token super().__init__( eos_token=__magic_name__ , unk_token=__magic_name__ , pad_token=__magic_name__ , extra_ids=__magic_name__ , additional_special_tokens=__magic_name__ , **__magic_name__ , ) a = extra_ids a = 2**8 # utf is 8 bits # define special tokens dict a = { self.pad_token: 0, self.eos_token: 1, self.unk_token: 2, } a = len(self.special_tokens_encoder ) a = len(__magic_name__ ) for i, token in enumerate(__magic_name__ ): a = self.vocab_size + i - n a = {v: k for k, v in self.special_tokens_encoder.items()} @property def lowerCamelCase__ ( self :List[Any] ): '''simple docstring''' return self._utf_vocab_size + self._num_special_tokens + self._extra_ids def lowerCamelCase__ ( self :Any , __magic_name__ :List[int] , __magic_name__ :Optional[List[int]] = None , __magic_name__ :bool = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__magic_name__ , token_ids_a=__magic_name__ , already_has_special_tokens=__magic_name__ ) # normal case: some special tokens if token_ids_a is None: return ([0] * len(__magic_name__ )) + [1] return ([0] * len(__magic_name__ )) + [1] + ([0] * len(__magic_name__ )) + [1] def lowerCamelCase__ ( self :str , __magic_name__ :List[int] ): '''simple docstring''' if len(__magic_name__ ) > 0 and token_ids[-1] == self.eos_token_id: warnings.warn( F'This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated' """ eos tokens being added.""" ) return token_ids else: return token_ids + [self.eos_token_id] def lowerCamelCase__ ( self :Union[str, Any] , __magic_name__ :List[int] , __magic_name__ :Optional[List[int]] = None ): '''simple docstring''' a = [self.eos_token_id] if token_ids_a is None: return len(token_ids_a + eos ) * [0] return len(token_ids_a + eos + token_ids_a + eos ) * [0] def lowerCamelCase__ ( self :Union[str, Any] , __magic_name__ :List[int] , __magic_name__ :Optional[List[int]] = None ): '''simple docstring''' a = self._add_eos_if_not_present(__magic_name__ ) if token_ids_a is None: return token_ids_a else: a = self._add_eos_if_not_present(__magic_name__ ) return token_ids_a + token_ids_a def lowerCamelCase__ ( self :List[str] , __magic_name__ :str ): '''simple docstring''' a = [chr(__magic_name__ ) for i in text.encode("""utf-8""" )] return tokens def lowerCamelCase__ ( self :Tuple , __magic_name__ :str ): '''simple docstring''' if token in self.special_tokens_encoder: a = self.special_tokens_encoder[token] elif token in self.added_tokens_encoder: a = self.added_tokens_encoder[token] elif len(__magic_name__ ) != 1: a = self.unk_token_id else: a = ord(__magic_name__ ) + self._num_special_tokens return token_id def lowerCamelCase__ ( self :List[str] , __magic_name__ :Dict ): '''simple docstring''' if index in self.special_tokens_decoder: a = self.special_tokens_decoder[index] else: a = chr(index - self._num_special_tokens ) return token def lowerCamelCase__ ( self :Tuple , __magic_name__ :Optional[int] ): '''simple docstring''' a = b"""""" for token in tokens: if token in self.special_tokens_decoder: a = self.special_tokens_decoder[token].encode("""utf-8""" ) elif token in self.added_tokens_decoder: a = self.special_tokens_decoder[token].encode("""utf-8""" ) elif token in self.special_tokens_encoder: a = token.encode("""utf-8""" ) elif token in self.added_tokens_encoder: a = token.encode("""utf-8""" ) else: a = bytes([ord(__magic_name__ )] ) bstring += tok_string a = bstring.decode("""utf-8""" , errors="""ignore""" ) return string def lowerCamelCase__ ( self :Optional[Any] , __magic_name__ :str , __magic_name__ :Optional[str] = None ): '''simple docstring''' return ()
347
1
import argparse import collections import os import re from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_table.py __UpperCamelCase : List[Any] = "src/transformers" __UpperCamelCase : List[Any] = "docs/source/en" __UpperCamelCase : Tuple = "." def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> str: with open(__lowerCamelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f: a = f.readlines() # Find the start prompt. a = 0 while not lines[start_index].startswith(__lowerCamelCase ): start_index += 1 start_index += 1 a = start_index while not lines[end_index].startswith(__lowerCamelCase ): end_index += 1 end_index -= 1 while len(lines[start_index] ) <= 1: start_index += 1 while len(lines[end_index] ) <= 1: end_index -= 1 end_index += 1 return "".join(lines[start_index:end_index] ), start_index, end_index, lines # Add here suffixes that are used to identify models, separated by | __UpperCamelCase : int = "Model|Encoder|Decoder|ForConditionalGeneration" # Regexes that match TF/Flax/PT model names. __UpperCamelCase : Tuple = re.compile(R"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)") __UpperCamelCase : Union[str, Any] = re.compile(R"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)") # Will match any TF or Flax model too so need to be in an else branch afterthe two previous regexes. __UpperCamelCase : List[Any] = re.compile(R"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)") # This is to make sure the transformers module imported is the one in the repo. __UpperCamelCase : int = direct_transformers_import(TRANSFORMERS_PATH) def __A ( __lowerCamelCase ) -> Tuple: a = re.finditer(""".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)""" , __lowerCamelCase ) return [m.group(0 ) for m in matches] def __A ( __lowerCamelCase , __lowerCamelCase ) -> Optional[Any]: a = 2 if text == """✅""" or text == """❌""" else len(__lowerCamelCase ) a = (width - text_length) // 2 a = width - text_length - left_indent return " " * left_indent + text + " " * right_indent def __A ( ) -> Union[str, Any]: a = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES a = { name: config_maping_names[code] for code, name in transformers_module.MODEL_NAMES_MAPPING.items() if code in config_maping_names } a = {name: config.replace("""Config""" , """""" ) for name, config in model_name_to_config.items()} # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax. a = collections.defaultdict(__lowerCamelCase ) a = collections.defaultdict(__lowerCamelCase ) a = collections.defaultdict(__lowerCamelCase ) a = collections.defaultdict(__lowerCamelCase ) a = collections.defaultdict(__lowerCamelCase ) # Let's lookup through all transformers object (once). for attr_name in dir(__lowerCamelCase ): a = None if attr_name.endswith("""Tokenizer""" ): a = slow_tokenizers a = attr_name[:-9] elif attr_name.endswith("""TokenizerFast""" ): a = fast_tokenizers a = attr_name[:-13] elif _re_tf_models.match(__lowerCamelCase ) is not None: a = tf_models a = _re_tf_models.match(__lowerCamelCase ).groups()[0] elif _re_flax_models.match(__lowerCamelCase ) is not None: a = flax_models a = _re_flax_models.match(__lowerCamelCase ).groups()[0] elif _re_pt_models.match(__lowerCamelCase ) is not None: a = pt_models a = _re_pt_models.match(__lowerCamelCase ).groups()[0] if lookup_dict is not None: while len(__lowerCamelCase ) > 0: if attr_name in model_name_to_prefix.values(): a = True break # Try again after removing the last word in the name a = """""".join(camel_case_split(__lowerCamelCase )[:-1] ) # Let's build that table! a = list(model_name_to_config.keys() ) model_names.sort(key=str.lower ) a = ["""Model""", """Tokenizer slow""", """Tokenizer fast""", """PyTorch support""", """TensorFlow support""", """Flax Support"""] # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side). a = [len(__lowerCamelCase ) + 2 for c in columns] a = max([len(__lowerCamelCase ) for name in model_names] ) + 2 # Build the table per se a = """|""" + """|""".join([_center_text(__lowerCamelCase , __lowerCamelCase ) for c, w in zip(__lowerCamelCase , __lowerCamelCase )] ) + """|\n""" # Use ":-----:" format to center-aligned table cell texts table += "|" + "|".join([""":""" + """-""" * (w - 2) + """:""" for w in widths] ) + "|\n" a = {True: """✅""", False: """❌"""} for name in model_names: a = model_name_to_prefix[name] a = [ name, check[slow_tokenizers[prefix]], check[fast_tokenizers[prefix]], check[pt_models[prefix]], check[tf_models[prefix]], check[flax_models[prefix]], ] table += "|" + "|".join([_center_text(__lowerCamelCase , __lowerCamelCase ) for l, w in zip(__lowerCamelCase , __lowerCamelCase )] ) + "|\n" return table def __A ( __lowerCamelCase=False ) -> List[Any]: a , a , a , a = _find_text_in_file( filename=os.path.join(__lowerCamelCase , """index.md""" ) , start_prompt="""<!--This table is updated automatically from the auto modules""" , end_prompt="""<!-- End table-->""" , ) a = get_model_table_from_auto_modules() if current_table != new_table: if overwrite: with open(os.path.join(__lowerCamelCase , """index.md""" ) , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f: f.writelines(lines[:start_index] + [new_table] + lines[end_index:] ) else: raise ValueError( """The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this.""" ) if __name__ == "__main__": __UpperCamelCase : Any = argparse.ArgumentParser() parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.") __UpperCamelCase : List[Any] = parser.parse_args() check_model_table(args.fix_and_overwrite)
347
import copy import unittest from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_MULTIPLE_CHOICE_MAPPING, MODEL_FOR_QUESTION_ANSWERING_MAPPING, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, LayoutLMvaConfig, LayoutLMvaForQuestionAnswering, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaModel, ) from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class __lowerCAmelCase : def __init__( self :Optional[int] , __magic_name__ :str , __magic_name__ :int=2 , __magic_name__ :List[str]=3 , __magic_name__ :Optional[int]=4 , __magic_name__ :str=2 , __magic_name__ :Any=7 , __magic_name__ :Optional[Any]=True , __magic_name__ :List[Any]=True , __magic_name__ :Optional[int]=True , __magic_name__ :Dict=True , __magic_name__ :List[Any]=99 , __magic_name__ :Dict=36 , __magic_name__ :Optional[Any]=3 , __magic_name__ :str=4 , __magic_name__ :Optional[Any]=37 , __magic_name__ :Dict="gelu" , __magic_name__ :Any=0.1 , __magic_name__ :Union[str, Any]=0.1 , __magic_name__ :Dict=512 , __magic_name__ :str=16 , __magic_name__ :List[Any]=2 , __magic_name__ :Tuple=0.02 , __magic_name__ :Any=6 , __magic_name__ :Optional[int]=6 , __magic_name__ :Tuple=3 , __magic_name__ :str=4 , __magic_name__ :List[str]=None , __magic_name__ :str=1000 , ): '''simple docstring''' a = parent a = batch_size a = num_channels a = image_size a = patch_size a = text_seq_length a = is_training a = use_input_mask a = use_token_type_ids a = use_labels a = vocab_size a = hidden_size a = num_hidden_layers a = num_attention_heads a = intermediate_size a = hidden_act a = hidden_dropout_prob a = attention_probs_dropout_prob a = max_position_embeddings a = type_vocab_size a = type_sequence_label_size a = initializer_range a = coordinate_size a = shape_size a = num_labels a = num_choices a = scope a = range_bbox # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token) a = text_seq_length a = (image_size // patch_size) ** 2 + 1 a = self.text_seq_length + self.image_seq_length def lowerCamelCase__ ( self :List[str] ): '''simple docstring''' a = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size ) a = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox ) # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: a = bbox[i, j, 3] a = bbox[i, j, 1] a = t if bbox[i, j, 2] < bbox[i, j, 0]: a = bbox[i, j, 2] a = bbox[i, j, 0] a = t a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) a = None if self.use_input_mask: a = random_attention_mask([self.batch_size, self.text_seq_length] ) a = None if self.use_token_type_ids: a = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size ) a = None a = None if self.use_labels: a = ids_tensor([self.batch_size] , self.type_sequence_label_size ) a = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels ) a = LayoutLMvaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , ) return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels def lowerCamelCase__ ( self :int , __magic_name__ :Union[str, Any] , __magic_name__ :List[str] , __magic_name__ :List[str] , __magic_name__ :int , __magic_name__ :Tuple , __magic_name__ :int , __magic_name__ :Tuple , __magic_name__ :int ): '''simple docstring''' a = LayoutLMvaModel(config=__magic_name__ ) model.to(__magic_name__ ) model.eval() # text + image a = model(__magic_name__ , pixel_values=__magic_name__ ) a = model( __magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ ) a = model(__magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , token_type_ids=__magic_name__ ) a = model(__magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) # text only a = model(__magic_name__ ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) ) # image only a = model(pixel_values=__magic_name__ ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) ) def lowerCamelCase__ ( self :Optional[Any] , __magic_name__ :List[Any] , __magic_name__ :Optional[int] , __magic_name__ :Tuple , __magic_name__ :Union[str, Any] , __magic_name__ :int , __magic_name__ :Optional[int] , __magic_name__ :List[str] , __magic_name__ :List[str] ): '''simple docstring''' a = self.num_labels a = LayoutLMvaForSequenceClassification(__magic_name__ ) model.to(__magic_name__ ) model.eval() a = model( __magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , labels=__magic_name__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase__ ( self :Tuple , __magic_name__ :Dict , __magic_name__ :Optional[Any] , __magic_name__ :Optional[Any] , __magic_name__ :Dict , __magic_name__ :Dict , __magic_name__ :int , __magic_name__ :List[str] , __magic_name__ :Tuple ): '''simple docstring''' a = self.num_labels a = LayoutLMvaForTokenClassification(config=__magic_name__ ) model.to(__magic_name__ ) model.eval() a = model( __magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , labels=__magic_name__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) ) def lowerCamelCase__ ( self :Optional[int] , __magic_name__ :Tuple , __magic_name__ :str , __magic_name__ :int , __magic_name__ :Optional[int] , __magic_name__ :str , __magic_name__ :List[str] , __magic_name__ :Optional[int] , __magic_name__ :Optional[Any] ): '''simple docstring''' a = LayoutLMvaForQuestionAnswering(config=__magic_name__ ) model.to(__magic_name__ ) model.eval() a = model( __magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , start_positions=__magic_name__ , end_positions=__magic_name__ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCamelCase__ ( self :Union[str, Any] ): '''simple docstring''' a = self.prepare_config_and_inputs() ( ( a ) , ( a ) , ( a ) , ( a ) , ( a ) , ( a ) , ( a ) , ( a ) , ) = config_and_inputs a = { """input_ids""": input_ids, """bbox""": bbox, """pixel_values""": pixel_values, """token_type_ids""": token_type_ids, """attention_mask""": input_mask, } return config, inputs_dict @require_torch class __lowerCAmelCase ( __magic_name__ , __magic_name__ , unittest.TestCase ): UpperCamelCase__ = False UpperCamelCase__ = False UpperCamelCase__ = False UpperCamelCase__ = ( ( LayoutLMvaModel, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaForQuestionAnswering, ) if is_torch_available() else () ) UpperCamelCase__ = ( {'''document-question-answering''': LayoutLMvaForQuestionAnswering, '''feature-extraction''': LayoutLMvaModel} if is_torch_available() else {} ) def lowerCamelCase__ ( self :List[str] , __magic_name__ :Union[str, Any] , __magic_name__ :List[str] , __magic_name__ :Optional[Any] , __magic_name__ :List[str] , __magic_name__ :List[Any] ): '''simple docstring''' return True def lowerCamelCase__ ( self :int ): '''simple docstring''' a = LayoutLMvaModelTester(self ) a = ConfigTester(self , config_class=__magic_name__ , hidden_size=37 ) def lowerCamelCase__ ( self :Optional[Any] , __magic_name__ :Dict , __magic_name__ :Dict , __magic_name__ :Any=False ): '''simple docstring''' a = copy.deepcopy(__magic_name__ ) if model_class in get_values(__magic_name__ ): a = { k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous() if isinstance(__magic_name__ , torch.Tensor ) and v.ndim > 1 else v for k, v in inputs_dict.items() } if return_labels: if model_class in get_values(__magic_name__ ): a = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=__magic_name__ ) elif model_class in get_values(__magic_name__ ): a = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__magic_name__ ) a = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__magic_name__ ) elif model_class in [ *get_values(__magic_name__ ), ]: a = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__magic_name__ ) elif model_class in [ *get_values(__magic_name__ ), ]: a = torch.zeros( (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=__magic_name__ , ) return inputs_dict def lowerCamelCase__ ( self :str ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCamelCase__ ( self :Union[str, Any] ): '''simple docstring''' a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__magic_name__ ) def lowerCamelCase__ ( self :Tuple ): '''simple docstring''' a = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: a = type self.model_tester.create_and_check_model(*__magic_name__ ) def lowerCamelCase__ ( self :Tuple ): '''simple docstring''' a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*__magic_name__ ) def lowerCamelCase__ ( self :Union[str, Any] ): '''simple docstring''' a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__magic_name__ ) def lowerCamelCase__ ( self :int ): '''simple docstring''' a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__magic_name__ ) @slow def lowerCamelCase__ ( self :int ): '''simple docstring''' for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a = LayoutLMvaModel.from_pretrained(__magic_name__ ) self.assertIsNotNone(__magic_name__ ) def __A ( ) -> str: a = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch class __lowerCAmelCase ( unittest.TestCase ): @cached_property def lowerCamelCase__ ( self :Optional[Any] ): '''simple docstring''' return LayoutLMvaImageProcessor(apply_ocr=__magic_name__ ) if is_vision_available() else None @slow def lowerCamelCase__ ( self :Optional[Any] ): '''simple docstring''' a = LayoutLMvaModel.from_pretrained("""microsoft/layoutlmv3-base""" ).to(__magic_name__ ) a = self.default_image_processor a = prepare_img() a = image_processor(images=__magic_name__ , return_tensors="""pt""" ).pixel_values.to(__magic_name__ ) a = torch.tensor([[1, 2]] ) a = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 ) # forward pass a = model( input_ids=input_ids.to(__magic_name__ ) , bbox=bbox.to(__magic_name__ ) , pixel_values=pixel_values.to(__magic_name__ ) , ) # verify the logits a = torch.Size((1, 199, 768) ) self.assertEqual(outputs.last_hidden_state.shape , __magic_name__ ) a = torch.tensor( [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] ).to(__magic_name__ ) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , __magic_name__ , atol=1E-4 ) )
347
1
import os from typing import Optional import fsspec from fsspec.archive import AbstractArchiveFileSystem from fsspec.utils import DEFAULT_BLOCK_SIZE class __lowerCAmelCase ( __magic_name__ ): UpperCamelCase__ = '''''' UpperCamelCase__ = ( None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz ) UpperCamelCase__ = None # compression type in fsspec. ex: "gzip" UpperCamelCase__ = None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz def __init__( self :str , __magic_name__ :str = "" , __magic_name__ :Optional[str] = None , __magic_name__ :Optional[dict] = None , **__magic_name__ :Union[str, Any] ): '''simple docstring''' super().__init__(self , **__magic_name__ ) # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode a = fsspec.open( __magic_name__ , mode="""rb""" , protocol=__magic_name__ , compression=self.compression , client_kwargs={ """requote_redirect_url""": False, # see https://github.com/huggingface/datasets/pull/5459 """trust_env""": True, # Enable reading proxy env variables. **(target_options or {}).pop("""client_kwargs""" , {} ), # To avoid issues if it was already passed. } , **(target_options or {}) , ) a = os.path.basename(self.file.path.split("""::""" )[0] ) a = ( self.compressed_name[: self.compressed_name.rindex(""".""" )] if """.""" in self.compressed_name else self.compressed_name ) a = None @classmethod def lowerCamelCase__ ( cls :Dict , __magic_name__ :Union[str, Any] ): '''simple docstring''' return super()._strip_protocol(__magic_name__ ).lstrip("""/""" ) def lowerCamelCase__ ( self :str ): '''simple docstring''' if self.dir_cache is None: a = {**self.file.fs.info(self.file.path ), """name""": self.uncompressed_name} a = {f["""name"""]: f} def lowerCamelCase__ ( self :Optional[int] , __magic_name__ :str ): '''simple docstring''' return self.file.open().read() def lowerCamelCase__ ( self :Optional[Any] , __magic_name__ :str , __magic_name__ :str = "rb" , __magic_name__ :List[str]=None , __magic_name__ :Optional[Any]=True , __magic_name__ :Dict=None , **__magic_name__ :Tuple , ): '''simple docstring''' a = self._strip_protocol(__magic_name__ ) if mode != "rb": raise ValueError(F'Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'' ) return self.file.open() class __lowerCAmelCase ( __magic_name__ ): UpperCamelCase__ = '''bz2''' UpperCamelCase__ = '''bz2''' UpperCamelCase__ = '''.bz2''' class __lowerCAmelCase ( __magic_name__ ): UpperCamelCase__ = '''gzip''' UpperCamelCase__ = '''gzip''' UpperCamelCase__ = '''.gz''' class __lowerCAmelCase ( __magic_name__ ): UpperCamelCase__ = '''lz4''' UpperCamelCase__ = '''lz4''' UpperCamelCase__ = '''.lz4''' class __lowerCAmelCase ( __magic_name__ ): UpperCamelCase__ = '''xz''' UpperCamelCase__ = '''xz''' UpperCamelCase__ = '''.xz''' class __lowerCAmelCase ( __magic_name__ ): UpperCamelCase__ = '''zstd''' UpperCamelCase__ = '''zstd''' UpperCamelCase__ = '''.zst''' def __init__( self :List[str] , __magic_name__ :str , __magic_name__ :str = "rb" , __magic_name__ :Optional[str] = None , __magic_name__ :Optional[dict] = None , __magic_name__ :int = DEFAULT_BLOCK_SIZE , **__magic_name__ :str , ): '''simple docstring''' super().__init__( fo=__magic_name__ , mode=__magic_name__ , target_protocol=__magic_name__ , target_options=__magic_name__ , block_size=__magic_name__ , **__magic_name__ , ) # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2: # # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open # out.close = close # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only # # see https://github.com/intake/filesystem_spec/issues/725 a = self.file.__enter__ class __lowerCAmelCase : def __init__( self :Optional[Any] , __magic_name__ :Union[str, Any] ): '''simple docstring''' a = file_ def __enter__( self :Optional[Any] ): '''simple docstring''' self._file.__enter__() return self def __exit__( self :str , *__magic_name__ :int , **__magic_name__ :Union[str, Any] ): '''simple docstring''' self._file.__exit__(*__magic_name__ , **__magic_name__ ) def __iter__( self :Optional[int] ): '''simple docstring''' return iter(self._file ) def lowerCamelCase__ ( self :Dict ): '''simple docstring''' return next(self._file ) def __getattr__( self :Optional[int] , __magic_name__ :Optional[Any] ): '''simple docstring''' return getattr(self._file , __magic_name__ ) def fixed_enter(*__magic_name__ :str , **__magic_name__ :Optional[int] ): return WrappedFile(_enter(*__magic_name__ , **__magic_name__ ) ) a = fixed_enter
347
from copy import deepcopy class __lowerCAmelCase : def __init__( self :Union[str, Any] , __magic_name__ :list[int] | None = None , __magic_name__ :int | None = None ): '''simple docstring''' if arr is None and size is not None: a = size a = [0] * size elif arr is not None: self.init(__magic_name__ ) else: raise ValueError("""Either arr or size must be specified""" ) def lowerCamelCase__ ( self :Dict , __magic_name__ :list[int] ): '''simple docstring''' a = len(__magic_name__ ) a = deepcopy(__magic_name__ ) for i in range(1 , self.size ): a = self.next_(__magic_name__ ) if j < self.size: self.tree[j] += self.tree[i] def lowerCamelCase__ ( self :Tuple ): '''simple docstring''' a = self.tree[:] for i in range(self.size - 1 , 0 , -1 ): a = self.next_(__magic_name__ ) if j < self.size: arr[j] -= arr[i] return arr @staticmethod def lowerCamelCase__ ( __magic_name__ :int ): '''simple docstring''' return index + (index & (-index)) @staticmethod def lowerCamelCase__ ( __magic_name__ :int ): '''simple docstring''' return index - (index & (-index)) def lowerCamelCase__ ( self :Any , __magic_name__ :int , __magic_name__ :int ): '''simple docstring''' if index == 0: self.tree[0] += value return while index < self.size: self.tree[index] += value a = self.next_(__magic_name__ ) def lowerCamelCase__ ( self :Optional[Any] , __magic_name__ :int , __magic_name__ :int ): '''simple docstring''' self.add(__magic_name__ , value - self.get(__magic_name__ ) ) def lowerCamelCase__ ( self :int , __magic_name__ :int ): '''simple docstring''' if right == 0: return 0 a = self.tree[0] right -= 1 # make right inclusive while right > 0: result += self.tree[right] a = self.prev(__magic_name__ ) return result def lowerCamelCase__ ( self :int , __magic_name__ :int , __magic_name__ :int ): '''simple docstring''' return self.prefix(__magic_name__ ) - self.prefix(__magic_name__ ) def lowerCamelCase__ ( self :Tuple , __magic_name__ :int ): '''simple docstring''' return self.query(__magic_name__ , index + 1 ) def lowerCamelCase__ ( self :Dict , __magic_name__ :int ): '''simple docstring''' value -= self.tree[0] if value < 0: return -1 a = 1 # Largest power of 2 <= size while j * 2 < self.size: j *= 2 a = 0 while j > 0: if i + j < self.size and self.tree[i + j] <= value: value -= self.tree[i + j] i += j j //= 2 return i if __name__ == "__main__": import doctest doctest.testmod()
347
1
import functools import gc import inspect import torch from .imports import is_npu_available, is_xpu_available def __A ( *__lowerCamelCase ) -> Tuple: if not isinstance(__lowerCamelCase , __lowerCamelCase ): a = list(__lowerCamelCase ) for i in range(len(__lowerCamelCase ) ): a = None gc.collect() if is_xpu_available(): torch.xpu.empty_cache() elif is_npu_available(): torch.npu.empty_cache() else: torch.cuda.empty_cache() return objects def __A ( __lowerCamelCase ) -> bool: a = [ """CUDA out of memory.""", # CUDA OOM """cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.""", # CUDNN SNAFU """DefaultCPUAllocator: can't allocate memory""", # CPU OOM ] if isinstance(__lowerCamelCase , __lowerCamelCase ) and len(exception.args ) == 1: return any(err in exception.args[0] for err in _statements ) return False def __A ( __lowerCamelCase = None , __lowerCamelCase = 128 ) -> List[str]: if function is None: return functools.partial(__lowerCamelCase , starting_batch_size=__lowerCamelCase ) a = starting_batch_size def decorator(*__lowerCamelCase , **__lowerCamelCase ): nonlocal batch_size gc.collect() if is_xpu_available(): torch.xpu.empty_cache() elif is_npu_available(): torch.npu.empty_cache() else: torch.cuda.empty_cache() a = list(inspect.signature(__lowerCamelCase ).parameters.keys() ) # Guard against user error if len(__lowerCamelCase ) < (len(__lowerCamelCase ) + 1): a = """, """.join([f'{arg}={value}' for arg, value in zip(params[1:] , args[1:] )] ) raise TypeError( f'Batch size was passed into `{function.__name__}` as the first argument when called.' f'Remove this as the decorator already does so: `{function.__name__}({arg_str})`' ) while True: if batch_size == 0: raise RuntimeError("""No executable batch size found, reached zero.""" ) try: return function(__lowerCamelCase , *__lowerCamelCase , **__lowerCamelCase ) except Exception as e: if should_reduce_batch_size(__lowerCamelCase ): gc.collect() if is_xpu_available(): torch.xpu.empty_cache() elif is_npu_available(): torch.npu.empty_cache() else: torch.cuda.empty_cache() batch_size //= 2 else: raise return decorator
347
from __future__ import annotations from typing import Generic, TypeVar __UpperCamelCase : Union[str, Any] = TypeVar("T") class __lowerCAmelCase ( Generic[T] ): def __init__( self :Tuple , __magic_name__ :T ): '''simple docstring''' a = data a = self a = 0 class __lowerCAmelCase ( Generic[T] ): def __init__( self :Tuple ): '''simple docstring''' a = {} def lowerCamelCase__ ( self :Optional[int] , __magic_name__ :T ): '''simple docstring''' a = DisjointSetTreeNode(__magic_name__ ) def lowerCamelCase__ ( self :List[Any] , __magic_name__ :T ): '''simple docstring''' a = self.map[data] if elem_ref != elem_ref.parent: a = self.find_set(elem_ref.parent.data ) return elem_ref.parent def lowerCamelCase__ ( self :List[Any] , __magic_name__ :DisjointSetTreeNode[T] , __magic_name__ :DisjointSetTreeNode[T] ): '''simple docstring''' if nodea.rank > nodea.rank: a = nodea else: a = nodea if nodea.rank == nodea.rank: nodea.rank += 1 def lowerCamelCase__ ( self :Optional[int] , __magic_name__ :T , __magic_name__ :T ): '''simple docstring''' self.link(self.find_set(__magic_name__ ) , self.find_set(__magic_name__ ) ) class __lowerCAmelCase ( Generic[T] ): def __init__( self :Union[str, Any] ): '''simple docstring''' a = {} def lowerCamelCase__ ( self :Union[str, Any] , __magic_name__ :T ): '''simple docstring''' if node not in self.connections: a = {} def lowerCamelCase__ ( self :Any , __magic_name__ :T , __magic_name__ :T , __magic_name__ :int ): '''simple docstring''' self.add_node(__magic_name__ ) self.add_node(__magic_name__ ) a = weight a = weight def lowerCamelCase__ ( self :int ): '''simple docstring''' a = [] a = set() for start in self.connections: for end in self.connections[start]: if (start, end) not in seen: seen.add((end, start) ) edges.append((start, end, self.connections[start][end]) ) edges.sort(key=lambda __magic_name__ : x[2] ) # creating the disjoint set a = DisjointSetTree[T]() for node in self.connections: disjoint_set.make_set(__magic_name__ ) # MST generation a = 0 a = 0 a = GraphUndirectedWeighted[T]() while num_edges < len(self.connections ) - 1: a , a , a = edges[index] index += 1 a = disjoint_set.find_set(__magic_name__ ) a = disjoint_set.find_set(__magic_name__ ) if parent_u != parent_v: num_edges += 1 graph.add_edge(__magic_name__ , __magic_name__ , __magic_name__ ) disjoint_set.union(__magic_name__ , __magic_name__ ) return graph
347
1
import argparse import json import re from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileNetVaConfig, MobileNetVaForImageClassification, MobileNetVaImageProcessor, load_tf_weights_in_mobilenet_va, ) from transformers.utils import logging logging.set_verbosity_info() __UpperCamelCase : List[str] = logging.get_logger(__name__) def __A ( __lowerCamelCase ) -> Dict: a = MobileNetVaConfig(layer_norm_eps=0.001 ) if "_quant" in model_name: raise ValueError("""Quantized models are not supported.""" ) a = re.match(R"""^mobilenet_v1_([^_]*)_([^_]*)$""" , __lowerCamelCase ) if matches: a = float(matches[1] ) a = int(matches[2] ) # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of # the usual 1000. The first class (index 0) is "background". a = 1001 a = """imagenet-1k-id2label.json""" a = """huggingface/label-files""" a = json.load(open(hf_hub_download(__lowerCamelCase , __lowerCamelCase , repo_type="""dataset""" ) , """r""" ) ) a = {int(__lowerCamelCase ) + 1: v for k, v in idalabel.items()} a = """background""" a = idalabel a = {v: k for k, v in idalabel.items()} return config def __A ( ) -> Tuple: a = """http://images.cocodataset.org/val2017/000000039769.jpg""" a = Image.open(requests.get(__lowerCamelCase , stream=__lowerCamelCase ).raw ) return im @torch.no_grad() def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=False ) -> List[Any]: a = get_mobilenet_va_config(__lowerCamelCase ) # Load 🤗 model a = MobileNetVaForImageClassification(__lowerCamelCase ).eval() # Load weights from TensorFlow checkpoint load_tf_weights_in_mobilenet_va(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) # Check outputs on an image, prepared by MobileNetV1ImageProcessor a = MobileNetVaImageProcessor( crop_size={"""width""": config.image_size, """height""": config.image_size} , size={"""shortest_edge""": config.image_size + 32} , ) a = image_processor(images=prepare_img() , return_tensors="""pt""" ) a = model(**__lowerCamelCase ) a = outputs.logits assert logits.shape == (1, 1001) if model_name == "mobilenet_v1_1.0_224": a = torch.tensor([-4.1739, -1.1233, 3.1205] ) elif model_name == "mobilenet_v1_0.75_192": a = torch.tensor([-3.9440, -2.3141, -0.3333] ) else: a = None if expected_logits is not None: assert torch.allclose(logits[0, :3] , __lowerCamelCase , atol=1E-4 ) Path(__lowerCamelCase ).mkdir(exist_ok=__lowerCamelCase ) print(f'Saving model {model_name} to {pytorch_dump_folder_path}' ) model.save_pretrained(__lowerCamelCase ) print(f'Saving image processor to {pytorch_dump_folder_path}' ) image_processor.save_pretrained(__lowerCamelCase ) if push_to_hub: print("""Pushing to the hub...""" ) a = """google/""" + model_name image_processor.push_to_hub(__lowerCamelCase ) model.push_to_hub(__lowerCamelCase ) if __name__ == "__main__": __UpperCamelCase : Dict = argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_name", default="mobilenet_v1_1.0_224", type=str, help="Name of the MobileNetV1 model you'd like to convert. Should in the form 'mobilenet_v1_<depth>_<size>'.", ) parser.add_argument( "--checkpoint_path", required=True, type=str, help="Path to the original TensorFlow checkpoint (.ckpt file)." ) parser.add_argument( "--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." ) __UpperCamelCase : Tuple = parser.parse_args() convert_movilevit_checkpoint( args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub )
347
import shutil import tempfile import unittest import numpy as np import pytest from transformers.testing_utils import require_vision from transformers.utils import is_vision_available if is_vision_available(): from PIL import Image from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast @require_vision class __lowerCAmelCase ( unittest.TestCase ): def lowerCamelCase__ ( self :Any ): '''simple docstring''' a = tempfile.mkdtemp() a = BlipImageProcessor() a = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-BertModel""" ) a = BlipProcessor(__magic_name__ , __magic_name__ ) processor.save_pretrained(self.tmpdirname ) def lowerCamelCase__ ( self :List[Any] , **__magic_name__ :Union[str, Any] ): '''simple docstring''' return AutoProcessor.from_pretrained(self.tmpdirname , **__magic_name__ ).tokenizer def lowerCamelCase__ ( self :str , **__magic_name__ :List[str] ): '''simple docstring''' return AutoProcessor.from_pretrained(self.tmpdirname , **__magic_name__ ).image_processor def lowerCamelCase__ ( self :int ): '''simple docstring''' shutil.rmtree(self.tmpdirname ) def lowerCamelCase__ ( self :Any ): '''simple docstring''' a = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] a = [Image.fromarray(np.moveaxis(__magic_name__ , 0 , -1 ) ) for x in image_inputs] return image_inputs def lowerCamelCase__ ( self :Optional[int] ): '''simple docstring''' a = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) a = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" ) a = self.get_image_processor(do_normalize=__magic_name__ , padding_value=1.0 ) a = BlipProcessor.from_pretrained( self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=__magic_name__ , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , __magic_name__ ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , __magic_name__ ) def lowerCamelCase__ ( self :Any ): '''simple docstring''' a = self.get_image_processor() a = self.get_tokenizer() a = BlipProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ ) a = self.prepare_image_inputs() a = image_processor(__magic_name__ , return_tensors="""np""" ) a = processor(images=__magic_name__ , return_tensors="""np""" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) def lowerCamelCase__ ( self :Any ): '''simple docstring''' a = self.get_image_processor() a = self.get_tokenizer() a = BlipProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ ) a = """lower newer""" a = processor(text=__magic_name__ ) a = tokenizer(__magic_name__ , return_token_type_ids=__magic_name__ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def lowerCamelCase__ ( self :List[Any] ): '''simple docstring''' a = self.get_image_processor() a = self.get_tokenizer() a = BlipProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ ) a = """lower newer""" a = self.prepare_image_inputs() a = processor(text=__magic_name__ , images=__magic_name__ ) self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """input_ids""", """attention_mask"""] ) # test if it raises when no input is passed with pytest.raises(__magic_name__ ): processor() def lowerCamelCase__ ( self :List[str] ): '''simple docstring''' a = self.get_image_processor() a = self.get_tokenizer() a = BlipProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ ) a = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] a = processor.batch_decode(__magic_name__ ) a = tokenizer.batch_decode(__magic_name__ ) self.assertListEqual(__magic_name__ , __magic_name__ ) def lowerCamelCase__ ( self :Optional[int] ): '''simple docstring''' a = self.get_image_processor() a = self.get_tokenizer() a = BlipProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ ) a = """lower newer""" a = self.prepare_image_inputs() a = processor(text=__magic_name__ , images=__magic_name__ ) # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask'] self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """input_ids""", """attention_mask"""] )
347
1
import os from typing import Dict, List, Union import tensorflow as tf from keras_nlp.tokenizers import BytePairTokenizer from tensorflow_text import pad_model_inputs from .tokenization_gpta import GPTaTokenizer class __lowerCAmelCase ( tf.keras.layers.Layer ): def __init__( self :Any , __magic_name__ :Dict[str, int] , __magic_name__ :List[str] , __magic_name__ :int = None , __magic_name__ :int = None ): '''simple docstring''' super().__init__() a = pad_token_id a = max_length a = vocab a = merges a = BytePairTokenizer(__magic_name__ , __magic_name__ , sequence_length=__magic_name__ ) @classmethod def lowerCamelCase__ ( cls :str , __magic_name__ :GPTaTokenizer , *__magic_name__ :Optional[Any] , **__magic_name__ :Any ): '''simple docstring''' a = [""" """.join(__magic_name__ ) for m in tokenizer.bpe_ranks.keys()] a = tokenizer.get_vocab() return cls(__magic_name__ , __magic_name__ , *__magic_name__ , **__magic_name__ ) @classmethod def lowerCamelCase__ ( cls :Tuple , __magic_name__ :Union[str, os.PathLike] , *__magic_name__ :int , **__magic_name__ :Dict ): '''simple docstring''' a = GPTaTokenizer.from_pretrained(__magic_name__ , *__magic_name__ , **__magic_name__ ) return cls.from_tokenizer(__magic_name__ , *__magic_name__ , **__magic_name__ ) @classmethod def lowerCamelCase__ ( cls :Dict , __magic_name__ :List[str] ): '''simple docstring''' return cls(**__magic_name__ ) def lowerCamelCase__ ( self :List[str] ): '''simple docstring''' return { "vocab": self.vocab, "merges": self.merges, "max_length": self.max_length, "pad_token_id": self.pad_token_id, } def lowerCamelCase__ ( self :Union[str, Any] , __magic_name__ :str , __magic_name__ :int = None ): '''simple docstring''' a = self.tf_tokenizer(__magic_name__ ) a = tf.ones_like(__magic_name__ ) if self.pad_token_id is not None: # pad the tokens up to max length a = max_length if max_length is not None else self.max_length if max_length is not None: a , a = pad_model_inputs( __magic_name__ , max_seq_length=__magic_name__ , pad_value=self.pad_token_id ) return {"attention_mask": attention_mask, "input_ids": input_ids}
347
from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices __UpperCamelCase : Optional[Any] = logging.get_logger(__name__) __UpperCamelCase : int = { "shi-labs/nat-mini-in1k-224": "https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json", # See all Nat models at https://huggingface.co/models?filter=nat } class __lowerCAmelCase ( __magic_name__ , __magic_name__ ): UpperCamelCase__ = '''nat''' UpperCamelCase__ = { '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers''', } def __init__( self :Any , __magic_name__ :int=4 , __magic_name__ :Dict=3 , __magic_name__ :List[str]=64 , __magic_name__ :Optional[int]=[3, 4, 6, 5] , __magic_name__ :int=[2, 4, 8, 16] , __magic_name__ :str=7 , __magic_name__ :Tuple=3.0 , __magic_name__ :Dict=True , __magic_name__ :List[Any]=0.0 , __magic_name__ :List[Any]=0.0 , __magic_name__ :List[Any]=0.1 , __magic_name__ :Optional[Any]="gelu" , __magic_name__ :Optional[Any]=0.02 , __magic_name__ :Tuple=1E-5 , __magic_name__ :Union[str, Any]=0.0 , __magic_name__ :int=None , __magic_name__ :Any=None , **__magic_name__ :Dict , ): '''simple docstring''' super().__init__(**__magic_name__ ) a = patch_size a = num_channels a = embed_dim a = depths a = len(__magic_name__ ) a = num_heads a = kernel_size a = mlp_ratio a = qkv_bias a = hidden_dropout_prob a = attention_probs_dropout_prob a = drop_path_rate a = hidden_act a = layer_norm_eps a = initializer_range # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model a = int(embed_dim * 2 ** (len(__magic_name__ ) - 1) ) a = layer_scale_init_value a = ["""stem"""] + [F'stage{idx}' for idx in range(1 , len(__magic_name__ ) + 1 )] a , a = get_aligned_output_features_output_indices( out_features=__magic_name__ , out_indices=__magic_name__ , stage_names=self.stage_names )
347
1
def __A ( __lowerCamelCase = 1000 ) -> int: return sum(e for e in range(3 , __lowerCamelCase ) if e % 3 == 0 or e % 5 == 0 ) if __name__ == "__main__": print(F'{solution() = }')
347
import argparse import json import os import tensorstore as ts import torch from flax import serialization from flax.traverse_util import flatten_dict, unflatten_dict from tensorflow.io import gfile from transformers.modeling_utils import dtype_byte_size from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import ( rename_keys, ) from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME from transformers.utils.hub import convert_file_size_to_int def __A ( __lowerCamelCase , __lowerCamelCase ) -> Optional[int]: if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3: # expert layer a = flax_key_tuple[:-1] + ("""weight""",) a = torch.permute(__lowerCamelCase , (0, 2, 1) ) elif flax_key_tuple[-1] == "kernel" and ".".join(__lowerCamelCase ): # linear layer a = flax_key_tuple[:-1] + ("""weight""",) a = flax_tensor.T elif flax_key_tuple[-1] in ["scale", "embedding"]: a = flax_key_tuple[:-1] + ("""weight""",) return flax_key_tuple, flax_tensor def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> List[Any]: if "metadata" in layer: a = layer.split("""metadata""" ) a = """""".join(split_layer[0] )[:-1] a = [tuple(("""metadata""" + split_layer[1]).split("""/""" ) )] elif "kvstore" in layer: a = layer.split("""kvstore""" ) a = """""".join(split_layer[0] )[:-1] a = [tuple(("""kvstore""" + split_layer[1]).split("""/""" ) )] else: a = layer.split("""/""" ) a = """/""".join(split_layer[:-1] ) a = (split_layer[-1],) if "kvstore/path" in layer: a = f'{switch_checkpoint_path}/{checkpoint_info[layer]}' elif "kvstore/driver" in layer: a = """file""" else: a = checkpoint_info[layer] return curr_real_layer_name, split_layer, content def __A ( __lowerCamelCase , __lowerCamelCase ) -> Union[str, Any]: a = rename_keys(__lowerCamelCase ) a = {} for k, v in current_block.items(): a = v a = new_current_block torch.save(__lowerCamelCase , __lowerCamelCase ) def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = WEIGHTS_NAME ) -> List[str]: a = convert_file_size_to_int(__lowerCamelCase ) a = [] a = {} a = 0 a = 0 os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase ) with gfile.GFile(switch_checkpoint_path + """/checkpoint""" , """rb""" ) as fp: a = serialization.msgpack_restore(fp.read() )["""optimizer"""]["""target"""] a = flatten_dict(__lowerCamelCase , sep="""/""" ) a = {} for layer in checkpoint_info.keys(): a , a , a = get_key_and_tensorstore_dict( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) if curr_real_layer_name in all_layers: a = content else: a = {split_layer[-1]: content} for key in all_layers.keys(): # open tensorstore file a = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result() a = torch.tensor(__lowerCamelCase ) a = raw_weights.numel() * dtype_byte_size(raw_weights.dtype ) # use the renaming pattern from the small conversion scripts a , a = rename_base_flax_keys(tuple(key.split("""/""" ) ) , __lowerCamelCase ) a = """/""".join(__lowerCamelCase ) # If this weight is going to tip up over the maximal size, we split. if current_block_size + weight_size > max_shard_size: a = os.path.join( __lowerCamelCase , weights_name.replace(""".bin""" , f'-{len(__lowerCamelCase )+1:05d}-of-???.bin' ) ) rename_and_save_block(__lowerCamelCase , __lowerCamelCase ) sharded_state_dicts.append(current_block.keys() ) del current_block a = {} a = 0 a = raw_weights.to(getattr(__lowerCamelCase , __lowerCamelCase ) ) current_block_size += weight_size total_size += weight_size # Add the last block a = os.path.join(__lowerCamelCase , weights_name.replace(""".bin""" , f'-{len(__lowerCamelCase )+1:05d}-of-???.bin' ) ) rename_and_save_block(__lowerCamelCase , __lowerCamelCase ) sharded_state_dicts.append(current_block.keys() ) # If we only have one shard, we return it if len(__lowerCamelCase ) == 1: return {weights_name: sharded_state_dicts[0]}, None # Otherwise, let's build the index a = {} a = {} for idx, shard in enumerate(__lowerCamelCase ): a = weights_name.replace( """.bin""" , f'-{idx+1:05d}-of-{len(__lowerCamelCase ):05d}.bin' ) # len(sharded_state_dicts):05d} a = os.path.join(__lowerCamelCase , weights_name.replace(""".bin""" , f'-{idx+1:05d}-of-???.bin' ) ) os.rename(__lowerCamelCase , os.path.join(__lowerCamelCase , __lowerCamelCase ) ) a = shard for key in shard: a = shard_file # Add the metadata a = {"""total_size""": total_size} a = {"""metadata""": metadata, """weight_map""": weight_map} with open(os.path.join(__lowerCamelCase , __lowerCamelCase ) , """w""" , encoding="""utf-8""" ) as f: a = json.dumps(__lowerCamelCase , indent=2 , sort_keys=__lowerCamelCase ) + """\n""" f.write(__lowerCamelCase ) return metadata, index if __name__ == "__main__": __UpperCamelCase : Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--switch_t5x_checkpoint_path", default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600", type=str, required=False, help="Path to a directory containing a folder per layer. Follows the original Google format.", ) parser.add_argument("--max_shard_size", default="10GB", required=False, help="Max shard size") parser.add_argument("--dtype", default="bfloat16", type=str, required=False, help="dtype of the saved model") parser.add_argument( "--pytorch_dump_folder_path", default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted", type=str, required=False, help="Path to the output pytorch model.", ) __UpperCamelCase : Any = parser.parse_args() shard_on_the_fly( args.switch_tax_checkpoint_path, args.pytorch_dump_folder_path, args.max_shard_size, args.dtype, ) def __A ( ) -> Tuple: from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer a = SwitchTransformersConfig.from_pretrained("""google/switch-base-8""" ) config.save_pretrained("""/home/arthur_huggingface_co/transformers/switch_converted""" ) a = SwitchTransformersForConditionalGeneration.from_pretrained( """/home/arthur_huggingface_co/transformers/switch_converted""" , device_map="""auto""" ) a = TaTokenizer.from_pretrained("""t5-small""" ) a = """A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.""" a = tokenizer(__lowerCamelCase , return_tensors="""pt""" ).input_ids a = model.generate(__lowerCamelCase , decoder_start_token_id=0 ) print(tokenizer.decode(out[0] ) )
347
1
from functools import reduce __UpperCamelCase : Union[str, Any] = ( "73167176531330624919225119674426574742355349194934" "96983520312774506326239578318016984801869478851843" "85861560789112949495459501737958331952853208805511" "12540698747158523863050715693290963295227443043557" "66896648950445244523161731856403098711121722383113" "62229893423380308135336276614282806444486645238749" "30358907296290491560440772390713810515859307960866" "70172427121883998797908792274921901699720888093776" "65727333001053367881220235421809751254540594752243" "52584907711670556013604839586446706324415722155397" "53697817977846174064955149290862569321978468622482" "83972241375657056057490261407972968652414535100474" "82166370484403199890008895243450658541227588666881" "16427171479924442928230863465674813919123162824586" "17866458359124566529476545682848912883142607690042" "24219022671055626321111109370544217506941658960408" "07198403850962455444362981230987879927244284909188" "84580156166097919133875499200524063689912560717606" "05886116467109405077541002256983155200055935729725" "71636269561882670428252483600823257530420752963450" ) def __A ( __lowerCamelCase = N ) -> int: return max( # mypy cannot properly interpret reduce int(reduce(lambda __lowerCamelCase , __lowerCamelCase : str(int(__lowerCamelCase ) * int(__lowerCamelCase ) ) , n[i : i + 13] ) ) for i in range(len(__lowerCamelCase ) - 12 ) ) if __name__ == "__main__": print(F'{solution() = }')
347
import glob import os import random from string import ascii_lowercase, digits import cva import numpy as np # Parrameters __UpperCamelCase : Union[str, Any] = (720, 1_280) # Height, Width __UpperCamelCase : Any = (0.4, 0.6) # if height or width lower than this scale, drop it. __UpperCamelCase : str = 1 / 100 __UpperCamelCase : Optional[int] = "" __UpperCamelCase : List[Any] = "" __UpperCamelCase : Union[str, Any] = "" __UpperCamelCase : Tuple = 250 def __A ( ) -> None: a , a = get_dataset(__lowerCamelCase , __lowerCamelCase ) for index in range(__lowerCamelCase ): a = random.sample(range(len(__lowerCamelCase ) ) , 4 ) a , a , a = update_image_and_anno( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , filter_scale=__lowerCamelCase , ) # Get random string code: '7b7ad245cdff75241935e4dd860f3bad' a = random_chars(32 ) a = path.split(os.sep )[-1].rsplit(""".""" , 1 )[0] a = f'{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}' cva.imwrite(f'{file_root}.jpg' , __lowerCamelCase , [cva.IMWRITE_JPEG_QUALITY, 85] ) print(f'Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}' ) a = [] for anno in new_annos: a = anno[3] - anno[1] a = anno[4] - anno[2] a = anno[1] + width / 2 a = anno[2] + height / 2 a = f'{anno[0]} {x_center} {y_center} {width} {height}' annos_list.append(__lowerCamelCase ) with open(f'{file_root}.txt' , """w""" ) as outfile: outfile.write("""\n""".join(line for line in annos_list ) ) def __A ( __lowerCamelCase , __lowerCamelCase ) -> tuple[list, list]: a = [] a = [] for label_file in glob.glob(os.path.join(__lowerCamelCase , """*.txt""" ) ): a = label_file.split(os.sep )[-1].rsplit(""".""" , 1 )[0] with open(__lowerCamelCase ) as in_file: a = in_file.readlines() a = os.path.join(__lowerCamelCase , f'{label_name}.jpg' ) a = [] for obj_list in obj_lists: a = obj_list.rstrip("""\n""" ).split(""" """ ) a = float(obj[1] ) - float(obj[3] ) / 2 a = float(obj[2] ) - float(obj[4] ) / 2 a = float(obj[1] ) + float(obj[3] ) / 2 a = float(obj[2] ) + float(obj[4] ) / 2 boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] ) if not boxes: continue img_paths.append(__lowerCamelCase ) labels.append(__lowerCamelCase ) return img_paths, labels def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = 0.0 , ) -> tuple[list, list, str]: a = np.zeros([output_size[0], output_size[1], 3] , dtype=np.uinta ) a = scale_range[0] + random.random() * (scale_range[1] - scale_range[0]) a = scale_range[0] + random.random() * (scale_range[1] - scale_range[0]) a = int(scale_x * output_size[1] ) a = int(scale_y * output_size[0] ) a = [] a = [] for i, index in enumerate(__lowerCamelCase ): a = all_img_list[index] path_list.append(__lowerCamelCase ) a = all_annos[index] a = cva.imread(__lowerCamelCase ) if i == 0: # top-left a = cva.resize(__lowerCamelCase , (divid_point_x, divid_point_y) ) a = img for bbox in img_annos: a = bbox[1] * scale_x a = bbox[2] * scale_y a = bbox[3] * scale_x a = bbox[4] * scale_y new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) elif i == 1: # top-right a = cva.resize(__lowerCamelCase , (output_size[1] - divid_point_x, divid_point_y) ) a = img for bbox in img_annos: a = scale_x + bbox[1] * (1 - scale_x) a = bbox[2] * scale_y a = scale_x + bbox[3] * (1 - scale_x) a = bbox[4] * scale_y new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) elif i == 2: # bottom-left a = cva.resize(__lowerCamelCase , (divid_point_x, output_size[0] - divid_point_y) ) a = img for bbox in img_annos: a = bbox[1] * scale_x a = scale_y + bbox[2] * (1 - scale_y) a = bbox[3] * scale_x a = scale_y + bbox[4] * (1 - scale_y) new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) else: # bottom-right a = cva.resize( __lowerCamelCase , (output_size[1] - divid_point_x, output_size[0] - divid_point_y) ) a = img for bbox in img_annos: a = scale_x + bbox[1] * (1 - scale_x) a = scale_y + bbox[2] * (1 - scale_y) a = scale_x + bbox[3] * (1 - scale_x) a = scale_y + bbox[4] * (1 - scale_y) new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) # Remove bounding box small than scale of filter if filter_scale > 0: a = [ anno for anno in new_anno if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2]) ] return output_img, new_anno, path_list[0] def __A ( __lowerCamelCase ) -> str: assert number_char > 1, "The number of character should greater than 1" a = ascii_lowercase + digits return "".join(random.choice(__lowerCamelCase ) for _ in range(__lowerCamelCase ) ) if __name__ == "__main__": main() print("DONE ✅")
347
1
import argparse import requests import torch # pip3 install salesforce-lavis # I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis from lavis.models import load_model_and_preprocess from PIL import Image from transformers import ( AutoTokenizer, BlipaConfig, BlipaForConditionalGeneration, BlipaProcessor, BlipaVisionConfig, BlipImageProcessor, OPTConfig, TaConfig, ) from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD def __A ( ) -> Tuple: a = """https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png""" a = Image.open(requests.get(__lowerCamelCase , stream=__lowerCamelCase ).raw ).convert("""RGB""" ) return image def __A ( __lowerCamelCase ) -> str: a = [] # fmt: off # vision encoder rename_keys.append(("""visual_encoder.cls_token""", """vision_model.embeddings.class_embedding""") ) rename_keys.append(("""visual_encoder.pos_embed""", """vision_model.embeddings.position_embedding""") ) rename_keys.append(("""visual_encoder.patch_embed.proj.weight""", """vision_model.embeddings.patch_embedding.weight""") ) rename_keys.append(("""visual_encoder.patch_embed.proj.bias""", """vision_model.embeddings.patch_embedding.bias""") ) rename_keys.append(("""ln_vision.weight""", """vision_model.post_layernorm.weight""") ) rename_keys.append(("""ln_vision.bias""", """vision_model.post_layernorm.bias""") ) for i in range(config.vision_config.num_hidden_layers ): rename_keys.append((f'visual_encoder.blocks.{i}.norm1.weight', f'vision_model.encoder.layers.{i}.layer_norm1.weight') ) rename_keys.append((f'visual_encoder.blocks.{i}.norm1.bias', f'vision_model.encoder.layers.{i}.layer_norm1.bias') ) rename_keys.append((f'visual_encoder.blocks.{i}.norm2.weight', f'vision_model.encoder.layers.{i}.layer_norm2.weight') ) rename_keys.append((f'visual_encoder.blocks.{i}.norm2.bias', f'vision_model.encoder.layers.{i}.layer_norm2.bias') ) rename_keys.append((f'visual_encoder.blocks.{i}.attn.qkv.weight', f'vision_model.encoder.layers.{i}.self_attn.qkv.weight') ) rename_keys.append((f'visual_encoder.blocks.{i}.attn.proj.weight', f'vision_model.encoder.layers.{i}.self_attn.projection.weight',) ) rename_keys.append((f'visual_encoder.blocks.{i}.attn.proj.bias', f'vision_model.encoder.layers.{i}.self_attn.projection.bias') ) rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc1.weight', f'vision_model.encoder.layers.{i}.mlp.fc1.weight') ) rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc1.bias', f'vision_model.encoder.layers.{i}.mlp.fc1.bias') ) rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc2.weight', f'vision_model.encoder.layers.{i}.mlp.fc2.weight') ) rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc2.bias', f'vision_model.encoder.layers.{i}.mlp.fc2.bias') ) # QFormer rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.weight""", """qformer.layernorm.weight""") ) rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.bias""", """qformer.layernorm.bias""") ) # fmt: on return rename_keys def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Union[str, Any]: a = dct.pop(__lowerCamelCase ) a = val def __A ( __lowerCamelCase , __lowerCamelCase ) -> Any: for i in range(config.vision_config.num_hidden_layers ): # read in original q and v biases a = state_dict.pop(f'visual_encoder.blocks.{i}.attn.q_bias' ) a = state_dict.pop(f'visual_encoder.blocks.{i}.attn.v_bias' ) # next, set bias in the state dict a = torch.cat((q_bias, torch.zeros_like(__lowerCamelCase , requires_grad=__lowerCamelCase ), v_bias) ) a = qkv_bias def __A ( __lowerCamelCase , __lowerCamelCase ) -> Tuple: a = 364 if """coco""" in model_name else 224 a = BlipaVisionConfig(image_size=__lowerCamelCase ).to_dict() # make sure the models have proper bos_token_id and eos_token_id set (important for generation) # seems like flan-T5 models don't have bos_token_id properly set? if "opt-2.7b" in model_name: a = OPTConfig.from_pretrained("""facebook/opt-2.7b""" , eos_token_id=__lowerCamelCase ).to_dict() elif "opt-6.7b" in model_name: a = OPTConfig.from_pretrained("""facebook/opt-6.7b""" , eos_token_id=__lowerCamelCase ).to_dict() elif "t5-xl" in model_name: a = TaConfig.from_pretrained("""google/flan-t5-xl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict() elif "t5-xxl" in model_name: a = TaConfig.from_pretrained("""google/flan-t5-xxl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict() a = BlipaConfig(vision_config=__lowerCamelCase , text_config=__lowerCamelCase ) return config, image_size @torch.no_grad() def __A ( __lowerCamelCase , __lowerCamelCase=None , __lowerCamelCase=False ) -> int: a = ( AutoTokenizer.from_pretrained("""facebook/opt-2.7b""" ) if """opt""" in model_name else AutoTokenizer.from_pretrained("""google/flan-t5-xl""" ) ) a = tokenizer("""\n""" , add_special_tokens=__lowerCamelCase ).input_ids[0] a , a = get_blipa_config(__lowerCamelCase , eos_token_id=__lowerCamelCase ) a = BlipaForConditionalGeneration(__lowerCamelCase ).eval() a = { """blip2-opt-2.7b""": ("""blip2_opt""", """pretrain_opt2.7b"""), """blip2-opt-6.7b""": ("""blip2_opt""", """pretrain_opt6.7b"""), """blip2-opt-2.7b-coco""": ("""blip2_opt""", """caption_coco_opt2.7b"""), """blip2-opt-6.7b-coco""": ("""blip2_opt""", """caption_coco_opt6.7b"""), """blip2-flan-t5-xl""": ("""blip2_t5""", """pretrain_flant5xl"""), """blip2-flan-t5-xl-coco""": ("""blip2_t5""", """caption_coco_flant5xl"""), """blip2-flan-t5-xxl""": ("""blip2_t5""", """pretrain_flant5xxl"""), } a , a = model_name_to_original[model_name] # load original model print("""Loading original model...""" ) a = """cuda""" if torch.cuda.is_available() else """cpu""" a , a , a = load_model_and_preprocess( name=__lowerCamelCase , model_type=__lowerCamelCase , is_eval=__lowerCamelCase , device=__lowerCamelCase ) original_model.eval() print("""Done!""" ) # update state dict keys a = original_model.state_dict() a = create_rename_keys(__lowerCamelCase ) for src, dest in rename_keys: rename_key(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) # some keys can be renamed efficiently for key, val in state_dict.copy().items(): a = state_dict.pop(__lowerCamelCase ) if key.startswith("""Qformer.bert""" ): a = key.replace("""Qformer.bert""" , """qformer""" ) if "attention.self" in key: a = key.replace("""self""" , """attention""" ) if "opt_proj" in key: a = key.replace("""opt_proj""" , """language_projection""" ) if "t5_proj" in key: a = key.replace("""t5_proj""" , """language_projection""" ) if key.startswith("""opt""" ): a = key.replace("""opt""" , """language""" ) if key.startswith("""t5""" ): a = key.replace("""t5""" , """language""" ) a = val # read in qv biases read_in_q_v_bias(__lowerCamelCase , __lowerCamelCase ) a , a = hf_model.load_state_dict(__lowerCamelCase , strict=__lowerCamelCase ) assert len(__lowerCamelCase ) == 0 assert unexpected_keys == ["qformer.embeddings.position_ids"] a = load_demo_image() a = vis_processors["""eval"""](__lowerCamelCase ).unsqueeze(0 ).to(__lowerCamelCase ) a = tokenizer(["""\n"""] , return_tensors="""pt""" ).input_ids.to(__lowerCamelCase ) # create processor a = BlipImageProcessor( size={"""height""": image_size, """width""": image_size} , image_mean=__lowerCamelCase , image_std=__lowerCamelCase ) a = BlipaProcessor(image_processor=__lowerCamelCase , tokenizer=__lowerCamelCase ) a = processor(images=__lowerCamelCase , return_tensors="""pt""" ).pixel_values.to(__lowerCamelCase ) # make sure processor creates exact same pixel values assert torch.allclose(__lowerCamelCase , __lowerCamelCase ) original_model.to(__lowerCamelCase ) hf_model.to(__lowerCamelCase ) with torch.no_grad(): if "opt" in model_name: a = original_model({"""image""": original_pixel_values, """text_input""": [""""""]} ).logits a = hf_model(__lowerCamelCase , __lowerCamelCase ).logits else: a = original_model( {"""image""": original_pixel_values, """text_input""": ["""\n"""], """text_output""": ["""\n"""]} ).logits a = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -100 ) a = hf_model(__lowerCamelCase , __lowerCamelCase , labels=__lowerCamelCase ).logits assert original_logits.shape == logits.shape print("""First values of original logits:""" , original_logits[0, :3, :3] ) print("""First values of HF logits:""" , logits[0, :3, :3] ) # assert values if model_name == "blip2-flan-t5-xl": a = torch.tensor( [[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]] , device=__lowerCamelCase ) assert torch.allclose(logits[0, :3, :3] , __lowerCamelCase , atol=1E-4 ) elif model_name == "blip2-flan-t5-xl-coco": a = torch.tensor( [[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]] , device=__lowerCamelCase ) else: # cast to same type a = logits.dtype assert torch.allclose(original_logits.to(__lowerCamelCase ) , __lowerCamelCase , atol=1E-2 ) print("""Looks ok!""" ) print("""Generating a caption...""" ) a = """""" a = tokenizer(__lowerCamelCase , return_tensors="""pt""" ).input_ids.to(__lowerCamelCase ) a = original_model.generate({"""image""": original_pixel_values} ) a = hf_model.generate( __lowerCamelCase , __lowerCamelCase , do_sample=__lowerCamelCase , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , ) print("""Original generation:""" , __lowerCamelCase ) a = input_ids.shape[1] a = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=__lowerCamelCase ) a = [text.strip() for text in output_text] print("""HF generation:""" , __lowerCamelCase ) if pytorch_dump_folder_path is not None: processor.save_pretrained(__lowerCamelCase ) hf_model.save_pretrained(__lowerCamelCase ) if push_to_hub: processor.push_to_hub(f'nielsr/{model_name}' ) hf_model.push_to_hub(f'nielsr/{model_name}' ) if __name__ == "__main__": __UpperCamelCase : int = argparse.ArgumentParser() __UpperCamelCase : List[Any] = [ "blip2-opt-2.7b", "blip2-opt-6.7b", "blip2-opt-2.7b-coco", "blip2-opt-6.7b-coco", "blip2-flan-t5-xl", "blip2-flan-t5-xl-coco", "blip2-flan-t5-xxl", ] parser.add_argument( "--model_name", default="blip2-opt-2.7b", choices=choices, type=str, help="Path to hf config.json of model to convert", ) parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument( "--push_to_hub", action="store_true", help="Whether to push the model and processor to the hub after converting", ) __UpperCamelCase : Optional[Any] = parser.parse_args() convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
347
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available __UpperCamelCase : Optional[Any] = { "configuration_mobilenet_v2": [ "MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileNetV2Config", "MobileNetV2OnnxConfig", ], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : int = ["MobileNetV2FeatureExtractor"] __UpperCamelCase : Tuple = ["MobileNetV2ImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : int = [ "MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST", "MobileNetV2ForImageClassification", "MobileNetV2ForSemanticSegmentation", "MobileNetV2Model", "MobileNetV2PreTrainedModel", "load_tf_weights_in_mobilenet_v2", ] if TYPE_CHECKING: from .configuration_mobilenet_va import ( MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileNetVaConfig, MobileNetVaOnnxConfig, ) try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_mobilenet_va import MobileNetVaFeatureExtractor from .image_processing_mobilenet_va import MobileNetVaImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mobilenet_va import ( MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel, MobileNetVaPreTrainedModel, load_tf_weights_in_mobilenet_va, ) else: import sys __UpperCamelCase : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
347
1
import torch import torch.nn as nn from transformers.modeling_utils import ModuleUtilsMixin from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin class __lowerCAmelCase ( __magic_name__ , __magic_name__ , __magic_name__ ): @register_to_config def __init__( self :List[str] , __magic_name__ :int , __magic_name__ :int , __magic_name__ :int , __magic_name__ :float , __magic_name__ :int , __magic_name__ :int , __magic_name__ :int , __magic_name__ :int , __magic_name__ :str , __magic_name__ :bool = False , ): '''simple docstring''' super().__init__() a = nn.Embedding(__magic_name__ , __magic_name__ ) a = nn.Embedding(__magic_name__ , __magic_name__ ) a = False a = nn.Dropout(p=__magic_name__ ) a = TaConfig( vocab_size=__magic_name__ , d_model=__magic_name__ , num_heads=__magic_name__ , d_kv=__magic_name__ , d_ff=__magic_name__ , dropout_rate=__magic_name__ , feed_forward_proj=__magic_name__ , is_decoder=__magic_name__ , is_encoder_decoder=__magic_name__ , ) a = nn.ModuleList() for lyr_num in range(__magic_name__ ): a = TaBlock(__magic_name__ ) self.encoders.append(__magic_name__ ) a = TaLayerNorm(__magic_name__ ) a = nn.Dropout(p=__magic_name__ ) def lowerCamelCase__ ( self :str , __magic_name__ :Optional[int] , __magic_name__ :Any ): '''simple docstring''' a = self.token_embedder(__magic_name__ ) a = encoder_input_tokens.shape[1] a = torch.arange(__magic_name__ , device=encoder_input_tokens.device ) x += self.position_encoding(__magic_name__ ) a = self.dropout_pre(__magic_name__ ) # inverted the attention mask a = encoder_input_tokens.size() a = self.get_extended_attention_mask(__magic_name__ , __magic_name__ ) for lyr in self.encoders: a = lyr(__magic_name__ , __magic_name__ )[0] a = self.layer_norm(__magic_name__ ) return self.dropout_post(__magic_name__ ), encoder_inputs_mask
347
def __A ( __lowerCamelCase ) -> bool: if num < 0: return False a = num a = 0 while num > 0: a = rev_num * 10 + (num % 10) num //= 10 return num_copy == rev_num if __name__ == "__main__": import doctest doctest.testmod()
347
1
import unittest import numpy as np from transformers.testing_utils import require_pytesseract, require_torch from transformers.utils import is_pytesseract_available, is_torch_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_pytesseract_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class __lowerCAmelCase ( unittest.TestCase ): def __init__( self :Dict , __magic_name__ :Optional[Any] , __magic_name__ :Tuple=7 , __magic_name__ :Union[str, Any]=3 , __magic_name__ :Optional[int]=18 , __magic_name__ :Union[str, Any]=30 , __magic_name__ :Optional[int]=400 , __magic_name__ :List[Any]=True , __magic_name__ :Union[str, Any]=None , __magic_name__ :Dict=True , ): '''simple docstring''' a = size if size is not None else {"""height""": 18, """width""": 18} a = parent a = batch_size a = num_channels a = image_size a = min_resolution a = max_resolution a = do_resize a = size a = apply_ocr def lowerCamelCase__ ( self :str ): '''simple docstring''' return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr} @require_torch @require_pytesseract class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ): UpperCamelCase__ = LayoutLMvaImageProcessor if is_pytesseract_available() else None def lowerCamelCase__ ( self :Optional[Any] ): '''simple docstring''' a = LayoutLMvaImageProcessingTester(self ) @property def lowerCamelCase__ ( self :Tuple ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def lowerCamelCase__ ( self :str ): '''simple docstring''' a = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__magic_name__ , """do_resize""" ) ) self.assertTrue(hasattr(__magic_name__ , """size""" ) ) self.assertTrue(hasattr(__magic_name__ , """apply_ocr""" ) ) def lowerCamelCase__ ( self :Dict ): '''simple docstring''' a = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18} ) a = self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} ) def lowerCamelCase__ ( self :Tuple ): '''simple docstring''' pass def lowerCamelCase__ ( self :Dict ): '''simple docstring''' a = self.image_processing_class(**self.image_processor_dict ) # create random PIL images a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ ) for image in image_inputs: self.assertIsInstance(__magic_name__ , Image.Image ) # Test not batched input a = image_processing(image_inputs[0] , return_tensors="""pt""" ) self.assertEqual( encoding.pixel_values.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) self.assertIsInstance(encoding.words , __magic_name__ ) self.assertIsInstance(encoding.boxes , __magic_name__ ) # Test batched a = image_processing(__magic_name__ , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) def lowerCamelCase__ ( self :Tuple ): '''simple docstring''' a = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ , numpify=__magic_name__ ) for image in image_inputs: self.assertIsInstance(__magic_name__ , np.ndarray ) # Test not batched input a = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) # Test batched a = image_processing(__magic_name__ , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) def lowerCamelCase__ ( self :Tuple ): '''simple docstring''' a = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ , torchify=__magic_name__ ) for image in image_inputs: self.assertIsInstance(__magic_name__ , torch.Tensor ) # Test not batched input a = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) # Test batched a = image_processing(__magic_name__ , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) def lowerCamelCase__ ( self :Optional[int] ): '''simple docstring''' a = LayoutLMvaImageProcessor() from datasets import load_dataset a = load_dataset("""hf-internal-testing/fixtures_docvqa""" , split="""test""" ) a = Image.open(ds[0]["""file"""] ).convert("""RGB""" ) a = image_processing(__magic_name__ , return_tensors="""pt""" ) self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) ) self.assertEqual(len(encoding.words ) , len(encoding.boxes ) ) # fmt: off # the words and boxes were obtained with Tesseract 4.1.1 a = [["""11:14""", """to""", """11:39""", """a.m""", """11:39""", """to""", """11:44""", """a.m.""", """11:44""", """a.m.""", """to""", """12:25""", """p.m.""", """12:25""", """to""", """12:58""", """p.m.""", """12:58""", """to""", """4:00""", """p.m.""", """2:00""", """to""", """5:00""", """p.m.""", """Coffee""", """Break""", """Coffee""", """will""", """be""", """served""", """for""", """men""", """and""", """women""", """in""", """the""", """lobby""", """adjacent""", """to""", """exhibit""", """area.""", """Please""", """move""", """into""", """exhibit""", """area.""", """(Exhibits""", """Open)""", """TRRF""", """GENERAL""", """SESSION""", """(PART""", """|)""", """Presiding:""", """Lee""", """A.""", """Waller""", """TRRF""", """Vice""", """President""", """“Introductory""", """Remarks”""", """Lee""", """A.""", """Waller,""", """TRRF""", """Vice""", """Presi-""", """dent""", """Individual""", """Interviews""", """with""", """TRRF""", """Public""", """Board""", """Members""", """and""", """Sci-""", """entific""", """Advisory""", """Council""", """Mem-""", """bers""", """Conducted""", """by""", """TRRF""", """Treasurer""", """Philip""", """G.""", """Kuehn""", """to""", """get""", """answers""", """which""", """the""", """public""", """refrigerated""", """warehousing""", """industry""", """is""", """looking""", """for.""", """Plus""", """questions""", """from""", """the""", """floor.""", """Dr.""", """Emil""", """M.""", """Mrak,""", """University""", """of""", """Cal-""", """ifornia,""", """Chairman,""", """TRRF""", """Board;""", """Sam""", """R.""", """Cecil,""", """University""", """of""", """Georgia""", """College""", """of""", """Agriculture;""", """Dr.""", """Stanley""", """Charm,""", """Tufts""", """University""", """School""", """of""", """Medicine;""", """Dr.""", """Robert""", """H.""", """Cotton,""", """ITT""", """Continental""", """Baking""", """Company;""", """Dr.""", """Owen""", """Fennema,""", """University""", """of""", """Wis-""", """consin;""", """Dr.""", """Robert""", """E.""", """Hardenburg,""", """USDA.""", """Questions""", """and""", """Answers""", """Exhibits""", """Open""", """Capt.""", """Jack""", """Stoney""", """Room""", """TRRF""", """Scientific""", """Advisory""", """Council""", """Meeting""", """Ballroom""", """Foyer"""]] # noqa: E231 a = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231 # fmt: on self.assertListEqual(encoding.words , __magic_name__ ) self.assertListEqual(encoding.boxes , __magic_name__ ) # with apply_OCR = False a = LayoutLMvaImageProcessor(apply_ocr=__magic_name__ ) a = image_processing(__magic_name__ , return_tensors="""pt""" ) self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
347
import json import os import shutil import tempfile import unittest from transformers import BatchEncoding, CanineTokenizer from transformers.testing_utils import require_tokenizers, require_torch from transformers.tokenization_utils import AddedToken from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ): UpperCamelCase__ = CanineTokenizer UpperCamelCase__ = False def lowerCamelCase__ ( self :Tuple ): '''simple docstring''' super().setUp() a = CanineTokenizer() tokenizer.save_pretrained(self.tmpdirname ) @cached_property def lowerCamelCase__ ( self :Dict ): '''simple docstring''' return CanineTokenizer.from_pretrained("""google/canine-s""" ) def lowerCamelCase__ ( self :Tuple , **__magic_name__ :Dict ): '''simple docstring''' a = self.tokenizer_class.from_pretrained(self.tmpdirname , **__magic_name__ ) a = 1024 return tokenizer @require_torch def lowerCamelCase__ ( self :int ): '''simple docstring''' a = self.canine_tokenizer a = ["""Life is like a box of chocolates.""", """You never know what you're gonna get."""] # fmt: off a = [5_7344, 76, 105, 102, 101, 32, 105, 115, 32, 108, 105, 107, 101, 32, 97, 32, 98, 111, 120, 32, 111, 102, 32, 99, 104, 111, 99, 111, 108, 97, 116, 101, 115, 46, 5_7345, 0, 0, 0, 0] # fmt: on a = tokenizer(__magic_name__ , padding=__magic_name__ , return_tensors="""pt""" ) self.assertIsInstance(__magic_name__ , __magic_name__ ) a = list(batch.input_ids.numpy()[0] ) self.assertListEqual(__magic_name__ , __magic_name__ ) self.assertEqual((2, 39) , batch.input_ids.shape ) self.assertEqual((2, 39) , batch.attention_mask.shape ) @require_torch def lowerCamelCase__ ( self :Any ): '''simple docstring''' a = self.canine_tokenizer a = ["""Once there was a man.""", """He wrote a test in HuggingFace Tranformers."""] a = tokenizer(__magic_name__ , padding=__magic_name__ , return_tensors="""pt""" ) # check if input_ids, attention_mask and token_type_ids are returned self.assertIn("""input_ids""" , __magic_name__ ) self.assertIn("""attention_mask""" , __magic_name__ ) self.assertIn("""token_type_ids""" , __magic_name__ ) @require_torch def lowerCamelCase__ ( self :Any ): '''simple docstring''' a = self.canine_tokenizer a = [ """What's the weater?""", """It's about 25 degrees.""", ] a = tokenizer( text_target=__magic_name__ , max_length=32 , padding="""max_length""" , truncation=__magic_name__ , return_tensors="""pt""" ) self.assertEqual(32 , targets["""input_ids"""].shape[1] ) def lowerCamelCase__ ( self :Union[str, Any] ): '''simple docstring''' a = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): self.assertNotEqual(tokenizer.model_max_length , 42 ) # Now let's start the test a = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): # Isolate this from the other tests because we save additional tokens/etc a = tempfile.mkdtemp() a = """ He is very happy, UNwant\u00E9d,running""" a = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) tokenizer.save_pretrained(__magic_name__ ) a = tokenizer.__class__.from_pretrained(__magic_name__ ) a = after_tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) self.assertListEqual(__magic_name__ , __magic_name__ ) shutil.rmtree(__magic_name__ ) a = self.get_tokenizers(model_max_length=42 ) for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): # Isolate this from the other tests because we save additional tokens/etc a = tempfile.mkdtemp() a = """ He is very happy, UNwant\u00E9d,running""" a = tokenizer.additional_special_tokens # We can add a new special token for Canine as follows: a = chr(0Xe_0_0_7 ) additional_special_tokens.append(__magic_name__ ) tokenizer.add_special_tokens({"""additional_special_tokens""": additional_special_tokens} ) a = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) tokenizer.save_pretrained(__magic_name__ ) a = tokenizer.__class__.from_pretrained(__magic_name__ ) a = after_tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) self.assertListEqual(__magic_name__ , __magic_name__ ) self.assertIn(__magic_name__ , after_tokenizer.additional_special_tokens ) self.assertEqual(after_tokenizer.model_max_length , 42 ) a = tokenizer.__class__.from_pretrained(__magic_name__ , model_max_length=43 ) self.assertEqual(tokenizer.model_max_length , 43 ) shutil.rmtree(__magic_name__ ) def lowerCamelCase__ ( self :List[Any] ): '''simple docstring''' a = self.get_tokenizers(do_lower_case=__magic_name__ ) for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): a , a = self.get_clean_sequence(__magic_name__ ) # a special token for Canine can be defined as follows: a = 0Xe_0_0_5 a = chr(__magic_name__ ) tokenizer.add_special_tokens({"""cls_token""": special_token} ) a = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) self.assertEqual(len(__magic_name__ ) , 1 ) a = tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=__magic_name__ ) a = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) a = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) a = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) self.assertEqual(__magic_name__ , input_encoded + special_token_id ) a = tokenizer.decode(__magic_name__ , skip_special_tokens=__magic_name__ ) self.assertTrue(special_token not in decoded ) def lowerCamelCase__ ( self :int ): '''simple docstring''' a = self.get_tokenizers(do_lower_case=__magic_name__ ) for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): a = chr(0Xe_0_0_5 ) a = chr(0Xe_0_0_6 ) # `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py) tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=__magic_name__ ) # `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`, # which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py) tokenizer.add_special_tokens({"""additional_special_tokens""": [SPECIAL_TOKEN_2]} ) a = tokenizer.tokenize(__magic_name__ ) a = tokenizer.tokenize(__magic_name__ ) self.assertEqual(len(__magic_name__ ) , 1 ) self.assertEqual(len(__magic_name__ ) , 1 ) self.assertEqual(token_a[0] , __magic_name__ ) self.assertEqual(token_a[0] , __magic_name__ ) @require_tokenizers def lowerCamelCase__ ( self :Dict ): '''simple docstring''' a = self.get_tokenizers(do_lower_case=__magic_name__ ) for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): # a special token for Canine can be defined as follows: a = 0Xe_0_0_6 a = chr(__magic_name__ ) a = AddedToken(__magic_name__ , lstrip=__magic_name__ ) tokenizer.add_special_tokens({"""additional_special_tokens""": [new_token]} ) with tempfile.TemporaryDirectory() as tmp_dir_name: tokenizer.save_pretrained(__magic_name__ ) tokenizer.from_pretrained(__magic_name__ ) def lowerCamelCase__ ( self :int ): '''simple docstring''' a = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(__magic_name__ ) with open(os.path.join(__magic_name__ , """special_tokens_map.json""" ) , encoding="""utf-8""" ) as json_file: a = json.load(__magic_name__ ) with open(os.path.join(__magic_name__ , """tokenizer_config.json""" ) , encoding="""utf-8""" ) as json_file: a = json.load(__magic_name__ ) # a special token for Canine can be defined as follows: a = 0Xe_0_0_6 a = chr(__magic_name__ ) a = [new_token_a] a = [new_token_a] with open(os.path.join(__magic_name__ , """special_tokens_map.json""" ) , """w""" , encoding="""utf-8""" ) as outfile: json.dump(__magic_name__ , __magic_name__ ) with open(os.path.join(__magic_name__ , """tokenizer_config.json""" ) , """w""" , encoding="""utf-8""" ) as outfile: json.dump(__magic_name__ , __magic_name__ ) # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files a = tokenizer_class.from_pretrained(__magic_name__ , extra_ids=0 ) self.assertIn(__magic_name__ , tokenizer_without_change_in_init.additional_special_tokens ) # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab self.assertEqual( [new_token_a] , tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , ) a = 0Xe_0_0_7 a = chr(__magic_name__ ) # Now we test that we can change the value of additional_special_tokens in the from_pretrained a = [AddedToken(__magic_name__ , lstrip=__magic_name__ )] a = tokenizer_class.from_pretrained( __magic_name__ , additional_special_tokens=__magic_name__ , extra_ids=0 ) self.assertIn(__magic_name__ , tokenizer.additional_special_tokens ) # self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab self.assertEqual( [new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) ) @require_tokenizers def lowerCamelCase__ ( self :Union[str, Any] ): '''simple docstring''' a = self.get_tokenizers(do_lower_case=__magic_name__ ) for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): a = """hello world""" if self.space_between_special_tokens: a = """[CLS] hello world [SEP]""" else: a = input a = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) a = tokenizer.decode(__magic_name__ , spaces_between_special_tokens=self.space_between_special_tokens ) self.assertIn(__magic_name__ , [output, output.lower()] ) def lowerCamelCase__ ( self :List[str] ): '''simple docstring''' a = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): a = [ """bos_token""", """eos_token""", """unk_token""", """sep_token""", """pad_token""", """cls_token""", """mask_token""", ] a = """a""" a = ord(__magic_name__ ) for attr in attributes_list: setattr(__magic_name__ , attr + """_id""" , __magic_name__ ) self.assertEqual(getattr(__magic_name__ , __magic_name__ ) , __magic_name__ ) self.assertEqual(getattr(__magic_name__ , attr + """_id""" ) , __magic_name__ ) setattr(__magic_name__ , attr + """_id""" , __magic_name__ ) self.assertEqual(getattr(__magic_name__ , __magic_name__ ) , __magic_name__ ) self.assertEqual(getattr(__magic_name__ , attr + """_id""" ) , __magic_name__ ) setattr(__magic_name__ , """additional_special_tokens_ids""" , [] ) self.assertListEqual(getattr(__magic_name__ , """additional_special_tokens""" ) , [] ) self.assertListEqual(getattr(__magic_name__ , """additional_special_tokens_ids""" ) , [] ) a = 0Xe_0_0_6 a = chr(__magic_name__ ) setattr(__magic_name__ , """additional_special_tokens_ids""" , [additional_special_token_id] ) self.assertListEqual(getattr(__magic_name__ , """additional_special_tokens""" ) , [additional_special_token] ) self.assertListEqual(getattr(__magic_name__ , """additional_special_tokens_ids""" ) , [additional_special_token_id] ) def lowerCamelCase__ ( self :Any ): '''simple docstring''' pass def lowerCamelCase__ ( self :str ): '''simple docstring''' pass def lowerCamelCase__ ( self :Optional[int] ): '''simple docstring''' pass def lowerCamelCase__ ( self :List[Any] ): '''simple docstring''' pass def lowerCamelCase__ ( self :List[str] ): '''simple docstring''' pass def lowerCamelCase__ ( self :List[str] ): '''simple docstring''' pass def lowerCamelCase__ ( self :Any ): '''simple docstring''' pass def lowerCamelCase__ ( self :List[str] ): '''simple docstring''' pass
347
1
__UpperCamelCase : Optional[int] = [ [0, 16, 13, 0, 0, 0], [0, 0, 10, 12, 0, 0], [0, 4, 0, 0, 14, 0], [0, 0, 9, 0, 0, 20], [0, 0, 0, 7, 0, 4], [0, 0, 0, 0, 0, 0], ] def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> List[str]: # Return True if there is node that has not iterated. a = [False] * len(__lowerCamelCase ) a = [s] a = True while queue: a = queue.pop(0 ) for ind in range(len(graph[u] ) ): if visited[ind] is False and graph[u][ind] > 0: queue.append(__lowerCamelCase ) a = True a = u return visited[t] def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Optional[Any]: a = [-1] * (len(__lowerCamelCase )) a = 0 a = [] a = [i[:] for i in graph] # Record original cut, copy. while bfs(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ): a = float("""Inf""" ) a = sink while s != source: # Find the minimum value in select path a = min(__lowerCamelCase , graph[parent[s]][s] ) a = parent[s] max_flow += path_flow a = sink while v != source: a = parent[v] graph[u][v] -= path_flow graph[v][u] += path_flow a = parent[v] for i in range(len(__lowerCamelCase ) ): for j in range(len(graph[0] ) ): if graph[i][j] == 0 and temp[i][j] > 0: res.append((i, j) ) return res if __name__ == "__main__": print(mincut(test_graph, source=0, sink=5))
347
def __A ( __lowerCamelCase ) -> bool: return number & 1 == 0 if __name__ == "__main__": import doctest doctest.testmod()
347
1
from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices __UpperCamelCase : Optional[Any] = logging.get_logger(__name__) __UpperCamelCase : int = { "shi-labs/nat-mini-in1k-224": "https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json", # See all Nat models at https://huggingface.co/models?filter=nat } class __lowerCAmelCase ( __magic_name__ , __magic_name__ ): UpperCamelCase__ = '''nat''' UpperCamelCase__ = { '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers''', } def __init__( self :Any , __magic_name__ :int=4 , __magic_name__ :Dict=3 , __magic_name__ :List[str]=64 , __magic_name__ :Optional[int]=[3, 4, 6, 5] , __magic_name__ :int=[2, 4, 8, 16] , __magic_name__ :str=7 , __magic_name__ :Tuple=3.0 , __magic_name__ :Dict=True , __magic_name__ :List[Any]=0.0 , __magic_name__ :List[Any]=0.0 , __magic_name__ :List[Any]=0.1 , __magic_name__ :Optional[Any]="gelu" , __magic_name__ :Optional[Any]=0.02 , __magic_name__ :Tuple=1E-5 , __magic_name__ :Union[str, Any]=0.0 , __magic_name__ :int=None , __magic_name__ :Any=None , **__magic_name__ :Dict , ): '''simple docstring''' super().__init__(**__magic_name__ ) a = patch_size a = num_channels a = embed_dim a = depths a = len(__magic_name__ ) a = num_heads a = kernel_size a = mlp_ratio a = qkv_bias a = hidden_dropout_prob a = attention_probs_dropout_prob a = drop_path_rate a = hidden_act a = layer_norm_eps a = initializer_range # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model a = int(embed_dim * 2 ** (len(__magic_name__ ) - 1) ) a = layer_scale_init_value a = ["""stem"""] + [F'stage{idx}' for idx in range(1 , len(__magic_name__ ) + 1 )] a , a = get_aligned_output_features_output_indices( out_features=__magic_name__ , out_indices=__magic_name__ , stage_names=self.stage_names )
347
def __A ( __lowerCamelCase ) -> int: if not numbers: return 0 if not isinstance(__lowerCamelCase , (list, tuple) ) or not all( isinstance(__lowerCamelCase , __lowerCamelCase ) for number in numbers ): raise ValueError("""numbers must be an iterable of integers""" ) a = a = a = numbers[0] for i in range(1 , len(__lowerCamelCase ) ): # update the maximum and minimum subarray products a = numbers[i] if number < 0: a , a = min_till_now, max_till_now a = max(__lowerCamelCase , max_till_now * number ) a = min(__lowerCamelCase , min_till_now * number ) # update the maximum product found till now a = max(__lowerCamelCase , __lowerCamelCase ) return max_prod
347
1
from __future__ import annotations import math import random from typing import Any class __lowerCAmelCase : def __init__( self :int ): '''simple docstring''' a = [] a = 0 a = 0 def lowerCamelCase__ ( self :List[Any] ): '''simple docstring''' return self.head == self.tail def lowerCamelCase__ ( self :Tuple , __magic_name__ :Any ): '''simple docstring''' self.data.append(__magic_name__ ) a = self.tail + 1 def lowerCamelCase__ ( self :Union[str, Any] ): '''simple docstring''' a = self.data[self.head] a = self.head + 1 return ret def lowerCamelCase__ ( self :Tuple ): '''simple docstring''' return self.tail - self.head def lowerCamelCase__ ( self :Tuple ): '''simple docstring''' print(self.data ) print("""**************""" ) print(self.data[self.head : self.tail] ) class __lowerCAmelCase : def __init__( self :Tuple , __magic_name__ :Any ): '''simple docstring''' a = data a = None a = None a = 1 def lowerCamelCase__ ( self :Optional[Any] ): '''simple docstring''' return self.data def lowerCamelCase__ ( self :List[str] ): '''simple docstring''' return self.left def lowerCamelCase__ ( self :int ): '''simple docstring''' return self.right def lowerCamelCase__ ( self :List[str] ): '''simple docstring''' return self.height def lowerCamelCase__ ( self :Optional[int] , __magic_name__ :Any ): '''simple docstring''' a = data def lowerCamelCase__ ( self :int , __magic_name__ :MyNode | None ): '''simple docstring''' a = node def lowerCamelCase__ ( self :int , __magic_name__ :MyNode | None ): '''simple docstring''' a = node def lowerCamelCase__ ( self :List[Any] , __magic_name__ :int ): '''simple docstring''' a = height def __A ( __lowerCamelCase ) -> int: if node is None: return 0 return node.get_height() def __A ( __lowerCamelCase , __lowerCamelCase ) -> int: if a > b: return a return b def __A ( __lowerCamelCase ) -> MyNode: print("""left rotation node:""" , node.get_data() ) a = node.get_left() assert ret is not None node.set_left(ret.get_right() ) ret.set_right(__lowerCamelCase ) a = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1 node.set_height(__lowerCamelCase ) a = my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1 ret.set_height(__lowerCamelCase ) return ret def __A ( __lowerCamelCase ) -> MyNode: print("""right rotation node:""" , node.get_data() ) a = node.get_right() assert ret is not None node.set_right(ret.get_left() ) ret.set_left(__lowerCamelCase ) a = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1 node.set_height(__lowerCamelCase ) a = my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1 ret.set_height(__lowerCamelCase ) return ret def __A ( __lowerCamelCase ) -> MyNode: a = node.get_left() assert left_child is not None node.set_left(left_rotation(__lowerCamelCase ) ) return right_rotation(__lowerCamelCase ) def __A ( __lowerCamelCase ) -> MyNode: a = node.get_right() assert right_child is not None node.set_right(right_rotation(__lowerCamelCase ) ) return left_rotation(__lowerCamelCase ) def __A ( __lowerCamelCase , __lowerCamelCase ) -> MyNode | None: if node is None: return MyNode(__lowerCamelCase ) if data < node.get_data(): node.set_left(insert_node(node.get_left() , __lowerCamelCase ) ) if ( get_height(node.get_left() ) - get_height(node.get_right() ) == 2 ): # an unbalance detected a = node.get_left() assert left_child is not None if ( data < left_child.get_data() ): # new node is the left child of the left child a = right_rotation(__lowerCamelCase ) else: a = lr_rotation(__lowerCamelCase ) else: node.set_right(insert_node(node.get_right() , __lowerCamelCase ) ) if get_height(node.get_right() ) - get_height(node.get_left() ) == 2: a = node.get_right() assert right_child is not None if data < right_child.get_data(): a = rl_rotation(__lowerCamelCase ) else: a = left_rotation(__lowerCamelCase ) a = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1 node.set_height(__lowerCamelCase ) return node def __A ( __lowerCamelCase ) -> Any: while True: a = root.get_right() if right_child is None: break a = right_child return root.get_data() def __A ( __lowerCamelCase ) -> Any: while True: a = root.get_left() if left_child is None: break a = left_child return root.get_data() def __A ( __lowerCamelCase , __lowerCamelCase ) -> MyNode | None: a = root.get_left() a = root.get_right() if root.get_data() == data: if left_child is not None and right_child is not None: a = get_left_most(__lowerCamelCase ) root.set_data(__lowerCamelCase ) root.set_right(del_node(__lowerCamelCase , __lowerCamelCase ) ) elif left_child is not None: a = left_child elif right_child is not None: a = right_child else: return None elif root.get_data() > data: if left_child is None: print("""No such data""" ) return root else: root.set_left(del_node(__lowerCamelCase , __lowerCamelCase ) ) else: # root.get_data() < data if right_child is None: return root else: root.set_right(del_node(__lowerCamelCase , __lowerCamelCase ) ) if get_height(__lowerCamelCase ) - get_height(__lowerCamelCase ) == 2: assert right_child is not None if get_height(right_child.get_right() ) > get_height(right_child.get_left() ): a = left_rotation(__lowerCamelCase ) else: a = rl_rotation(__lowerCamelCase ) elif get_height(__lowerCamelCase ) - get_height(__lowerCamelCase ) == -2: assert left_child is not None if get_height(left_child.get_left() ) > get_height(left_child.get_right() ): a = right_rotation(__lowerCamelCase ) else: a = lr_rotation(__lowerCamelCase ) a = my_max(get_height(root.get_right() ) , get_height(root.get_left() ) ) + 1 root.set_height(__lowerCamelCase ) return root class __lowerCAmelCase : def __init__( self :int ): '''simple docstring''' a = None def lowerCamelCase__ ( self :Optional[int] ): '''simple docstring''' return get_height(self.root ) def lowerCamelCase__ ( self :List[str] , __magic_name__ :Any ): '''simple docstring''' print("""insert:""" + str(__magic_name__ ) ) a = insert_node(self.root , __magic_name__ ) def lowerCamelCase__ ( self :Tuple , __magic_name__ :Any ): '''simple docstring''' print("""delete:""" + str(__magic_name__ ) ) if self.root is None: print("""Tree is empty!""" ) return a = del_node(self.root , __magic_name__ ) def __str__( self :List[str] , ): # a level traversale, gives a more intuitive look on the tree '''simple docstring''' a = """""" a = MyQueue() q.push(self.root ) a = self.get_height() if layer == 0: return output a = 0 while not q.is_empty(): a = q.pop() a = """ """ * int(math.pow(2 , layer - 1 ) ) output += space if node is None: output += "*" q.push(__magic_name__ ) q.push(__magic_name__ ) else: output += str(node.get_data() ) q.push(node.get_left() ) q.push(node.get_right() ) output += space a = cnt + 1 for i in range(100 ): if cnt == math.pow(2 , __magic_name__ ) - 1: a = layer - 1 if layer == 0: output += "\n*************************************" return output output += "\n" break output += "\n*************************************" return output def __A ( ) -> None: import doctest doctest.testmod() if __name__ == "__main__": _test() __UpperCamelCase : Optional[int] = AVLtree() __UpperCamelCase : int = list(range(10)) random.shuffle(lst) for i in lst: t.insert(i) print(str(t)) random.shuffle(lst) for i in lst: t.del_node(i) print(str(t))
347
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) __UpperCamelCase : Optional[Any] = { "configuration_wav2vec2": ["WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Wav2Vec2Config"], "feature_extraction_wav2vec2": ["Wav2Vec2FeatureExtractor"], "processing_wav2vec2": ["Wav2Vec2Processor"], "tokenization_wav2vec2": ["Wav2Vec2CTCTokenizer", "Wav2Vec2Tokenizer"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : str = [ "WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST", "Wav2Vec2ForAudioFrameClassification", "Wav2Vec2ForCTC", "Wav2Vec2ForMaskedLM", "Wav2Vec2ForPreTraining", "Wav2Vec2ForSequenceClassification", "Wav2Vec2ForXVector", "Wav2Vec2Model", "Wav2Vec2PreTrainedModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : Dict = [ "TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST", "TFWav2Vec2ForCTC", "TFWav2Vec2Model", "TFWav2Vec2PreTrainedModel", "TFWav2Vec2ForSequenceClassification", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : Union[str, Any] = [ "FlaxWav2Vec2ForCTC", "FlaxWav2Vec2ForPreTraining", "FlaxWav2Vec2Model", "FlaxWav2Vec2PreTrainedModel", ] if TYPE_CHECKING: from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig from .feature_extraction_wavaveca import WavaVecaFeatureExtractor from .processing_wavaveca import WavaVecaProcessor from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_wavaveca import ( WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, WavaVecaForAudioFrameClassification, WavaVecaForCTC, WavaVecaForMaskedLM, WavaVecaForPreTraining, WavaVecaForSequenceClassification, WavaVecaForXVector, WavaVecaModel, WavaVecaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_wavaveca import ( TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, TFWavaVecaForCTC, TFWavaVecaForSequenceClassification, TFWavaVecaModel, TFWavaVecaPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_wavaveca import ( FlaxWavaVecaForCTC, FlaxWavaVecaForPreTraining, FlaxWavaVecaModel, FlaxWavaVecaPreTrainedModel, ) else: import sys __UpperCamelCase : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
347
1
from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...file_utils import TensorType, is_torch_available from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import logging __UpperCamelCase : Union[str, Any] = logging.get_logger(__name__) __UpperCamelCase : List[str] = { "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json", # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small } class __lowerCAmelCase ( __magic_name__ ): UpperCamelCase__ = '''blenderbot-small''' UpperCamelCase__ = ['''past_key_values'''] UpperCamelCase__ = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''} def __init__( self :Optional[int] , __magic_name__ :Optional[Any]=5_0265 , __magic_name__ :int=512 , __magic_name__ :Dict=8 , __magic_name__ :int=2048 , __magic_name__ :Any=16 , __magic_name__ :int=8 , __magic_name__ :int=2048 , __magic_name__ :List[Any]=16 , __magic_name__ :List[str]=0.0 , __magic_name__ :Optional[Any]=0.0 , __magic_name__ :Union[str, Any]=True , __magic_name__ :Optional[int]=True , __magic_name__ :Union[str, Any]="gelu" , __magic_name__ :Optional[Any]=512 , __magic_name__ :int=0.1 , __magic_name__ :Union[str, Any]=0.0 , __magic_name__ :List[str]=0.0 , __magic_name__ :Tuple=0.02 , __magic_name__ :Union[str, Any]=1 , __magic_name__ :Dict=False , __magic_name__ :List[str]=0 , __magic_name__ :int=1 , __magic_name__ :Optional[Any]=2 , __magic_name__ :int=2 , **__magic_name__ :int , ): '''simple docstring''' a = vocab_size a = max_position_embeddings a = d_model a = encoder_ffn_dim a = encoder_layers a = encoder_attention_heads a = decoder_ffn_dim a = decoder_layers a = decoder_attention_heads a = dropout a = attention_dropout a = activation_dropout a = activation_function a = init_std a = encoder_layerdrop a = decoder_layerdrop a = use_cache a = encoder_layers a = scale_embedding # scale factor will be sqrt(d_model) if True super().__init__( pad_token_id=__magic_name__ , bos_token_id=__magic_name__ , eos_token_id=__magic_name__ , is_encoder_decoder=__magic_name__ , decoder_start_token_id=__magic_name__ , forced_eos_token_id=__magic_name__ , **__magic_name__ , ) class __lowerCAmelCase ( __magic_name__ ): @property def lowerCamelCase__ ( self :List[Any] ): '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: a = OrderedDict( [ ("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}), ("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}), ] ) if self.use_past: a = {0: """batch"""} a = {0: """batch""", 1: """past_decoder_sequence + sequence"""} else: a = {0: """batch""", 1: """decoder_sequence"""} a = {0: """batch""", 1: """decoder_sequence"""} if self.use_past: self.fill_with_past_key_values_(__magic_name__ , direction="""inputs""" ) elif self.task == "causal-lm": # TODO: figure this case out. a = OrderedDict( [ ("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}), ("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}), ] ) if self.use_past: a , a = self.num_layers for i in range(__magic_name__ ): a = {0: """batch""", 2: """past_sequence + sequence"""} a = {0: """batch""", 2: """past_sequence + sequence"""} else: a = OrderedDict( [ ("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}), ("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}), ("""decoder_input_ids""", {0: """batch""", 1: """decoder_sequence"""}), ("""decoder_attention_mask""", {0: """batch""", 1: """decoder_sequence"""}), ] ) return common_inputs @property def lowerCamelCase__ ( self :List[str] ): '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: a = super().outputs else: a = super(__magic_name__ , self ).outputs if self.use_past: a , a = self.num_layers for i in range(__magic_name__ ): a = {0: """batch""", 2: """past_sequence + sequence"""} a = {0: """batch""", 2: """past_sequence + sequence"""} return common_outputs def lowerCamelCase__ ( self :int , __magic_name__ :PreTrainedTokenizer , __magic_name__ :int = -1 , __magic_name__ :int = -1 , __magic_name__ :bool = False , __magic_name__ :Optional[TensorType] = None , ): '''simple docstring''' a = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) # Generate decoder inputs a = seq_length if not self.use_past else 1 a = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) a = {F'decoder_{name}': tensor for name, tensor in decoder_inputs.items()} a = dict(**__magic_name__ , **__magic_name__ ) if self.use_past: if not is_torch_available(): raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" ) else: import torch a , a = common_inputs["""input_ids"""].shape a = common_inputs["""decoder_input_ids"""].shape[1] a , a = self.num_attention_heads a = ( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) a = decoder_seq_length + 3 a = ( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) a = torch.cat( [common_inputs["""decoder_attention_mask"""], torch.ones(__magic_name__ , __magic_name__ )] , dim=1 ) a = [] # If the number of encoder and decoder layers are present in the model configuration, both are considered a , a = self.num_layers a = min(__magic_name__ , __magic_name__ ) a = max(__magic_name__ , __magic_name__ ) - min_num_layers a = """encoder""" if num_encoder_layers > num_decoder_layers else """decoder""" for _ in range(__magic_name__ ): common_inputs["past_key_values"].append( ( torch.zeros(__magic_name__ ), torch.zeros(__magic_name__ ), torch.zeros(__magic_name__ ), torch.zeros(__magic_name__ ), ) ) # TODO: test this. a = encoder_shape if remaining_side_name == """encoder""" else decoder_shape for _ in range(__magic_name__ , __magic_name__ ): common_inputs["past_key_values"].append((torch.zeros(__magic_name__ ), torch.zeros(__magic_name__ )) ) return common_inputs def lowerCamelCase__ ( self :Dict , __magic_name__ :PreTrainedTokenizer , __magic_name__ :int = -1 , __magic_name__ :int = -1 , __magic_name__ :bool = False , __magic_name__ :Optional[TensorType] = None , ): '''simple docstring''' a = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) if self.use_past: if not is_torch_available(): raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" ) else: import torch a , a = common_inputs["""input_ids"""].shape # Not using the same length for past_key_values a = seqlen + 2 a , a = self.num_layers a , a = self.num_attention_heads a = ( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) a = common_inputs["""attention_mask"""].dtype a = torch.cat( [common_inputs["""attention_mask"""], torch.ones(__magic_name__ , __magic_name__ , dtype=__magic_name__ )] , dim=1 ) a = [ (torch.zeros(__magic_name__ ), torch.zeros(__magic_name__ )) for _ in range(__magic_name__ ) ] return common_inputs def lowerCamelCase__ ( self :Tuple , __magic_name__ :PreTrainedTokenizer , __magic_name__ :int = -1 , __magic_name__ :int = -1 , __magic_name__ :bool = False , __magic_name__ :Optional[TensorType] = None , ): '''simple docstring''' a = compute_effective_axis_dimension( __magic_name__ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX a = tokenizer.num_special_tokens_to_add(__magic_name__ ) a = compute_effective_axis_dimension( __magic_name__ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__magic_name__ ) # Generate dummy inputs according to compute batch and sequence a = [""" """.join([tokenizer.unk_token] ) * seq_length] * batch_size a = dict(tokenizer(__magic_name__ , return_tensors=__magic_name__ ) ) return common_inputs def lowerCamelCase__ ( self :Any , __magic_name__ :PreTrainedTokenizer , __magic_name__ :int = -1 , __magic_name__ :int = -1 , __magic_name__ :bool = False , __magic_name__ :Optional[TensorType] = None , ): '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: a = self._generate_dummy_inputs_for_default_and_seqaseq_lm( __magic_name__ , batch_size=__magic_name__ , seq_length=__magic_name__ , is_pair=__magic_name__ , framework=__magic_name__ ) elif self.task == "causal-lm": a = self._generate_dummy_inputs_for_causal_lm( __magic_name__ , batch_size=__magic_name__ , seq_length=__magic_name__ , is_pair=__magic_name__ , framework=__magic_name__ ) else: a = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __magic_name__ , batch_size=__magic_name__ , seq_length=__magic_name__ , is_pair=__magic_name__ , framework=__magic_name__ ) return common_inputs def lowerCamelCase__ ( self :Union[str, Any] , __magic_name__ :Union[str, Any] , __magic_name__ :List[Any] , __magic_name__ :int , __magic_name__ :Any ): '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: a = super()._flatten_past_key_values_(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) else: a = super(__magic_name__ , self )._flatten_past_key_values_( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
347
import pytest import datasets.config from datasets.utils.info_utils import is_small_dataset @pytest.mark.parametrize("""dataset_size""" , [None, 400 * 2**20, 600 * 2**20] ) @pytest.mark.parametrize("""input_in_memory_max_size""" , ["""default""", 0, 100 * 2**20, 900 * 2**20] ) def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Any: if input_in_memory_max_size != "default": monkeypatch.setattr(datasets.config , """IN_MEMORY_MAX_SIZE""" , __lowerCamelCase ) a = datasets.config.IN_MEMORY_MAX_SIZE if input_in_memory_max_size == "default": assert in_memory_max_size == 0 else: assert in_memory_max_size == input_in_memory_max_size if dataset_size and in_memory_max_size: a = dataset_size < in_memory_max_size else: a = False a = is_small_dataset(__lowerCamelCase ) assert result == expected
347
1
def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , ) -> float: a = [redshift, radiation_density, matter_density, dark_energy] if any(p < 0 for p in parameters ): raise ValueError("""All input parameters must be positive""" ) if any(p > 1 for p in parameters[1:4] ): raise ValueError("""Relative densities cannot be greater than one""" ) else: a = 1 - (matter_density + radiation_density + dark_energy) a = ( radiation_density * (redshift + 1) ** 4 + matter_density * (redshift + 1) ** 3 + curvature * (redshift + 1) ** 2 + dark_energy ) a = hubble_constant * e_a ** (1 / 2) return hubble if __name__ == "__main__": import doctest # run doctest doctest.testmod() # demo LCDM approximation __UpperCamelCase : Union[str, Any] = 0.3 print( hubble_parameter( hubble_constant=68.3, radiation_density=1E-4, matter_density=matter_density, dark_energy=1 - matter_density, redshift=0, ) )
347
from __future__ import annotations from fractions import Fraction from math import gcd, sqrt def __A ( __lowerCamelCase ) -> bool: a = int(number**0.5 ) return number == sq * sq def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> tuple[int, int]: a = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den a = x_den * y_den * z_den a = gcd(__lowerCamelCase , __lowerCamelCase ) top //= hcf bottom //= hcf return top, bottom def __A ( __lowerCamelCase = 35 ) -> int: a = set() a = 42 a = Fraction(0 ) a = 42 for x_num in range(1 , order + 1 ): for x_den in range(x_num + 1 , order + 1 ): for y_num in range(1 , order + 1 ): for y_den in range(y_num + 1 , order + 1 ): # n=1 a = x_num * y_den + x_den * y_num a = x_den * y_den a = gcd(__lowerCamelCase , __lowerCamelCase ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: a = add_three( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) unique_s.add(__lowerCamelCase ) # n=2 a = ( x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num ) a = x_den * x_den * y_den * y_den if is_sq(__lowerCamelCase ) and is_sq(__lowerCamelCase ): a = int(sqrt(__lowerCamelCase ) ) a = int(sqrt(__lowerCamelCase ) ) a = gcd(__lowerCamelCase , __lowerCamelCase ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: a = add_three( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) unique_s.add(__lowerCamelCase ) # n=-1 a = x_num * y_num a = x_den * y_num + x_num * y_den a = gcd(__lowerCamelCase , __lowerCamelCase ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: a = add_three( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) unique_s.add(__lowerCamelCase ) # n=2 a = x_num * x_num * y_num * y_num a = ( x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den ) if is_sq(__lowerCamelCase ) and is_sq(__lowerCamelCase ): a = int(sqrt(__lowerCamelCase ) ) a = int(sqrt(__lowerCamelCase ) ) a = gcd(__lowerCamelCase , __lowerCamelCase ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: a = add_three( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) unique_s.add(__lowerCamelCase ) for num, den in unique_s: total += Fraction(__lowerCamelCase , __lowerCamelCase ) return total.denominator + total.numerator if __name__ == "__main__": print(F'{solution() = }')
347
1
__UpperCamelCase : Tuple = range(2, 20 + 1) __UpperCamelCase : Tuple = [10**k for k in range(ks[-1] + 1)] __UpperCamelCase : dict[int, dict[int, list[list[int]]]] = {} def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Dict: a = sum(a_i[j] for j in range(__lowerCamelCase , len(__lowerCamelCase ) ) ) a = sum(a_i[j] * base[j] for j in range(min(len(__lowerCamelCase ) , __lowerCamelCase ) ) ) a , a = 0, 0 a = n - i a = memo.get(__lowerCamelCase ) if sub_memo is not None: a = sub_memo.get(__lowerCamelCase ) if jumps is not None and len(__lowerCamelCase ) > 0: # find and make the largest jump without going over a = -1 for _k in range(len(__lowerCamelCase ) - 1 , -1 , -1 ): if jumps[_k][2] <= k and jumps[_k][1] <= max_dn: a = _k break if max_jump >= 0: a , a , a = jumps[max_jump] # since the difference between jumps is cached, add c a = diff + c for j in range(min(__lowerCamelCase , len(__lowerCamelCase ) ) ): a , a = divmod(__lowerCamelCase , 10 ) if new_c > 0: add(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) else: a = [] else: a = {c: []} a = sub_memo if dn >= max_dn or c + diff >= base[k]: return diff, dn if k > ks[0]: while True: # keep doing smaller jumps a , a = next_term(__lowerCamelCase , k - 1 , i + dn , __lowerCamelCase ) diff += _diff dn += terms_jumped if dn >= max_dn or c + diff >= base[k]: break else: # would be too small a jump, just compute sequential terms instead a , a = compute(__lowerCamelCase , __lowerCamelCase , i + dn , __lowerCamelCase ) diff += _diff dn += terms_jumped a = sub_memo[c] # keep jumps sorted by # of terms skipped a = 0 while j < len(__lowerCamelCase ): if jumps[j][1] > dn: break j += 1 # cache the jump for this value digitsum(b) and c sub_memo[c].insert(__lowerCamelCase , (diff, dn, k) ) return (diff, dn) def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Tuple: if i >= n: return 0, i if k > len(__lowerCamelCase ): a_i.extend([0 for _ in range(k - len(__lowerCamelCase ) )] ) # note: a_i -> b * 10^k + c # ds_b -> digitsum(b) # ds_c -> digitsum(c) a = i a , a , a = 0, 0, 0 for j in range(len(__lowerCamelCase ) ): if j >= k: ds_b += a_i[j] else: ds_c += a_i[j] while i < n: i += 1 a = ds_c + ds_b diff += addend a = 0 for j in range(__lowerCamelCase ): a = a_i[j] + addend a , a = divmod(__lowerCamelCase , 10 ) ds_c += a_i[j] if addend > 0: break if addend > 0: add(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) return diff, i - start_i def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Tuple: for j in range(__lowerCamelCase , len(__lowerCamelCase ) ): a = digits[j] + addend if s >= 10: a , a = divmod(__lowerCamelCase , 10 ) a = addend // 10 + quotient else: a = s a = addend // 10 if addend == 0: break while addend > 0: a , a = divmod(__lowerCamelCase , 10 ) digits.append(__lowerCamelCase ) def __A ( __lowerCamelCase = 10**15 ) -> int: a = [1] a = 1 a = 0 while True: a , a = next_term(__lowerCamelCase , 20 , i + dn , __lowerCamelCase ) dn += terms_jumped if dn == n - i: break a = 0 for j in range(len(__lowerCamelCase ) ): a_n += digits[j] * 10**j return a_n if __name__ == "__main__": print(F'{solution() = }')
347
import unittest import numpy as np from transformers import RoFormerConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.roformer.modeling_flax_roformer import ( FlaxRoFormerForMaskedLM, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerModel, ) class __lowerCAmelCase ( unittest.TestCase ): def __init__( self :List[str] , __magic_name__ :List[str] , __magic_name__ :List[Any]=13 , __magic_name__ :Any=7 , __magic_name__ :Optional[int]=True , __magic_name__ :List[Any]=True , __magic_name__ :Optional[int]=True , __magic_name__ :Union[str, Any]=True , __magic_name__ :Any=99 , __magic_name__ :List[str]=32 , __magic_name__ :List[str]=5 , __magic_name__ :str=4 , __magic_name__ :str=37 , __magic_name__ :Optional[int]="gelu" , __magic_name__ :int=0.1 , __magic_name__ :Dict=0.1 , __magic_name__ :List[str]=512 , __magic_name__ :Tuple=16 , __magic_name__ :Tuple=2 , __magic_name__ :List[str]=0.02 , __magic_name__ :Any=4 , ): '''simple docstring''' a = parent a = batch_size a = seq_length a = is_training a = use_attention_mask a = use_token_type_ids a = use_labels a = vocab_size a = hidden_size a = num_hidden_layers a = num_attention_heads a = intermediate_size a = hidden_act a = hidden_dropout_prob a = attention_probs_dropout_prob a = max_position_embeddings a = type_vocab_size a = type_sequence_label_size a = initializer_range a = num_choices def lowerCamelCase__ ( self :Optional[int] ): '''simple docstring''' a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) a = None if self.use_attention_mask: a = random_attention_mask([self.batch_size, self.seq_length] ) a = None if self.use_token_type_ids: a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) a = RoFormerConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__magic_name__ , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def lowerCamelCase__ ( self :Optional[Any] ): '''simple docstring''' a = self.prepare_config_and_inputs() a , a , a , a = config_and_inputs a = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask} return config, inputs_dict @require_flax class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ): UpperCamelCase__ = True UpperCamelCase__ = ( ( FlaxRoFormerModel, FlaxRoFormerForMaskedLM, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, ) if is_flax_available() else () ) def lowerCamelCase__ ( self :List[Any] ): '''simple docstring''' a = FlaxRoFormerModelTester(self ) @slow def lowerCamelCase__ ( self :List[str] ): '''simple docstring''' for model_class_name in self.all_model_classes: a = model_class_name.from_pretrained("""junnyu/roformer_chinese_small""" , from_pt=__magic_name__ ) a = model(np.ones((1, 1) ) ) self.assertIsNotNone(__magic_name__ ) @require_flax class __lowerCAmelCase ( unittest.TestCase ): @slow def lowerCamelCase__ ( self :Union[str, Any] ): '''simple docstring''' a = FlaxRoFormerForMaskedLM.from_pretrained("""junnyu/roformer_chinese_base""" ) a = jnp.array([[0, 1, 2, 3, 4, 5]] ) a = model(__magic_name__ )[0] a = 5_0000 a = (1, 6, vocab_size) self.assertEqual(output.shape , __magic_name__ ) a = jnp.array( [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]] ) self.assertTrue(jnp.allclose(output[:, :3, :3] , __magic_name__ , atol=1E-4 ) )
347
1
import warnings from ...utils import logging from .image_processing_mobilevit import MobileViTImageProcessor __UpperCamelCase : int = logging.get_logger(__name__) class __lowerCAmelCase ( __magic_name__ ): def __init__( self :Optional[Any] , *__magic_name__ :Dict , **__magic_name__ :Tuple ): '''simple docstring''' warnings.warn( """The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.""" """ Please use MobileViTImageProcessor instead.""" , __magic_name__ , ) super().__init__(*__magic_name__ , **__magic_name__ )
347
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __UpperCamelCase : Optional[int] = { "configuration_blenderbot": [ "BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BlenderbotConfig", "BlenderbotOnnxConfig", ], "tokenization_blenderbot": ["BlenderbotTokenizer"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : List[Any] = ["BlenderbotTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : Union[str, Any] = [ "BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST", "BlenderbotForCausalLM", "BlenderbotForConditionalGeneration", "BlenderbotModel", "BlenderbotPreTrainedModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : int = [ "TFBlenderbotForConditionalGeneration", "TFBlenderbotModel", "TFBlenderbotPreTrainedModel", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : Tuple = [ "FlaxBlenderbotForConditionalGeneration", "FlaxBlenderbotModel", "FlaxBlenderbotPreTrainedModel", ] if TYPE_CHECKING: from .configuration_blenderbot import ( BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP, BlenderbotConfig, BlenderbotOnnxConfig, ) from .tokenization_blenderbot import BlenderbotTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_blenderbot_fast import BlenderbotTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_blenderbot import ( BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST, BlenderbotForCausalLM, BlenderbotForConditionalGeneration, BlenderbotModel, BlenderbotPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_blenderbot import ( TFBlenderbotForConditionalGeneration, TFBlenderbotModel, TFBlenderbotPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_blenderbot import ( FlaxBlenderbotForConditionalGeneration, FlaxBlenderbotModel, FlaxBlenderbotPreTrainedModel, ) else: import sys __UpperCamelCase : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
347
1
import warnings from ...configuration_utils import PretrainedConfig from ...utils import logging __UpperCamelCase : Union[str, Any] = logging.get_logger(__name__) __UpperCamelCase : Optional[Any] = { "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json", } class __lowerCAmelCase ( __magic_name__ ): UpperCamelCase__ = '''mvp''' UpperCamelCase__ = ['''past_key_values'''] UpperCamelCase__ = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''} def __init__( self :int , __magic_name__ :Union[str, Any]=5_0267 , __magic_name__ :List[Any]=1024 , __magic_name__ :Optional[int]=12 , __magic_name__ :Optional[Any]=4096 , __magic_name__ :str=16 , __magic_name__ :Any=12 , __magic_name__ :Tuple=4096 , __magic_name__ :Tuple=16 , __magic_name__ :Optional[int]=0.0 , __magic_name__ :Union[str, Any]=0.0 , __magic_name__ :Dict="gelu" , __magic_name__ :Optional[Any]=1024 , __magic_name__ :str=0.1 , __magic_name__ :str=0.0 , __magic_name__ :Dict=0.0 , __magic_name__ :Union[str, Any]=0.02 , __magic_name__ :List[Any]=0.0 , __magic_name__ :Tuple=False , __magic_name__ :Dict=True , __magic_name__ :Tuple=1 , __magic_name__ :List[Any]=0 , __magic_name__ :Optional[int]=2 , __magic_name__ :int=True , __magic_name__ :Optional[Any]=2 , __magic_name__ :Tuple=2 , __magic_name__ :Optional[Any]=False , __magic_name__ :int=100 , __magic_name__ :Dict=800 , **__magic_name__ :int , ): '''simple docstring''' a = vocab_size a = max_position_embeddings a = d_model a = encoder_ffn_dim a = encoder_layers a = encoder_attention_heads a = decoder_ffn_dim a = decoder_layers a = decoder_attention_heads a = dropout a = attention_dropout a = activation_dropout a = activation_function a = init_std a = encoder_layerdrop a = decoder_layerdrop a = classifier_dropout a = use_cache a = encoder_layers a = scale_embedding # scale factor will be sqrt(d_model) if True a = use_prompt a = prompt_length a = prompt_mid_dim super().__init__( pad_token_id=__magic_name__ , bos_token_id=__magic_name__ , eos_token_id=__magic_name__ , is_encoder_decoder=__magic_name__ , decoder_start_token_id=__magic_name__ , forced_eos_token_id=__magic_name__ , **__magic_name__ , ) if self.forced_bos_token_id is None and kwargs.get("""force_bos_token_to_be_generated""" , __magic_name__ ): a = self.bos_token_id warnings.warn( F'Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. ' """The config can simply be saved and uploaded again to be fixed.""" )
347
import tempfile import torch from diffusers import IPNDMScheduler from .test_schedulers import SchedulerCommonTest class __lowerCAmelCase ( __magic_name__ ): UpperCamelCase__ = (IPNDMScheduler,) UpperCamelCase__ = (('''num_inference_steps''', 50),) def lowerCamelCase__ ( self :Any , **__magic_name__ :Optional[Any] ): '''simple docstring''' a = {"""num_train_timesteps""": 1000} config.update(**__magic_name__ ) return config def lowerCamelCase__ ( self :Optional[int] , __magic_name__ :Tuple=0 , **__magic_name__ :Optional[int] ): '''simple docstring''' a = dict(self.forward_default_kwargs ) a = kwargs.pop("""num_inference_steps""" , __magic_name__ ) a = self.dummy_sample a = 0.1 * sample a = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: a = self.get_scheduler_config(**__magic_name__ ) a = scheduler_class(**__magic_name__ ) scheduler.set_timesteps(__magic_name__ ) # copy over dummy past residuals a = dummy_past_residuals[:] if time_step is None: a = scheduler.timesteps[len(scheduler.timesteps ) // 2] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__magic_name__ ) a = scheduler_class.from_pretrained(__magic_name__ ) new_scheduler.set_timesteps(__magic_name__ ) # copy over dummy past residuals a = dummy_past_residuals[:] a = scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample a = new_scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" a = scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample a = new_scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def lowerCamelCase__ ( self :Union[str, Any] ): '''simple docstring''' pass def lowerCamelCase__ ( self :List[Any] , __magic_name__ :List[Any]=0 , **__magic_name__ :Any ): '''simple docstring''' a = dict(self.forward_default_kwargs ) a = kwargs.pop("""num_inference_steps""" , __magic_name__ ) a = self.dummy_sample a = 0.1 * sample a = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: a = self.get_scheduler_config() a = scheduler_class(**__magic_name__ ) scheduler.set_timesteps(__magic_name__ ) # copy over dummy past residuals (must be after setting timesteps) a = dummy_past_residuals[:] if time_step is None: a = scheduler.timesteps[len(scheduler.timesteps ) // 2] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__magic_name__ ) a = scheduler_class.from_pretrained(__magic_name__ ) # copy over dummy past residuals new_scheduler.set_timesteps(__magic_name__ ) # copy over dummy past residual (must be after setting timesteps) a = dummy_past_residuals[:] a = scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample a = new_scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" a = scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample a = new_scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def lowerCamelCase__ ( self :Optional[Any] , **__magic_name__ :Optional[int] ): '''simple docstring''' a = self.scheduler_classes[0] a = self.get_scheduler_config(**__magic_name__ ) a = scheduler_class(**__magic_name__ ) a = 10 a = self.dummy_model() a = self.dummy_sample_deter scheduler.set_timesteps(__magic_name__ ) for i, t in enumerate(scheduler.timesteps ): a = model(__magic_name__ , __magic_name__ ) a = scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ ).prev_sample for i, t in enumerate(scheduler.timesteps ): a = model(__magic_name__ , __magic_name__ ) a = scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ ).prev_sample return sample def lowerCamelCase__ ( self :str ): '''simple docstring''' a = dict(self.forward_default_kwargs ) a = kwargs.pop("""num_inference_steps""" , __magic_name__ ) for scheduler_class in self.scheduler_classes: a = self.get_scheduler_config() a = scheduler_class(**__magic_name__ ) a = self.dummy_sample a = 0.1 * sample if num_inference_steps is not None and hasattr(__magic_name__ , """set_timesteps""" ): scheduler.set_timesteps(__magic_name__ ) elif num_inference_steps is not None and not hasattr(__magic_name__ , """set_timesteps""" ): a = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) a = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] a = dummy_past_residuals[:] a = scheduler.timesteps[5] a = scheduler.timesteps[6] a = scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample a = scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) a = scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample a = scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def lowerCamelCase__ ( self :List[Any] ): '''simple docstring''' for timesteps in [100, 1000]: self.check_over_configs(num_train_timesteps=__magic_name__ , time_step=__magic_name__ ) def lowerCamelCase__ ( self :Dict ): '''simple docstring''' for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ): self.check_over_forward(num_inference_steps=__magic_name__ , time_step=__magic_name__ ) def lowerCamelCase__ ( self :Tuple ): '''simple docstring''' a = self.full_loop() a = torch.mean(torch.abs(__magic_name__ ) ) assert abs(result_mean.item() - 254_0529 ) < 10
347
1
import argparse import logging import os import re import tensorflow as tf from transformers import ( AutoConfig, AutoTokenizer, DataCollatorForLanguageModeling, PushToHubCallback, TFAutoModelForMaskedLM, create_optimizer, ) __UpperCamelCase : List[str] = logging.getLogger(__name__) __UpperCamelCase : str = tf.data.AUTOTUNE def __A ( ) -> Union[str, Any]: a = argparse.ArgumentParser(description="""Train a masked language model on TPU.""" ) parser.add_argument( """--pretrained_model_config""" , type=__lowerCamelCase , default="""roberta-base""" , help="""The model config to use. Note that we don't copy the model's weights, only the config!""" , ) parser.add_argument( """--tokenizer""" , type=__lowerCamelCase , default="""unigram-tokenizer-wikitext""" , help="""The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.""" , ) parser.add_argument( """--per_replica_batch_size""" , type=__lowerCamelCase , default=8 , help="""Batch size per TPU core.""" , ) parser.add_argument( """--no_tpu""" , action="""store_true""" , help="""If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.""" , ) parser.add_argument( """--tpu_name""" , type=__lowerCamelCase , help="""Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.""" , default="""local""" , ) parser.add_argument( """--tpu_zone""" , type=__lowerCamelCase , help="""Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.""" , ) parser.add_argument( """--gcp_project""" , type=__lowerCamelCase , help="""Google cloud project name. Only used for non-Colab TPU nodes.""" ) parser.add_argument( """--bfloat16""" , action="""store_true""" , help="""Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.""" , ) parser.add_argument( """--train_dataset""" , type=__lowerCamelCase , help="""Path to training dataset to load. If the path begins with `gs://`""" """ then the dataset will be loaded from a Google Cloud Storage bucket.""" , ) parser.add_argument( """--shuffle_buffer_size""" , type=__lowerCamelCase , default=2**18 , help="""Size of the shuffle buffer (in samples)""" , ) parser.add_argument( """--eval_dataset""" , type=__lowerCamelCase , help="""Path to evaluation dataset to load. If the path begins with `gs://`""" """ then the dataset will be loaded from a Google Cloud Storage bucket.""" , ) parser.add_argument( """--num_epochs""" , type=__lowerCamelCase , default=1 , help="""Number of epochs to train for.""" , ) parser.add_argument( """--learning_rate""" , type=__lowerCamelCase , default=1E-4 , help="""Learning rate to use for training.""" , ) parser.add_argument( """--weight_decay_rate""" , type=__lowerCamelCase , default=1E-3 , help="""Weight decay rate to use for training.""" , ) parser.add_argument( """--max_length""" , type=__lowerCamelCase , default=512 , help="""Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py""" , ) parser.add_argument( """--mlm_probability""" , type=__lowerCamelCase , default=0.15 , help="""Fraction of tokens to mask during training.""" , ) parser.add_argument("""--output_dir""" , type=__lowerCamelCase , required=__lowerCamelCase , help="""Path to save model checkpoints to.""" ) parser.add_argument("""--hub_model_id""" , type=__lowerCamelCase , help="""Model ID to upload to on the Hugging Face Hub.""" ) a = parser.parse_args() return args def __A ( __lowerCamelCase ) -> Dict: try: if args.tpu_name: a = tf.distribute.cluster_resolver.TPUClusterResolver( args.tpu_name , zone=args.tpu_zone , project=args.gcp_project ) else: a = tf.distribute.cluster_resolver.TPUClusterResolver() except ValueError: raise RuntimeError( """Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or """ """--gcp_project. When running on a TPU VM, use --tpu_name local.""" ) tf.config.experimental_connect_to_cluster(__lowerCamelCase ) tf.tpu.experimental.initialize_tpu_system(__lowerCamelCase ) return tpu def __A ( __lowerCamelCase ) -> Union[str, Any]: a = 0 for file in file_list: a = file.split("""/""" )[-1] a = re.search(R"""-\d+-(\d+)\.tfrecord""" , __lowerCamelCase ).group(1 ) a = int(__lowerCamelCase ) num_samples += sample_count return num_samples def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=None ) -> List[Any]: a = count_samples(__lowerCamelCase ) a = tf.data.Dataset.from_tensor_slices(__lowerCamelCase ) if shuffle: a = dataset.shuffle(len(__lowerCamelCase ) ) a = tf.data.TFRecordDataset(__lowerCamelCase , num_parallel_reads=__lowerCamelCase ) # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here a = dataset.apply(tf.data.experimental.assert_cardinality(__lowerCamelCase ) ) a = dataset.map(__lowerCamelCase , num_parallel_calls=__lowerCamelCase ) if shuffle: assert shuffle_buffer_size is not None a = dataset.shuffle(args.shuffle_buffer_size ) a = dataset.batch(__lowerCamelCase , drop_remainder=__lowerCamelCase ) a = dataset.map(__lowerCamelCase , num_parallel_calls=__lowerCamelCase ) a = dataset.prefetch(__lowerCamelCase ) return dataset def __A ( __lowerCamelCase ) -> Optional[Any]: if not args.no_tpu: a = initialize_tpu(__lowerCamelCase ) a = tf.distribute.TPUStrategy(__lowerCamelCase ) else: a = tf.distribute.OneDeviceStrategy(device="""/gpu:0""" ) if args.bfloataa: tf.keras.mixed_precision.set_global_policy("""mixed_bfloat16""" ) a = AutoTokenizer.from_pretrained(args.tokenizer ) a = AutoConfig.from_pretrained(args.pretrained_model_config ) a = tokenizer.vocab_size a = tf.io.gfile.glob(os.path.join(args.train_dataset , """*.tfrecord""" ) ) if not training_records: raise ValueError(f'No .tfrecord files found in {args.train_dataset}.' ) a = tf.io.gfile.glob(os.path.join(args.eval_dataset , """*.tfrecord""" ) ) if not eval_records: raise ValueError(f'No .tfrecord files found in {args.eval_dataset}.' ) a = count_samples(__lowerCamelCase ) a = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync) a = steps_per_epoch * args.num_epochs with strategy.scope(): a = TFAutoModelForMaskedLM.from_config(__lowerCamelCase ) model(model.dummy_inputs ) # Pass some dummy inputs through the model to ensure all the weights are built a , a = create_optimizer( num_train_steps=__lowerCamelCase , num_warmup_steps=total_train_steps // 20 , init_lr=args.learning_rate , weight_decay_rate=args.weight_decay_rate , ) # Transformers models compute the right loss for their task by default when labels are passed, and will # use this for training unless you specify your own loss function in compile(). model.compile(optimizer=__lowerCamelCase , metrics=["""accuracy"""] ) def decode_fn(__lowerCamelCase ): a = { """input_ids""": tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ), """attention_mask""": tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ), } return tf.io.parse_single_example(__lowerCamelCase , __lowerCamelCase ) # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can # use their methods in our data pipeline. a = DataCollatorForLanguageModeling( tokenizer=__lowerCamelCase , mlm_probability=args.mlm_probability , mlm=__lowerCamelCase , return_tensors="""tf""" ) def mask_with_collator(__lowerCamelCase ): # TF really needs an isin() function a = ( ~tf.cast(batch["""attention_mask"""] , tf.bool ) | (batch["""input_ids"""] == tokenizer.cls_token_id) | (batch["""input_ids"""] == tokenizer.sep_token_id) ) a , a = data_collator.tf_mask_tokens( batch["""input_ids"""] , vocab_size=len(__lowerCamelCase ) , mask_token_id=tokenizer.mask_token_id , special_tokens_mask=__lowerCamelCase , ) return batch a = args.per_replica_batch_size * strategy.num_replicas_in_sync a = prepare_dataset( __lowerCamelCase , decode_fn=__lowerCamelCase , mask_fn=__lowerCamelCase , batch_size=__lowerCamelCase , shuffle=__lowerCamelCase , shuffle_buffer_size=args.shuffle_buffer_size , ) a = prepare_dataset( __lowerCamelCase , decode_fn=__lowerCamelCase , mask_fn=__lowerCamelCase , batch_size=__lowerCamelCase , shuffle=__lowerCamelCase , ) a = [] if args.hub_model_id: callbacks.append( PushToHubCallback(output_dir=args.output_dir , hub_model_id=args.hub_model_id , tokenizer=__lowerCamelCase ) ) model.fit( __lowerCamelCase , validation_data=__lowerCamelCase , epochs=args.num_epochs , callbacks=__lowerCamelCase , ) model.save_pretrained(args.output_dir ) if __name__ == "__main__": __UpperCamelCase : Optional[Any] = parse_args() main(args)
347
__UpperCamelCase : Dict = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" def __A ( ) -> None: a = input("""Enter message: """ ) a = input("""Enter key [alphanumeric]: """ ) a = input("""Encrypt/Decrypt [e/d]: """ ) if mode.lower().startswith("""e""" ): a = """encrypt""" a = encrypt_message(__lowerCamelCase , __lowerCamelCase ) elif mode.lower().startswith("""d""" ): a = """decrypt""" a = decrypt_message(__lowerCamelCase , __lowerCamelCase ) print(f'\n{mode.title()}ed message:' ) print(__lowerCamelCase ) def __A ( __lowerCamelCase , __lowerCamelCase ) -> str: return translate_message(__lowerCamelCase , __lowerCamelCase , """encrypt""" ) def __A ( __lowerCamelCase , __lowerCamelCase ) -> str: return translate_message(__lowerCamelCase , __lowerCamelCase , """decrypt""" ) def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> str: a = [] a = 0 a = key.upper() for symbol in message: a = LETTERS.find(symbol.upper() ) if num != -1: if mode == "encrypt": num += LETTERS.find(key[key_index] ) elif mode == "decrypt": num -= LETTERS.find(key[key_index] ) num %= len(__lowerCamelCase ) if symbol.isupper(): translated.append(LETTERS[num] ) elif symbol.islower(): translated.append(LETTERS[num].lower() ) key_index += 1 if key_index == len(__lowerCamelCase ): a = 0 else: translated.append(__lowerCamelCase ) return "".join(__lowerCamelCase ) if __name__ == "__main__": main()
347
1
import argparse import torch from transformers import BertForMaskedLM if __name__ == "__main__": __UpperCamelCase : int = argparse.ArgumentParser( description=( "Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned" " Distillation" ) ) parser.add_argument("--model_type", default="bert", choices=["bert"]) parser.add_argument("--model_name", default="bert-base-uncased", type=str) parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_bert-base-uncased_0247911.pth", type=str) parser.add_argument("--vocab_transform", action="store_true") __UpperCamelCase : List[str] = parser.parse_args() if args.model_type == "bert": __UpperCamelCase : Dict = BertForMaskedLM.from_pretrained(args.model_name) __UpperCamelCase : Tuple = "bert" else: raise ValueError("args.model_type should be \"bert\".") __UpperCamelCase : List[str] = model.state_dict() __UpperCamelCase : Any = {} for w in ["word_embeddings", "position_embeddings"]: __UpperCamelCase : Tuple = state_dict[F'{prefix}.embeddings.{w}.weight'] for w in ["weight", "bias"]: __UpperCamelCase : Optional[int] = state_dict[F'{prefix}.embeddings.LayerNorm.{w}'] __UpperCamelCase : str = 0 for teacher_idx in [0, 2, 4, 7, 9, 11]: for w in ["weight", "bias"]: __UpperCamelCase : List[Any] = state_dict[ F'{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}' ] __UpperCamelCase : Dict = state_dict[ F'{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}' ] __UpperCamelCase : Union[str, Any] = state_dict[ F'{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}' ] __UpperCamelCase : Optional[int] = state_dict[ F'{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}' ] __UpperCamelCase : int = state_dict[ F'{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}' ] __UpperCamelCase : str = state_dict[ F'{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}' ] __UpperCamelCase : Optional[int] = state_dict[ F'{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}' ] __UpperCamelCase : int = state_dict[ F'{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}' ] std_idx += 1 __UpperCamelCase : List[str] = state_dict["cls.predictions.decoder.weight"] __UpperCamelCase : Tuple = state_dict["cls.predictions.bias"] if args.vocab_transform: for w in ["weight", "bias"]: __UpperCamelCase : List[Any] = state_dict[F'cls.predictions.transform.dense.{w}'] __UpperCamelCase : Optional[Any] = state_dict[F'cls.predictions.transform.LayerNorm.{w}'] print(F'N layers selected for distillation: {std_idx}') print(F'Number of params transferred for distillation: {len(compressed_sd.keys())}') print(F'Save transferred checkpoint to {args.dump_checkpoint}.') torch.save(compressed_sd, args.dump_checkpoint)
347
import unittest import numpy as np from transformers import RobertaConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_flax_available(): from transformers.models.roberta.modeling_flax_roberta import ( FlaxRobertaForCausalLM, FlaxRobertaForMaskedLM, FlaxRobertaForMultipleChoice, FlaxRobertaForQuestionAnswering, FlaxRobertaForSequenceClassification, FlaxRobertaForTokenClassification, FlaxRobertaModel, ) class __lowerCAmelCase ( unittest.TestCase ): def __init__( self :Optional[int] , __magic_name__ :Tuple , __magic_name__ :Tuple=13 , __magic_name__ :List[Any]=7 , __magic_name__ :Optional[Any]=True , __magic_name__ :List[Any]=True , __magic_name__ :Union[str, Any]=True , __magic_name__ :List[str]=True , __magic_name__ :str=99 , __magic_name__ :Optional[Any]=32 , __magic_name__ :Union[str, Any]=5 , __magic_name__ :Any=4 , __magic_name__ :int=37 , __magic_name__ :Tuple="gelu" , __magic_name__ :List[str]=0.1 , __magic_name__ :Dict=0.1 , __magic_name__ :Tuple=512 , __magic_name__ :Dict=16 , __magic_name__ :Optional[int]=2 , __magic_name__ :Optional[int]=0.02 , __magic_name__ :Optional[Any]=4 , ): '''simple docstring''' a = parent a = batch_size a = seq_length a = is_training a = use_attention_mask a = use_token_type_ids a = use_labels a = vocab_size a = hidden_size a = num_hidden_layers a = num_attention_heads a = intermediate_size a = hidden_act a = hidden_dropout_prob a = attention_probs_dropout_prob a = max_position_embeddings a = type_vocab_size a = type_sequence_label_size a = initializer_range a = num_choices def lowerCamelCase__ ( self :int ): '''simple docstring''' a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) a = None if self.use_attention_mask: a = random_attention_mask([self.batch_size, self.seq_length] ) a = None if self.use_token_type_ids: a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) a = RobertaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__magic_name__ , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def lowerCamelCase__ ( self :List[Any] ): '''simple docstring''' a = self.prepare_config_and_inputs() a , a , a , a = config_and_inputs a = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask} return config, inputs_dict def lowerCamelCase__ ( self :Dict ): '''simple docstring''' a = self.prepare_config_and_inputs() a , a , a , a = config_and_inputs a = True a = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) a = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, encoder_hidden_states, encoder_attention_mask, ) @require_flax class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ): UpperCamelCase__ = True UpperCamelCase__ = ( ( FlaxRobertaModel, FlaxRobertaForCausalLM, FlaxRobertaForMaskedLM, FlaxRobertaForSequenceClassification, FlaxRobertaForTokenClassification, FlaxRobertaForMultipleChoice, FlaxRobertaForQuestionAnswering, ) if is_flax_available() else () ) def lowerCamelCase__ ( self :Dict ): '''simple docstring''' a = FlaxRobertaModelTester(self ) @slow def lowerCamelCase__ ( self :Optional[Any] ): '''simple docstring''' for model_class_name in self.all_model_classes: a = model_class_name.from_pretrained("""roberta-base""" , from_pt=__magic_name__ ) a = model(np.ones((1, 1) ) ) self.assertIsNotNone(__magic_name__ )
347
1
import unittest from transformers import load_tool from .test_tools_common import ToolTesterMixin __UpperCamelCase : Optional[int] = ''' Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning. In March 2021, Hugging Face raised $40 million in a Series B funding round.[3] On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5] ''' class __lowerCAmelCase ( unittest.TestCase , SCREAMING_SNAKE_CASE_ ): def lowerCamelCase__ ( self :Dict ): '''simple docstring''' a = load_tool("""text-question-answering""" ) self.tool.setup() a = load_tool("""text-question-answering""" , remote=__a ) def lowerCamelCase__ ( self :List[Any] ): '''simple docstring''' a = self.tool(__a , """What did Hugging Face do in April 2021?""" ) self.assertEqual(__a , """launched the BigScience Research Workshop""" ) def lowerCamelCase__ ( self :int ): '''simple docstring''' a = self.remote_tool(__a , """What did Hugging Face do in April 2021?""" ) self.assertEqual(__a , """launched the BigScience Research Workshop""" ) def lowerCamelCase__ ( self :List[str] ): '''simple docstring''' a = self.tool(text=__a , question="""What did Hugging Face do in April 2021?""" ) self.assertEqual(__a , """launched the BigScience Research Workshop""" ) def lowerCamelCase__ ( self :Any ): '''simple docstring''' a = self.remote_tool(text=__a , question="""What did Hugging Face do in April 2021?""" ) self.assertEqual(__a , """launched the BigScience Research Workshop""" )
350
import io import itertools import json from dataclasses import dataclass from typing import Optional import pyarrow as pa import pyarrow.json as paj import datasets from datasets.table import table_cast from datasets.utils.file_utils import readline __UpperCamelCase : Any = datasets.utils.logging.get_logger(__name__) @dataclass class __lowerCAmelCase ( datasets.BuilderConfig ): UpperCamelCase__ = None UpperCamelCase__ = "utf-8" UpperCamelCase__ = None UpperCamelCase__ = None UpperCamelCase__ = True # deprecated UpperCamelCase__ = None # deprecated UpperCamelCase__ = 10 << 20 # 10MB UpperCamelCase__ = None class __lowerCAmelCase ( datasets.ArrowBasedBuilder ): UpperCamelCase__ = JsonConfig def lowerCamelCase__ ( self :str ): '''simple docstring''' if self.config.block_size is not None: logger.warning("""The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead""" ) a = self.config.block_size if self.config.use_threads is not True: logger.warning( """The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore.""" ) if self.config.newlines_in_values is not None: raise ValueError("""The JSON loader parameter `newlines_in_values` is no longer supported""" ) return datasets.DatasetInfo(features=self.config.features ) def lowerCamelCase__ ( self :Tuple , __magic_name__ :str ): '''simple docstring''' if not self.config.data_files: raise ValueError(F'At least one data file must be specified, but got data_files={self.config.data_files}' ) a = dl_manager.download_and_extract(self.config.data_files ) if isinstance(__magic_name__ , (str, list, tuple) ): a = data_files if isinstance(__magic_name__ , __magic_name__ ): a = [files] a = [dl_manager.iter_files(__magic_name__ ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files} )] a = [] for split_name, files in data_files.items(): if isinstance(__magic_name__ , __magic_name__ ): a = [files] a = [dl_manager.iter_files(__magic_name__ ) for file in files] splits.append(datasets.SplitGenerator(name=__magic_name__ , gen_kwargs={"""files""": files} ) ) return splits def lowerCamelCase__ ( self :List[str] , __magic_name__ :pa.Table ): '''simple docstring''' if self.config.features is not None: # adding missing columns for column_name in set(self.config.features ) - set(pa_table.column_names ): a = self.config.features.arrow_schema.field(__magic_name__ ).type a = pa_table.append_column(__magic_name__ , pa.array([None] * len(__magic_name__ ) , type=__magic_name__ ) ) # more expensive cast to support nested structures with keys in a different order # allows str <-> int/float or str to Audio for example a = table_cast(__magic_name__ , self.config.features.arrow_schema ) return pa_table def lowerCamelCase__ ( self :Optional[int] , __magic_name__ :Union[str, Any] ): '''simple docstring''' for file_idx, file in enumerate(itertools.chain.from_iterable(__magic_name__ ) ): # If the file is one json object and if we need to look at the list of items in one specific field if self.config.field is not None: with open(__magic_name__ , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f: a = json.load(__magic_name__ ) # We keep only the field we are interested in a = dataset[self.config.field] # We accept two format: a list of dicts or a dict of lists if isinstance(__magic_name__ , (list, tuple) ): a = set().union(*[row.keys() for row in dataset] ) a = {col: [row.get(__magic_name__ ) for row in dataset] for col in keys} else: a = dataset a = pa.Table.from_pydict(__magic_name__ ) yield file_idx, self._cast_table(__magic_name__ ) # If the file has one json object per line else: with open(__magic_name__ , """rb""" ) as f: a = 0 # Use block_size equal to the chunk size divided by 32 to leverage multithreading # Set a default minimum value of 16kB if the chunk size is really small a = max(self.config.chunksize // 32 , 16 << 10 ) a = ( self.config.encoding_errors if self.config.encoding_errors is not None else """strict""" ) while True: a = f.read(self.config.chunksize ) if not batch: break # Finish current line try: batch += f.readline() except (AttributeError, io.UnsupportedOperation): batch += readline(__magic_name__ ) # PyArrow only accepts utf-8 encoded bytes if self.config.encoding != "utf-8": a = batch.decode(self.config.encoding , errors=__magic_name__ ).encode("""utf-8""" ) try: while True: try: a = paj.read_json( io.BytesIO(__magic_name__ ) , read_options=paj.ReadOptions(block_size=__magic_name__ ) ) break except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e: if ( isinstance(__magic_name__ , pa.ArrowInvalid ) and "straddling" not in str(__magic_name__ ) or block_size > len(__magic_name__ ) ): raise else: # Increase the block size in case it was too small. # The block size will be reset for the next file. logger.debug( F'Batch of {len(__magic_name__ )} bytes couldn\'t be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.' ) block_size *= 2 except pa.ArrowInvalid as e: try: with open( __magic_name__ , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f: a = json.load(__magic_name__ ) except json.JSONDecodeError: logger.error(F'Failed to read file \'{file}\' with error {type(__magic_name__ )}: {e}' ) raise e # If possible, parse the file as a list of json objects and exit the loop if isinstance(__magic_name__ , __magic_name__ ): # list is the only sequence type supported in JSON try: a = set().union(*[row.keys() for row in dataset] ) a = {col: [row.get(__magic_name__ ) for row in dataset] for col in keys} a = pa.Table.from_pydict(__magic_name__ ) except (pa.ArrowInvalid, AttributeError) as e: logger.error(F'Failed to read file \'{file}\' with error {type(__magic_name__ )}: {e}' ) raise ValueError(F'Not able to read records in the JSON file at {file}.' ) from None yield file_idx, self._cast_table(__magic_name__ ) break else: logger.error(F'Failed to read file \'{file}\' with error {type(__magic_name__ )}: {e}' ) raise ValueError( F'Not able to read records in the JSON file at {file}. ' F'You should probably indicate the field of the JSON file containing your records. ' F'This JSON file contain the following fields: {str(list(dataset.keys() ) )}. ' F'Select the correct one and provide it as `field=\'XXX\'` to the dataset loading method. ' ) from None # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(__magic_name__ ) batch_idx += 1
347
0
import math import sys def __A ( __lowerCamelCase ) -> int: if number != int(lowercase__ ): raise ValueError("""the value of input must be a natural number""" ) if number < 0: raise ValueError("""the value of input must not be a negative number""" ) if number == 0: return 1 a = [-1] * (number + 1) a = 0 for i in range(1 , number + 1 ): a = sys.maxsize a = int(math.sqrt(lowercase__ ) ) for j in range(1 , root + 1 ): a = 1 + answers[i - (j**2)] a = min(lowercase__ , lowercase__ ) a = answer return answers[number] if __name__ == "__main__": import doctest doctest.testmod()
351
import warnings from typing import Dict, List, Optional, Tuple from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging __UpperCamelCase : Dict = logging.get_logger(__name__) class __lowerCAmelCase ( __magic_name__ ): UpperCamelCase__ = ['''input_ids''', '''attention_mask'''] def __init__( self :List[str] , __magic_name__ :int="</s>" , __magic_name__ :List[Any]="<unk>" , __magic_name__ :Optional[Any]="<pad>" , __magic_name__ :Optional[int]=125 , __magic_name__ :List[str]=None , **__magic_name__ :List[str] , ): '''simple docstring''' if extra_ids > 0 and additional_special_tokens is None: a = [F'<extra_id_{i}>' for i in range(__magic_name__ )] elif extra_ids > 0 and additional_special_tokens is not None: # Check that we have the right number of extra_id special tokens a = len(set(filter(lambda __magic_name__ : bool("""extra_id""" in str(__magic_name__ ) ) , __magic_name__ ) ) ) if extra_tokens != extra_ids: raise ValueError( F'Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are' """ provided to ByT5Tokenizer. In this case the additional_special_tokens must include the""" """ extra_ids tokens""" ) a = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else pad_token a = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else eos_token a = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else unk_token super().__init__( eos_token=__magic_name__ , unk_token=__magic_name__ , pad_token=__magic_name__ , extra_ids=__magic_name__ , additional_special_tokens=__magic_name__ , **__magic_name__ , ) a = extra_ids a = 2**8 # utf is 8 bits # define special tokens dict a = { self.pad_token: 0, self.eos_token: 1, self.unk_token: 2, } a = len(self.special_tokens_encoder ) a = len(__magic_name__ ) for i, token in enumerate(__magic_name__ ): a = self.vocab_size + i - n a = {v: k for k, v in self.special_tokens_encoder.items()} @property def lowerCamelCase__ ( self :List[Any] ): '''simple docstring''' return self._utf_vocab_size + self._num_special_tokens + self._extra_ids def lowerCamelCase__ ( self :Any , __magic_name__ :List[int] , __magic_name__ :Optional[List[int]] = None , __magic_name__ :bool = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__magic_name__ , token_ids_a=__magic_name__ , already_has_special_tokens=__magic_name__ ) # normal case: some special tokens if token_ids_a is None: return ([0] * len(__magic_name__ )) + [1] return ([0] * len(__magic_name__ )) + [1] + ([0] * len(__magic_name__ )) + [1] def lowerCamelCase__ ( self :str , __magic_name__ :List[int] ): '''simple docstring''' if len(__magic_name__ ) > 0 and token_ids[-1] == self.eos_token_id: warnings.warn( F'This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated' """ eos tokens being added.""" ) return token_ids else: return token_ids + [self.eos_token_id] def lowerCamelCase__ ( self :Union[str, Any] , __magic_name__ :List[int] , __magic_name__ :Optional[List[int]] = None ): '''simple docstring''' a = [self.eos_token_id] if token_ids_a is None: return len(token_ids_a + eos ) * [0] return len(token_ids_a + eos + token_ids_a + eos ) * [0] def lowerCamelCase__ ( self :Union[str, Any] , __magic_name__ :List[int] , __magic_name__ :Optional[List[int]] = None ): '''simple docstring''' a = self._add_eos_if_not_present(__magic_name__ ) if token_ids_a is None: return token_ids_a else: a = self._add_eos_if_not_present(__magic_name__ ) return token_ids_a + token_ids_a def lowerCamelCase__ ( self :List[str] , __magic_name__ :str ): '''simple docstring''' a = [chr(__magic_name__ ) for i in text.encode("""utf-8""" )] return tokens def lowerCamelCase__ ( self :Tuple , __magic_name__ :str ): '''simple docstring''' if token in self.special_tokens_encoder: a = self.special_tokens_encoder[token] elif token in self.added_tokens_encoder: a = self.added_tokens_encoder[token] elif len(__magic_name__ ) != 1: a = self.unk_token_id else: a = ord(__magic_name__ ) + self._num_special_tokens return token_id def lowerCamelCase__ ( self :List[str] , __magic_name__ :Dict ): '''simple docstring''' if index in self.special_tokens_decoder: a = self.special_tokens_decoder[index] else: a = chr(index - self._num_special_tokens ) return token def lowerCamelCase__ ( self :Tuple , __magic_name__ :Optional[int] ): '''simple docstring''' a = b"""""" for token in tokens: if token in self.special_tokens_decoder: a = self.special_tokens_decoder[token].encode("""utf-8""" ) elif token in self.added_tokens_decoder: a = self.special_tokens_decoder[token].encode("""utf-8""" ) elif token in self.special_tokens_encoder: a = token.encode("""utf-8""" ) elif token in self.added_tokens_encoder: a = token.encode("""utf-8""" ) else: a = bytes([ord(__magic_name__ )] ) bstring += tok_string a = bstring.decode("""utf-8""" , errors="""ignore""" ) return string def lowerCamelCase__ ( self :Optional[Any] , __magic_name__ :str , __magic_name__ :Optional[str] = None ): '''simple docstring''' return ()
347
0
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __UpperCamelCase : Union[str, Any] = { "configuration_pegasus_x": ["PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP", "PegasusXConfig"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : Any = [ "PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST", "PegasusXForConditionalGeneration", "PegasusXModel", "PegasusXPreTrainedModel", ] if TYPE_CHECKING: from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_pegasus_x import ( PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST, PegasusXForConditionalGeneration, PegasusXModel, PegasusXPreTrainedModel, ) else: import sys __UpperCamelCase : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
352
import copy import unittest from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_MULTIPLE_CHOICE_MAPPING, MODEL_FOR_QUESTION_ANSWERING_MAPPING, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, LayoutLMvaConfig, LayoutLMvaForQuestionAnswering, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaModel, ) from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class __lowerCAmelCase : def __init__( self :Optional[int] , __magic_name__ :str , __magic_name__ :int=2 , __magic_name__ :List[str]=3 , __magic_name__ :Optional[int]=4 , __magic_name__ :str=2 , __magic_name__ :Any=7 , __magic_name__ :Optional[Any]=True , __magic_name__ :List[Any]=True , __magic_name__ :Optional[int]=True , __magic_name__ :Dict=True , __magic_name__ :List[Any]=99 , __magic_name__ :Dict=36 , __magic_name__ :Optional[Any]=3 , __magic_name__ :str=4 , __magic_name__ :Optional[Any]=37 , __magic_name__ :Dict="gelu" , __magic_name__ :Any=0.1 , __magic_name__ :Union[str, Any]=0.1 , __magic_name__ :Dict=512 , __magic_name__ :str=16 , __magic_name__ :List[Any]=2 , __magic_name__ :Tuple=0.02 , __magic_name__ :Any=6 , __magic_name__ :Optional[int]=6 , __magic_name__ :Tuple=3 , __magic_name__ :str=4 , __magic_name__ :List[str]=None , __magic_name__ :str=1000 , ): '''simple docstring''' a = parent a = batch_size a = num_channels a = image_size a = patch_size a = text_seq_length a = is_training a = use_input_mask a = use_token_type_ids a = use_labels a = vocab_size a = hidden_size a = num_hidden_layers a = num_attention_heads a = intermediate_size a = hidden_act a = hidden_dropout_prob a = attention_probs_dropout_prob a = max_position_embeddings a = type_vocab_size a = type_sequence_label_size a = initializer_range a = coordinate_size a = shape_size a = num_labels a = num_choices a = scope a = range_bbox # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token) a = text_seq_length a = (image_size // patch_size) ** 2 + 1 a = self.text_seq_length + self.image_seq_length def lowerCamelCase__ ( self :List[str] ): '''simple docstring''' a = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size ) a = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox ) # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: a = bbox[i, j, 3] a = bbox[i, j, 1] a = t if bbox[i, j, 2] < bbox[i, j, 0]: a = bbox[i, j, 2] a = bbox[i, j, 0] a = t a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) a = None if self.use_input_mask: a = random_attention_mask([self.batch_size, self.text_seq_length] ) a = None if self.use_token_type_ids: a = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size ) a = None a = None if self.use_labels: a = ids_tensor([self.batch_size] , self.type_sequence_label_size ) a = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels ) a = LayoutLMvaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , ) return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels def lowerCamelCase__ ( self :int , __magic_name__ :Union[str, Any] , __magic_name__ :List[str] , __magic_name__ :List[str] , __magic_name__ :int , __magic_name__ :Tuple , __magic_name__ :int , __magic_name__ :Tuple , __magic_name__ :int ): '''simple docstring''' a = LayoutLMvaModel(config=__magic_name__ ) model.to(__magic_name__ ) model.eval() # text + image a = model(__magic_name__ , pixel_values=__magic_name__ ) a = model( __magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ ) a = model(__magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , token_type_ids=__magic_name__ ) a = model(__magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) # text only a = model(__magic_name__ ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) ) # image only a = model(pixel_values=__magic_name__ ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) ) def lowerCamelCase__ ( self :Optional[Any] , __magic_name__ :List[Any] , __magic_name__ :Optional[int] , __magic_name__ :Tuple , __magic_name__ :Union[str, Any] , __magic_name__ :int , __magic_name__ :Optional[int] , __magic_name__ :List[str] , __magic_name__ :List[str] ): '''simple docstring''' a = self.num_labels a = LayoutLMvaForSequenceClassification(__magic_name__ ) model.to(__magic_name__ ) model.eval() a = model( __magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , labels=__magic_name__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase__ ( self :Tuple , __magic_name__ :Dict , __magic_name__ :Optional[Any] , __magic_name__ :Optional[Any] , __magic_name__ :Dict , __magic_name__ :Dict , __magic_name__ :int , __magic_name__ :List[str] , __magic_name__ :Tuple ): '''simple docstring''' a = self.num_labels a = LayoutLMvaForTokenClassification(config=__magic_name__ ) model.to(__magic_name__ ) model.eval() a = model( __magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , labels=__magic_name__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) ) def lowerCamelCase__ ( self :Optional[int] , __magic_name__ :Tuple , __magic_name__ :str , __magic_name__ :int , __magic_name__ :Optional[int] , __magic_name__ :str , __magic_name__ :List[str] , __magic_name__ :Optional[int] , __magic_name__ :Optional[Any] ): '''simple docstring''' a = LayoutLMvaForQuestionAnswering(config=__magic_name__ ) model.to(__magic_name__ ) model.eval() a = model( __magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , start_positions=__magic_name__ , end_positions=__magic_name__ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCamelCase__ ( self :Union[str, Any] ): '''simple docstring''' a = self.prepare_config_and_inputs() ( ( a ) , ( a ) , ( a ) , ( a ) , ( a ) , ( a ) , ( a ) , ( a ) , ) = config_and_inputs a = { """input_ids""": input_ids, """bbox""": bbox, """pixel_values""": pixel_values, """token_type_ids""": token_type_ids, """attention_mask""": input_mask, } return config, inputs_dict @require_torch class __lowerCAmelCase ( __magic_name__ , __magic_name__ , unittest.TestCase ): UpperCamelCase__ = False UpperCamelCase__ = False UpperCamelCase__ = False UpperCamelCase__ = ( ( LayoutLMvaModel, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaForQuestionAnswering, ) if is_torch_available() else () ) UpperCamelCase__ = ( {'''document-question-answering''': LayoutLMvaForQuestionAnswering, '''feature-extraction''': LayoutLMvaModel} if is_torch_available() else {} ) def lowerCamelCase__ ( self :List[str] , __magic_name__ :Union[str, Any] , __magic_name__ :List[str] , __magic_name__ :Optional[Any] , __magic_name__ :List[str] , __magic_name__ :List[Any] ): '''simple docstring''' return True def lowerCamelCase__ ( self :int ): '''simple docstring''' a = LayoutLMvaModelTester(self ) a = ConfigTester(self , config_class=__magic_name__ , hidden_size=37 ) def lowerCamelCase__ ( self :Optional[Any] , __magic_name__ :Dict , __magic_name__ :Dict , __magic_name__ :Any=False ): '''simple docstring''' a = copy.deepcopy(__magic_name__ ) if model_class in get_values(__magic_name__ ): a = { k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous() if isinstance(__magic_name__ , torch.Tensor ) and v.ndim > 1 else v for k, v in inputs_dict.items() } if return_labels: if model_class in get_values(__magic_name__ ): a = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=__magic_name__ ) elif model_class in get_values(__magic_name__ ): a = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__magic_name__ ) a = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__magic_name__ ) elif model_class in [ *get_values(__magic_name__ ), ]: a = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__magic_name__ ) elif model_class in [ *get_values(__magic_name__ ), ]: a = torch.zeros( (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=__magic_name__ , ) return inputs_dict def lowerCamelCase__ ( self :str ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCamelCase__ ( self :Union[str, Any] ): '''simple docstring''' a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__magic_name__ ) def lowerCamelCase__ ( self :Tuple ): '''simple docstring''' a = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: a = type self.model_tester.create_and_check_model(*__magic_name__ ) def lowerCamelCase__ ( self :Tuple ): '''simple docstring''' a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*__magic_name__ ) def lowerCamelCase__ ( self :Union[str, Any] ): '''simple docstring''' a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__magic_name__ ) def lowerCamelCase__ ( self :int ): '''simple docstring''' a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__magic_name__ ) @slow def lowerCamelCase__ ( self :int ): '''simple docstring''' for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a = LayoutLMvaModel.from_pretrained(__magic_name__ ) self.assertIsNotNone(__magic_name__ ) def __A ( ) -> str: a = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch class __lowerCAmelCase ( unittest.TestCase ): @cached_property def lowerCamelCase__ ( self :Optional[Any] ): '''simple docstring''' return LayoutLMvaImageProcessor(apply_ocr=__magic_name__ ) if is_vision_available() else None @slow def lowerCamelCase__ ( self :Optional[Any] ): '''simple docstring''' a = LayoutLMvaModel.from_pretrained("""microsoft/layoutlmv3-base""" ).to(__magic_name__ ) a = self.default_image_processor a = prepare_img() a = image_processor(images=__magic_name__ , return_tensors="""pt""" ).pixel_values.to(__magic_name__ ) a = torch.tensor([[1, 2]] ) a = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 ) # forward pass a = model( input_ids=input_ids.to(__magic_name__ ) , bbox=bbox.to(__magic_name__ ) , pixel_values=pixel_values.to(__magic_name__ ) , ) # verify the logits a = torch.Size((1, 199, 768) ) self.assertEqual(outputs.last_hidden_state.shape , __magic_name__ ) a = torch.tensor( [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] ).to(__magic_name__ ) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , __magic_name__ , atol=1E-4 ) )
347
0
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __UpperCamelCase : List[str] = logging.get_logger(__name__) __UpperCamelCase : Any = { "facebook/data2vec-vision-base-ft": ( "https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json" ), } class __lowerCAmelCase ( A__ ): UpperCamelCase__ = '''data2vec-vision''' def __init__( self :Union[str, Any] , __magic_name__ :str=768 , __magic_name__ :Dict=12 , __magic_name__ :List[Any]=12 , __magic_name__ :List[Any]=3072 , __magic_name__ :Optional[Any]="gelu" , __magic_name__ :List[Any]=0.0 , __magic_name__ :List[Any]=0.0 , __magic_name__ :Tuple=0.02 , __magic_name__ :Union[str, Any]=1E-1_2 , __magic_name__ :Any=224 , __magic_name__ :Dict=16 , __magic_name__ :int=3 , __magic_name__ :str=False , __magic_name__ :List[Any]=False , __magic_name__ :str=False , __magic_name__ :Tuple=False , __magic_name__ :Tuple=0.1 , __magic_name__ :int=0.1 , __magic_name__ :int=True , __magic_name__ :Dict=[3, 5, 7, 11] , __magic_name__ :Optional[int]=[1, 2, 3, 6] , __magic_name__ :str=True , __magic_name__ :int=0.4 , __magic_name__ :Any=256 , __magic_name__ :str=1 , __magic_name__ :int=False , __magic_name__ :Tuple=255 , **__magic_name__ :List[str] , ): '''simple docstring''' super().__init__(**__snake_case ) a = hidden_size a = num_hidden_layers a = num_attention_heads a = intermediate_size a = hidden_act a = hidden_dropout_prob a = attention_probs_dropout_prob a = initializer_range a = layer_norm_eps a = image_size a = patch_size a = num_channels a = use_mask_token a = use_absolute_position_embeddings a = use_relative_position_bias a = use_shared_relative_position_bias a = layer_scale_init_value a = drop_path_rate a = use_mean_pooling # decode head attributes (semantic segmentation) a = out_indices a = pool_scales # auxiliary head attributes (semantic segmentation) a = use_auxiliary_head a = auxiliary_loss_weight a = auxiliary_channels a = auxiliary_num_convs a = auxiliary_concat_input a = semantic_loss_ignore_index class __lowerCAmelCase ( A__ ): UpperCamelCase__ = version.parse('''1.11''' ) @property def lowerCamelCase__ ( self :int ): '''simple docstring''' return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) @property def lowerCamelCase__ ( self :List[Any] ): '''simple docstring''' return 1E-4
353
from copy import deepcopy class __lowerCAmelCase : def __init__( self :Union[str, Any] , __magic_name__ :list[int] | None = None , __magic_name__ :int | None = None ): '''simple docstring''' if arr is None and size is not None: a = size a = [0] * size elif arr is not None: self.init(__magic_name__ ) else: raise ValueError("""Either arr or size must be specified""" ) def lowerCamelCase__ ( self :Dict , __magic_name__ :list[int] ): '''simple docstring''' a = len(__magic_name__ ) a = deepcopy(__magic_name__ ) for i in range(1 , self.size ): a = self.next_(__magic_name__ ) if j < self.size: self.tree[j] += self.tree[i] def lowerCamelCase__ ( self :Tuple ): '''simple docstring''' a = self.tree[:] for i in range(self.size - 1 , 0 , -1 ): a = self.next_(__magic_name__ ) if j < self.size: arr[j] -= arr[i] return arr @staticmethod def lowerCamelCase__ ( __magic_name__ :int ): '''simple docstring''' return index + (index & (-index)) @staticmethod def lowerCamelCase__ ( __magic_name__ :int ): '''simple docstring''' return index - (index & (-index)) def lowerCamelCase__ ( self :Any , __magic_name__ :int , __magic_name__ :int ): '''simple docstring''' if index == 0: self.tree[0] += value return while index < self.size: self.tree[index] += value a = self.next_(__magic_name__ ) def lowerCamelCase__ ( self :Optional[Any] , __magic_name__ :int , __magic_name__ :int ): '''simple docstring''' self.add(__magic_name__ , value - self.get(__magic_name__ ) ) def lowerCamelCase__ ( self :int , __magic_name__ :int ): '''simple docstring''' if right == 0: return 0 a = self.tree[0] right -= 1 # make right inclusive while right > 0: result += self.tree[right] a = self.prev(__magic_name__ ) return result def lowerCamelCase__ ( self :int , __magic_name__ :int , __magic_name__ :int ): '''simple docstring''' return self.prefix(__magic_name__ ) - self.prefix(__magic_name__ ) def lowerCamelCase__ ( self :Tuple , __magic_name__ :int ): '''simple docstring''' return self.query(__magic_name__ , index + 1 ) def lowerCamelCase__ ( self :Dict , __magic_name__ :int ): '''simple docstring''' value -= self.tree[0] if value < 0: return -1 a = 1 # Largest power of 2 <= size while j * 2 < self.size: j *= 2 a = 0 while j > 0: if i + j < self.size and self.tree[i + j] <= value: value -= self.tree[i + j] i += j j //= 2 return i if __name__ == "__main__": import doctest doctest.testmod()
347
0
import inspect import unittest from math import floor from transformers import CvtConfig from transformers.file_utils import cached_property, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_vision, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import CvtForImageClassification, CvtModel from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class __lowerCAmelCase ( a__ ): def lowerCamelCase__ ( self :List[Any] ): '''simple docstring''' a = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(_lowerCamelCase , """embed_dim""" ) ) self.parent.assertTrue(hasattr(_lowerCamelCase , """num_heads""" ) ) class __lowerCAmelCase : def __init__( self :int , __magic_name__ :int , __magic_name__ :List[str]=13 , __magic_name__ :str=64 , __magic_name__ :str=3 , __magic_name__ :Optional[Any]=[16, 48, 96] , __magic_name__ :Dict=[1, 3, 6] , __magic_name__ :Tuple=[1, 2, 10] , __magic_name__ :List[str]=[7, 3, 3] , __magic_name__ :Any=[4, 2, 2] , __magic_name__ :str=[2, 1, 1] , __magic_name__ :List[str]=[2, 2, 2] , __magic_name__ :List[Any]=[False, False, True] , __magic_name__ :Any=[0.0, 0.0, 0.0] , __magic_name__ :int=0.02 , __magic_name__ :Tuple=1E-1_2 , __magic_name__ :Union[str, Any]=True , __magic_name__ :Optional[Any]=True , __magic_name__ :int=2 , ): '''simple docstring''' a = parent a = batch_size a = image_size a = patch_sizes a = patch_stride a = patch_padding a = is_training a = use_labels a = num_labels a = num_channels a = embed_dim a = num_heads a = stride_kv a = depth a = cls_token a = attention_drop_rate a = initializer_range a = layer_norm_eps def lowerCamelCase__ ( self :Any ): '''simple docstring''' a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) a = None if self.use_labels: a = ids_tensor([self.batch_size] , self.num_labels ) a = self.get_config() return config, pixel_values, labels def lowerCamelCase__ ( self :List[str] ): '''simple docstring''' return CvtConfig( image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , ) def lowerCamelCase__ ( self :Tuple , __magic_name__ :List[Any] , __magic_name__ :str , __magic_name__ :Optional[Any] ): '''simple docstring''' a = CvtModel(config=_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() a = model(_lowerCamelCase ) a = (self.image_size, self.image_size) a = image_size[0], image_size[1] for i in range(len(self.depth ) ): a = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 ) a = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) ) def lowerCamelCase__ ( self :List[str] , __magic_name__ :str , __magic_name__ :Dict , __magic_name__ :List[Any] ): '''simple docstring''' a = self.num_labels a = CvtForImageClassification(_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() a = model(_lowerCamelCase , labels=_lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase__ ( self :Dict ): '''simple docstring''' a = self.prepare_config_and_inputs() a = config_and_inputs a = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class __lowerCAmelCase ( a__ , a__ , unittest.TestCase ): UpperCamelCase__ = (CvtModel, CvtForImageClassification) if is_torch_available() else () UpperCamelCase__ = ( {'''feature-extraction''': CvtModel, '''image-classification''': CvtForImageClassification} if is_torch_available() else {} ) UpperCamelCase__ = False UpperCamelCase__ = False UpperCamelCase__ = False UpperCamelCase__ = False UpperCamelCase__ = False def lowerCamelCase__ ( self :Optional[int] ): '''simple docstring''' a = CvtModelTester(self ) a = ConfigTester(self , config_class=_lowerCamelCase , has_text_modality=_lowerCamelCase , hidden_size=37 ) def lowerCamelCase__ ( self :List[Any] ): '''simple docstring''' self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def lowerCamelCase__ ( self :Optional[int] ): '''simple docstring''' return @unittest.skip(reason="""Cvt does not output attentions""" ) def lowerCamelCase__ ( self :Union[str, Any] ): '''simple docstring''' pass @unittest.skip(reason="""Cvt does not use inputs_embeds""" ) def lowerCamelCase__ ( self :Optional[Any] ): '''simple docstring''' pass @unittest.skip(reason="""Cvt does not support input and output embeddings""" ) def lowerCamelCase__ ( self :Optional[int] ): '''simple docstring''' pass def lowerCamelCase__ ( self :str ): '''simple docstring''' a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a = model_class(_lowerCamelCase ) a = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic a = [*signature.parameters.keys()] a = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , _lowerCamelCase ) def lowerCamelCase__ ( self :int ): '''simple docstring''' a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowerCamelCase ) def lowerCamelCase__ ( self :List[str] ): '''simple docstring''' def check_hidden_states_output(__magic_name__ :Any , __magic_name__ :List[Any] , __magic_name__ :Any ): a = model_class(_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() with torch.no_grad(): a = model(**self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) ) a = outputs.hidden_states a = len(self.model_tester.depth ) self.assertEqual(len(_lowerCamelCase ) , _lowerCamelCase ) # verify the first hidden states (first block) self.assertListEqual( list(hidden_states[0].shape[-3:] ) , [ self.model_tester.embed_dim[0], self.model_tester.image_size // 4, self.model_tester.image_size // 4, ] , ) a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a = True check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] a = True check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) def lowerCamelCase__ ( self :Union[str, Any] ): '''simple docstring''' a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_lowerCamelCase ) @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def lowerCamelCase__ ( self :int ): '''simple docstring''' pass @slow def lowerCamelCase__ ( self :str ): '''simple docstring''' for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a = CvtModel.from_pretrained(_lowerCamelCase ) self.assertIsNotNone(_lowerCamelCase ) def __A ( ) -> Any: a = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class __lowerCAmelCase ( unittest.TestCase ): @cached_property def lowerCamelCase__ ( self :List[str] ): '''simple docstring''' return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) @slow def lowerCamelCase__ ( self :str ): '''simple docstring''' a = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(_lowerCamelCase ) a = self.default_image_processor a = prepare_img() a = image_processor(images=_lowerCamelCase , return_tensors="""pt""" ).to(_lowerCamelCase ) # forward pass with torch.no_grad(): a = model(**_lowerCamelCase ) # verify the logits a = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , _lowerCamelCase ) a = torch.tensor([0.9285, 0.9015, -0.3150] ).to(_lowerCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCamelCase , atol=1E-4 ) )
354
from __future__ import annotations from typing import Generic, TypeVar __UpperCamelCase : Union[str, Any] = TypeVar("T") class __lowerCAmelCase ( Generic[T] ): def __init__( self :Tuple , __magic_name__ :T ): '''simple docstring''' a = data a = self a = 0 class __lowerCAmelCase ( Generic[T] ): def __init__( self :Tuple ): '''simple docstring''' a = {} def lowerCamelCase__ ( self :Optional[int] , __magic_name__ :T ): '''simple docstring''' a = DisjointSetTreeNode(__magic_name__ ) def lowerCamelCase__ ( self :List[Any] , __magic_name__ :T ): '''simple docstring''' a = self.map[data] if elem_ref != elem_ref.parent: a = self.find_set(elem_ref.parent.data ) return elem_ref.parent def lowerCamelCase__ ( self :List[Any] , __magic_name__ :DisjointSetTreeNode[T] , __magic_name__ :DisjointSetTreeNode[T] ): '''simple docstring''' if nodea.rank > nodea.rank: a = nodea else: a = nodea if nodea.rank == nodea.rank: nodea.rank += 1 def lowerCamelCase__ ( self :Optional[int] , __magic_name__ :T , __magic_name__ :T ): '''simple docstring''' self.link(self.find_set(__magic_name__ ) , self.find_set(__magic_name__ ) ) class __lowerCAmelCase ( Generic[T] ): def __init__( self :Union[str, Any] ): '''simple docstring''' a = {} def lowerCamelCase__ ( self :Union[str, Any] , __magic_name__ :T ): '''simple docstring''' if node not in self.connections: a = {} def lowerCamelCase__ ( self :Any , __magic_name__ :T , __magic_name__ :T , __magic_name__ :int ): '''simple docstring''' self.add_node(__magic_name__ ) self.add_node(__magic_name__ ) a = weight a = weight def lowerCamelCase__ ( self :int ): '''simple docstring''' a = [] a = set() for start in self.connections: for end in self.connections[start]: if (start, end) not in seen: seen.add((end, start) ) edges.append((start, end, self.connections[start][end]) ) edges.sort(key=lambda __magic_name__ : x[2] ) # creating the disjoint set a = DisjointSetTree[T]() for node in self.connections: disjoint_set.make_set(__magic_name__ ) # MST generation a = 0 a = 0 a = GraphUndirectedWeighted[T]() while num_edges < len(self.connections ) - 1: a , a , a = edges[index] index += 1 a = disjoint_set.find_set(__magic_name__ ) a = disjoint_set.find_set(__magic_name__ ) if parent_u != parent_v: num_edges += 1 graph.add_edge(__magic_name__ , __magic_name__ , __magic_name__ ) disjoint_set.union(__magic_name__ , __magic_name__ ) return graph
347
0
import multiprocessing from typing import TYPE_CHECKING, Optional, Union from .. import Dataset, Features, config from ..formatting import query_table from ..packaged_modules.sql.sql import Sql from ..utils import logging from .abc import AbstractDatasetInputStream if TYPE_CHECKING: import sqlitea import sqlalchemy class __lowerCAmelCase ( lowercase__ ): def __init__( self :Union[str, Any] , __magic_name__ :Union[str, "sqlalchemy.sql.Selectable"] , __magic_name__ :Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] , __magic_name__ :Optional[Features] = None , __magic_name__ :str = None , __magic_name__ :bool = False , **__magic_name__ :List[Any] , ): '''simple docstring''' super().__init__(features=_a , cache_dir=_a , keep_in_memory=_a , **_a ) a = Sql( cache_dir=_a , features=_a , sql=_a , con=_a , **_a , ) def lowerCamelCase__ ( self :Union[str, Any] ): '''simple docstring''' a = None a = None a = None a = None self.builder.download_and_prepare( download_config=_a , download_mode=_a , verification_mode=_a , base_path=_a , ) # Build dataset for splits a = self.builder.as_dataset( split="""train""" , verification_mode=_a , in_memory=self.keep_in_memory ) return dataset class __lowerCAmelCase : def __init__( self :Optional[Any] , __magic_name__ :Dataset , __magic_name__ :str , __magic_name__ :Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] , __magic_name__ :Optional[int] = None , __magic_name__ :Optional[int] = None , **__magic_name__ :int , ): '''simple docstring''' if num_proc is not None and num_proc <= 0: raise ValueError(F'num_proc {num_proc} must be an integer > 0.' ) a = dataset a = name a = con a = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE a = num_proc a = to_sql_kwargs def lowerCamelCase__ ( self :List[str] ): '''simple docstring''' a = self.to_sql_kwargs.pop("""sql""" , _a ) a = self.to_sql_kwargs.pop("""con""" , _a ) a = self.to_sql_kwargs.pop("""index""" , _a ) a = self._write(index=_a , **self.to_sql_kwargs ) return written def lowerCamelCase__ ( self :Any , __magic_name__ :Tuple ): '''simple docstring''' a = args a = {**to_sql_kwargs, 'if_exists': 'append'} if offset > 0 else to_sql_kwargs a = query_table( table=self.dataset.data , key=slice(_a , offset + self.batch_size ) , indices=self.dataset._indices , ) a = batch.to_pandas() a = df.to_sql(self.name , self.con , index=_a , **_a ) return num_rows or len(_a ) def lowerCamelCase__ ( self :Optional[int] , __magic_name__ :List[str] , **__magic_name__ :List[Any] ): '''simple docstring''' a = 0 if self.num_proc is None or self.num_proc == 1: for offset in logging.tqdm( range(0 , len(self.dataset ) , self.batch_size ) , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating SQL from Arrow format""" , ): written += self._batch_sql((offset, index, to_sql_kwargs) ) else: a = len(self.dataset ), self.batch_size with multiprocessing.Pool(self.num_proc ) as pool: for num_rows in logging.tqdm( pool.imap( self._batch_sql , [(offset, index, to_sql_kwargs) for offset in range(0 , _a , _a )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating SQL from Arrow format""" , ): written += num_rows return written
355
import shutil import tempfile import unittest import numpy as np import pytest from transformers.testing_utils import require_vision from transformers.utils import is_vision_available if is_vision_available(): from PIL import Image from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast @require_vision class __lowerCAmelCase ( unittest.TestCase ): def lowerCamelCase__ ( self :Any ): '''simple docstring''' a = tempfile.mkdtemp() a = BlipImageProcessor() a = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-BertModel""" ) a = BlipProcessor(__magic_name__ , __magic_name__ ) processor.save_pretrained(self.tmpdirname ) def lowerCamelCase__ ( self :List[Any] , **__magic_name__ :Union[str, Any] ): '''simple docstring''' return AutoProcessor.from_pretrained(self.tmpdirname , **__magic_name__ ).tokenizer def lowerCamelCase__ ( self :str , **__magic_name__ :List[str] ): '''simple docstring''' return AutoProcessor.from_pretrained(self.tmpdirname , **__magic_name__ ).image_processor def lowerCamelCase__ ( self :int ): '''simple docstring''' shutil.rmtree(self.tmpdirname ) def lowerCamelCase__ ( self :Any ): '''simple docstring''' a = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] a = [Image.fromarray(np.moveaxis(__magic_name__ , 0 , -1 ) ) for x in image_inputs] return image_inputs def lowerCamelCase__ ( self :Optional[int] ): '''simple docstring''' a = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) a = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" ) a = self.get_image_processor(do_normalize=__magic_name__ , padding_value=1.0 ) a = BlipProcessor.from_pretrained( self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=__magic_name__ , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , __magic_name__ ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , __magic_name__ ) def lowerCamelCase__ ( self :Any ): '''simple docstring''' a = self.get_image_processor() a = self.get_tokenizer() a = BlipProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ ) a = self.prepare_image_inputs() a = image_processor(__magic_name__ , return_tensors="""np""" ) a = processor(images=__magic_name__ , return_tensors="""np""" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) def lowerCamelCase__ ( self :Any ): '''simple docstring''' a = self.get_image_processor() a = self.get_tokenizer() a = BlipProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ ) a = """lower newer""" a = processor(text=__magic_name__ ) a = tokenizer(__magic_name__ , return_token_type_ids=__magic_name__ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def lowerCamelCase__ ( self :List[Any] ): '''simple docstring''' a = self.get_image_processor() a = self.get_tokenizer() a = BlipProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ ) a = """lower newer""" a = self.prepare_image_inputs() a = processor(text=__magic_name__ , images=__magic_name__ ) self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """input_ids""", """attention_mask"""] ) # test if it raises when no input is passed with pytest.raises(__magic_name__ ): processor() def lowerCamelCase__ ( self :List[str] ): '''simple docstring''' a = self.get_image_processor() a = self.get_tokenizer() a = BlipProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ ) a = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] a = processor.batch_decode(__magic_name__ ) a = tokenizer.batch_decode(__magic_name__ ) self.assertListEqual(__magic_name__ , __magic_name__ ) def lowerCamelCase__ ( self :Optional[int] ): '''simple docstring''' a = self.get_image_processor() a = self.get_tokenizer() a = BlipProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ ) a = """lower newer""" a = self.prepare_image_inputs() a = processor(text=__magic_name__ , images=__magic_name__ ) # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask'] self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """input_ids""", """attention_mask"""] )
347
0
"""simple docstring""" from __future__ import annotations import math def __A ( __lowerCamelCase ) -> list[int]: if num <= 0: a = f'{num}: Invalid input, please enter a positive integer.' raise ValueError(__snake_case ) a = [True] * (num + 1) a = [] a = 2 a = int(math.sqrt(__snake_case ) ) while start <= end: # If start is a prime if sieve[start] is True: prime.append(__snake_case ) # Set multiples of start be False for i in range(start * start , num + 1 , __snake_case ): if sieve[i] is True: a = False start += 1 for j in range(end + 1 , num + 1 ): if sieve[j] is True: prime.append(__snake_case ) return prime if __name__ == "__main__": print(prime_sieve(int(input("Enter a positive integer: ").strip())))
356
from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices __UpperCamelCase : Optional[Any] = logging.get_logger(__name__) __UpperCamelCase : int = { "shi-labs/nat-mini-in1k-224": "https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json", # See all Nat models at https://huggingface.co/models?filter=nat } class __lowerCAmelCase ( __magic_name__ , __magic_name__ ): UpperCamelCase__ = '''nat''' UpperCamelCase__ = { '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers''', } def __init__( self :Any , __magic_name__ :int=4 , __magic_name__ :Dict=3 , __magic_name__ :List[str]=64 , __magic_name__ :Optional[int]=[3, 4, 6, 5] , __magic_name__ :int=[2, 4, 8, 16] , __magic_name__ :str=7 , __magic_name__ :Tuple=3.0 , __magic_name__ :Dict=True , __magic_name__ :List[Any]=0.0 , __magic_name__ :List[Any]=0.0 , __magic_name__ :List[Any]=0.1 , __magic_name__ :Optional[Any]="gelu" , __magic_name__ :Optional[Any]=0.02 , __magic_name__ :Tuple=1E-5 , __magic_name__ :Union[str, Any]=0.0 , __magic_name__ :int=None , __magic_name__ :Any=None , **__magic_name__ :Dict , ): '''simple docstring''' super().__init__(**__magic_name__ ) a = patch_size a = num_channels a = embed_dim a = depths a = len(__magic_name__ ) a = num_heads a = kernel_size a = mlp_ratio a = qkv_bias a = hidden_dropout_prob a = attention_probs_dropout_prob a = drop_path_rate a = hidden_act a = layer_norm_eps a = initializer_range # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model a = int(embed_dim * 2 ** (len(__magic_name__ ) - 1) ) a = layer_scale_init_value a = ["""stem"""] + [F'stage{idx}' for idx in range(1 , len(__magic_name__ ) + 1 )] a , a = get_aligned_output_features_output_indices( out_features=__magic_name__ , out_indices=__magic_name__ , stage_names=self.stage_names )
347
0
from pathlib import Path import fire def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> str: a = Path(__A ) a = Path(__A ) dest_dir.mkdir(exist_ok=__A ) for path in src_dir.iterdir(): a = [x.rstrip() for x in list(path.open().readlines() )][:n] a = dest_dir.joinpath(path.name ) print(__A ) dest_path.open("""w""" ).write("""\n""".join(__A ) ) if __name__ == "__main__": fire.Fire(minify)
357
import argparse import json import os import tensorstore as ts import torch from flax import serialization from flax.traverse_util import flatten_dict, unflatten_dict from tensorflow.io import gfile from transformers.modeling_utils import dtype_byte_size from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import ( rename_keys, ) from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME from transformers.utils.hub import convert_file_size_to_int def __A ( __lowerCamelCase , __lowerCamelCase ) -> Optional[int]: if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3: # expert layer a = flax_key_tuple[:-1] + ("""weight""",) a = torch.permute(__lowerCamelCase , (0, 2, 1) ) elif flax_key_tuple[-1] == "kernel" and ".".join(__lowerCamelCase ): # linear layer a = flax_key_tuple[:-1] + ("""weight""",) a = flax_tensor.T elif flax_key_tuple[-1] in ["scale", "embedding"]: a = flax_key_tuple[:-1] + ("""weight""",) return flax_key_tuple, flax_tensor def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> List[Any]: if "metadata" in layer: a = layer.split("""metadata""" ) a = """""".join(split_layer[0] )[:-1] a = [tuple(("""metadata""" + split_layer[1]).split("""/""" ) )] elif "kvstore" in layer: a = layer.split("""kvstore""" ) a = """""".join(split_layer[0] )[:-1] a = [tuple(("""kvstore""" + split_layer[1]).split("""/""" ) )] else: a = layer.split("""/""" ) a = """/""".join(split_layer[:-1] ) a = (split_layer[-1],) if "kvstore/path" in layer: a = f'{switch_checkpoint_path}/{checkpoint_info[layer]}' elif "kvstore/driver" in layer: a = """file""" else: a = checkpoint_info[layer] return curr_real_layer_name, split_layer, content def __A ( __lowerCamelCase , __lowerCamelCase ) -> Union[str, Any]: a = rename_keys(__lowerCamelCase ) a = {} for k, v in current_block.items(): a = v a = new_current_block torch.save(__lowerCamelCase , __lowerCamelCase ) def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = WEIGHTS_NAME ) -> List[str]: a = convert_file_size_to_int(__lowerCamelCase ) a = [] a = {} a = 0 a = 0 os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase ) with gfile.GFile(switch_checkpoint_path + """/checkpoint""" , """rb""" ) as fp: a = serialization.msgpack_restore(fp.read() )["""optimizer"""]["""target"""] a = flatten_dict(__lowerCamelCase , sep="""/""" ) a = {} for layer in checkpoint_info.keys(): a , a , a = get_key_and_tensorstore_dict( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) if curr_real_layer_name in all_layers: a = content else: a = {split_layer[-1]: content} for key in all_layers.keys(): # open tensorstore file a = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result() a = torch.tensor(__lowerCamelCase ) a = raw_weights.numel() * dtype_byte_size(raw_weights.dtype ) # use the renaming pattern from the small conversion scripts a , a = rename_base_flax_keys(tuple(key.split("""/""" ) ) , __lowerCamelCase ) a = """/""".join(__lowerCamelCase ) # If this weight is going to tip up over the maximal size, we split. if current_block_size + weight_size > max_shard_size: a = os.path.join( __lowerCamelCase , weights_name.replace(""".bin""" , f'-{len(__lowerCamelCase )+1:05d}-of-???.bin' ) ) rename_and_save_block(__lowerCamelCase , __lowerCamelCase ) sharded_state_dicts.append(current_block.keys() ) del current_block a = {} a = 0 a = raw_weights.to(getattr(__lowerCamelCase , __lowerCamelCase ) ) current_block_size += weight_size total_size += weight_size # Add the last block a = os.path.join(__lowerCamelCase , weights_name.replace(""".bin""" , f'-{len(__lowerCamelCase )+1:05d}-of-???.bin' ) ) rename_and_save_block(__lowerCamelCase , __lowerCamelCase ) sharded_state_dicts.append(current_block.keys() ) # If we only have one shard, we return it if len(__lowerCamelCase ) == 1: return {weights_name: sharded_state_dicts[0]}, None # Otherwise, let's build the index a = {} a = {} for idx, shard in enumerate(__lowerCamelCase ): a = weights_name.replace( """.bin""" , f'-{idx+1:05d}-of-{len(__lowerCamelCase ):05d}.bin' ) # len(sharded_state_dicts):05d} a = os.path.join(__lowerCamelCase , weights_name.replace(""".bin""" , f'-{idx+1:05d}-of-???.bin' ) ) os.rename(__lowerCamelCase , os.path.join(__lowerCamelCase , __lowerCamelCase ) ) a = shard for key in shard: a = shard_file # Add the metadata a = {"""total_size""": total_size} a = {"""metadata""": metadata, """weight_map""": weight_map} with open(os.path.join(__lowerCamelCase , __lowerCamelCase ) , """w""" , encoding="""utf-8""" ) as f: a = json.dumps(__lowerCamelCase , indent=2 , sort_keys=__lowerCamelCase ) + """\n""" f.write(__lowerCamelCase ) return metadata, index if __name__ == "__main__": __UpperCamelCase : Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--switch_t5x_checkpoint_path", default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600", type=str, required=False, help="Path to a directory containing a folder per layer. Follows the original Google format.", ) parser.add_argument("--max_shard_size", default="10GB", required=False, help="Max shard size") parser.add_argument("--dtype", default="bfloat16", type=str, required=False, help="dtype of the saved model") parser.add_argument( "--pytorch_dump_folder_path", default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted", type=str, required=False, help="Path to the output pytorch model.", ) __UpperCamelCase : Any = parser.parse_args() shard_on_the_fly( args.switch_tax_checkpoint_path, args.pytorch_dump_folder_path, args.max_shard_size, args.dtype, ) def __A ( ) -> Tuple: from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer a = SwitchTransformersConfig.from_pretrained("""google/switch-base-8""" ) config.save_pretrained("""/home/arthur_huggingface_co/transformers/switch_converted""" ) a = SwitchTransformersForConditionalGeneration.from_pretrained( """/home/arthur_huggingface_co/transformers/switch_converted""" , device_map="""auto""" ) a = TaTokenizer.from_pretrained("""t5-small""" ) a = """A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.""" a = tokenizer(__lowerCamelCase , return_tensors="""pt""" ).input_ids a = model.generate(__lowerCamelCase , decoder_start_token_id=0 ) print(tokenizer.decode(out[0] ) )
347
0
def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Union[str, Any]: if exponent == 1: return base if exponent % 2 == 0: a = _modexpt(lowerCamelCase_ , exponent // 2 , lowerCamelCase_ ) % modulo_value return (x * x) % modulo_value else: return (base * _modexpt(lowerCamelCase_ , exponent - 1 , lowerCamelCase_ )) % modulo_value def __A ( __lowerCamelCase = 1777 , __lowerCamelCase = 1855 , __lowerCamelCase = 8 ) -> Dict: a = base for _ in range(1 , lowerCamelCase_ ): a = _modexpt(lowerCamelCase_ , lowerCamelCase_ , 10**digits ) return result if __name__ == "__main__": print(F'{solution() = }')
358
import glob import os import random from string import ascii_lowercase, digits import cva import numpy as np # Parrameters __UpperCamelCase : Union[str, Any] = (720, 1_280) # Height, Width __UpperCamelCase : Any = (0.4, 0.6) # if height or width lower than this scale, drop it. __UpperCamelCase : str = 1 / 100 __UpperCamelCase : Optional[int] = "" __UpperCamelCase : List[Any] = "" __UpperCamelCase : Union[str, Any] = "" __UpperCamelCase : Tuple = 250 def __A ( ) -> None: a , a = get_dataset(__lowerCamelCase , __lowerCamelCase ) for index in range(__lowerCamelCase ): a = random.sample(range(len(__lowerCamelCase ) ) , 4 ) a , a , a = update_image_and_anno( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , filter_scale=__lowerCamelCase , ) # Get random string code: '7b7ad245cdff75241935e4dd860f3bad' a = random_chars(32 ) a = path.split(os.sep )[-1].rsplit(""".""" , 1 )[0] a = f'{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}' cva.imwrite(f'{file_root}.jpg' , __lowerCamelCase , [cva.IMWRITE_JPEG_QUALITY, 85] ) print(f'Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}' ) a = [] for anno in new_annos: a = anno[3] - anno[1] a = anno[4] - anno[2] a = anno[1] + width / 2 a = anno[2] + height / 2 a = f'{anno[0]} {x_center} {y_center} {width} {height}' annos_list.append(__lowerCamelCase ) with open(f'{file_root}.txt' , """w""" ) as outfile: outfile.write("""\n""".join(line for line in annos_list ) ) def __A ( __lowerCamelCase , __lowerCamelCase ) -> tuple[list, list]: a = [] a = [] for label_file in glob.glob(os.path.join(__lowerCamelCase , """*.txt""" ) ): a = label_file.split(os.sep )[-1].rsplit(""".""" , 1 )[0] with open(__lowerCamelCase ) as in_file: a = in_file.readlines() a = os.path.join(__lowerCamelCase , f'{label_name}.jpg' ) a = [] for obj_list in obj_lists: a = obj_list.rstrip("""\n""" ).split(""" """ ) a = float(obj[1] ) - float(obj[3] ) / 2 a = float(obj[2] ) - float(obj[4] ) / 2 a = float(obj[1] ) + float(obj[3] ) / 2 a = float(obj[2] ) + float(obj[4] ) / 2 boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] ) if not boxes: continue img_paths.append(__lowerCamelCase ) labels.append(__lowerCamelCase ) return img_paths, labels def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = 0.0 , ) -> tuple[list, list, str]: a = np.zeros([output_size[0], output_size[1], 3] , dtype=np.uinta ) a = scale_range[0] + random.random() * (scale_range[1] - scale_range[0]) a = scale_range[0] + random.random() * (scale_range[1] - scale_range[0]) a = int(scale_x * output_size[1] ) a = int(scale_y * output_size[0] ) a = [] a = [] for i, index in enumerate(__lowerCamelCase ): a = all_img_list[index] path_list.append(__lowerCamelCase ) a = all_annos[index] a = cva.imread(__lowerCamelCase ) if i == 0: # top-left a = cva.resize(__lowerCamelCase , (divid_point_x, divid_point_y) ) a = img for bbox in img_annos: a = bbox[1] * scale_x a = bbox[2] * scale_y a = bbox[3] * scale_x a = bbox[4] * scale_y new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) elif i == 1: # top-right a = cva.resize(__lowerCamelCase , (output_size[1] - divid_point_x, divid_point_y) ) a = img for bbox in img_annos: a = scale_x + bbox[1] * (1 - scale_x) a = bbox[2] * scale_y a = scale_x + bbox[3] * (1 - scale_x) a = bbox[4] * scale_y new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) elif i == 2: # bottom-left a = cva.resize(__lowerCamelCase , (divid_point_x, output_size[0] - divid_point_y) ) a = img for bbox in img_annos: a = bbox[1] * scale_x a = scale_y + bbox[2] * (1 - scale_y) a = bbox[3] * scale_x a = scale_y + bbox[4] * (1 - scale_y) new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) else: # bottom-right a = cva.resize( __lowerCamelCase , (output_size[1] - divid_point_x, output_size[0] - divid_point_y) ) a = img for bbox in img_annos: a = scale_x + bbox[1] * (1 - scale_x) a = scale_y + bbox[2] * (1 - scale_y) a = scale_x + bbox[3] * (1 - scale_x) a = scale_y + bbox[4] * (1 - scale_y) new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) # Remove bounding box small than scale of filter if filter_scale > 0: a = [ anno for anno in new_anno if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2]) ] return output_img, new_anno, path_list[0] def __A ( __lowerCamelCase ) -> str: assert number_char > 1, "The number of character should greater than 1" a = ascii_lowercase + digits return "".join(random.choice(__lowerCamelCase ) for _ in range(__lowerCamelCase ) ) if __name__ == "__main__": main() print("DONE ✅")
347
0