Dataset schema (column: dtype, observed range):

    code                      string   (lengths 87 to 55.2k)
    code_codestyle            int64    (0 to 349)
    style_context             string   (lengths 135 to 49.1k)
    style_context_codestyle   int64    (0 to 349)
    label                     int64    (0 to 1)
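The five columns pair two Python source strings (`code` and `style_context`) with integer style IDs and a binary label. Below is a minimal sketch of loading and inspecting a dataset with this schema via the Hugging Face `datasets` library; the Hub ID "user/dataset-id" is a placeholder, since the dump does not name the dataset.

    # Minimal sketch, assuming this dump comes from a Hugging Face dataset.
    # "user/dataset-id" is a placeholder; the dump does not give the real Hub ID.
    from datasets import load_dataset

    ds = load_dataset("user/dataset-id", split="train")  # hypothetical ID

    row = ds[0]
    # Integer columns per the schema: the two *_codestyle IDs range over
    # 0..349 and label is binary (0 or 1).
    print(row["code_codestyle"], row["style_context_codestyle"], row["label"])
    # Each string column holds a full Python source file.
    print(row["code"][:200])

Sample rows, one field per block:

--- Row 1 ---
code: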
"""simple docstring""" import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging _a = logging.get_logger(__name__) _a = { 'BAAI/AltCLIP': 'https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json', # See all AltCLIP models at https://huggingface.co/models?filter=altclip } class A_ (lowercase__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Any = """altclip_text_model""" def __init__( self , lowercase_=25_0002 , lowercase_=1024 , lowercase_=24 , lowercase_=16 , lowercase_=4096 , lowercase_="gelu" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=514 , lowercase_=1 , lowercase_=0.02 , lowercase_=0.02 , lowercase_=1E-0_5 , lowercase_=1 , lowercase_=0 , lowercase_=2 , lowercase_="absolute" , lowercase_=True , lowercase_=768 , **lowercase_ , ): """simple docstring""" super().__init__(pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , **lowercase_ ) UpperCAmelCase_ : Union[str, Any] = vocab_size UpperCAmelCase_ : Any = hidden_size UpperCAmelCase_ : List[str] = num_hidden_layers UpperCAmelCase_ : Union[str, Any] = num_attention_heads UpperCAmelCase_ : Optional[Any] = hidden_act UpperCAmelCase_ : Tuple = intermediate_size UpperCAmelCase_ : str = hidden_dropout_prob UpperCAmelCase_ : int = attention_probs_dropout_prob UpperCAmelCase_ : Tuple = max_position_embeddings UpperCAmelCase_ : Optional[Any] = type_vocab_size UpperCAmelCase_ : List[str] = initializer_range UpperCAmelCase_ : int = initializer_factor UpperCAmelCase_ : Tuple = layer_norm_eps UpperCAmelCase_ : int = position_embedding_type UpperCAmelCase_ : List[str] = use_cache UpperCAmelCase_ : Union[str, Any] = project_dim class A_ (lowercase__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Optional[Any] = """altclip_vision_model""" def __init__( self , lowercase_=768 , lowercase_=3072 , lowercase_=512 , lowercase_=12 , lowercase_=12 , lowercase_=3 , lowercase_=224 , lowercase_=32 , lowercase_="quick_gelu" , lowercase_=1E-5 , lowercase_=0.0 , lowercase_=0.02 , lowercase_=1.0 , **lowercase_ , ): """simple docstring""" super().__init__(**lowercase_ ) UpperCAmelCase_ : List[str] = hidden_size UpperCAmelCase_ : List[str] = intermediate_size UpperCAmelCase_ : Tuple = projection_dim UpperCAmelCase_ : Any = num_hidden_layers UpperCAmelCase_ : Optional[Any] = num_attention_heads UpperCAmelCase_ : Union[str, Any] = num_channels UpperCAmelCase_ : List[str] = patch_size UpperCAmelCase_ : Any = image_size UpperCAmelCase_ : Optional[int] = initializer_range UpperCAmelCase_ : Optional[Any] = initializer_factor UpperCAmelCase_ : int = attention_dropout UpperCAmelCase_ : Union[str, Any] = layer_norm_eps UpperCAmelCase_ : str = hidden_act @classmethod def UpperCamelCase__ ( cls , lowercase_ , **lowercase_ ): """simple docstring""" cls._set_token_in_kwargs(lowercase_ ) UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = cls.get_config_dict(lowercase_ , **lowercase_ ) # get the vision config dict if we are loading from AltCLIPConfig if config_dict.get("model_type" ) == "altclip": UpperCAmelCase_ : List[str] = config_dict["vision_config"] if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type: logger.warning( F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """ F"""{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(lowercase_ , **lowercase_ ) class A_ (lowercase__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Any = """altclip""" SCREAMING_SNAKE_CASE__ : Optional[Any] = True def __init__( self , lowercase_=None , lowercase_=None , lowercase_=768 , lowercase_=2.65_92 , **lowercase_ ): """simple docstring""" # If `_config_dict` exist, we use them for the backward compatibility. # We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot # of confusion!). UpperCAmelCase_ : Dict = kwargs.pop("text_config_dict" , lowercase_ ) UpperCAmelCase_ : List[Any] = kwargs.pop("vision_config_dict" , lowercase_ ) super().__init__(**lowercase_ ) # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`. if text_config_dict is not None: if text_config is None: UpperCAmelCase_ : Union[str, Any] = {} # This is the complete result when using `text_config_dict`. UpperCAmelCase_ : int = AltCLIPTextConfig(**lowercase_ ).to_dict() # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different. for key, value in _text_config_dict.items(): if key in text_config and value != text_config[key] and key not in ["transformers_version"]: # If specified in `text_config_dict` if key in text_config_dict: UpperCAmelCase_ : int = ( F"""`{key}` is found in both `text_config_dict` and `text_config` but with different values. """ F"""The value `text_config_dict[\"{key}\"]` will be used instead.""" ) # If inferred from default argument values (just to be super careful) else: UpperCAmelCase_ : Optional[int] = ( F"""`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The """ F"""value `text_config[\"{key}\"]` will be overriden.""" ) logger.warning(lowercase_ ) # Update all values in `text_config` with the ones in `_text_config_dict`. text_config.update(_text_config_dict ) if vision_config_dict is not None: if vision_config is None: UpperCAmelCase_ : Optional[int] = {} # This is the complete result when using `vision_config_dict`. UpperCAmelCase_ : int = AltCLIPVisionConfig(**lowercase_ ).to_dict() # convert keys to string instead of integer if "id2label" in _vision_config_dict: UpperCAmelCase_ : Dict = { str(lowercase_ ): value for key, value in _vision_config_dict["id2label"].items() } # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different. for key, value in _vision_config_dict.items(): if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]: # If specified in `vision_config_dict` if key in vision_config_dict: UpperCAmelCase_ : Optional[Any] = ( F"""`{key}` is found in both `vision_config_dict` and `vision_config` but with different """ F"""values. The value `vision_config_dict[\"{key}\"]` will be used instead.""" ) # If inferred from default argument values (just to be super careful) else: UpperCAmelCase_ : List[str] = ( F"""`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. 
""" F"""The value `vision_config[\"{key}\"]` will be overriden.""" ) logger.warning(lowercase_ ) # Update all values in `vision_config` with the ones in `_vision_config_dict`. vision_config.update(_vision_config_dict ) if text_config is None: UpperCAmelCase_ : str = {} logger.info("`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values." ) if vision_config is None: UpperCAmelCase_ : str = {} logger.info("`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values." ) UpperCAmelCase_ : Tuple = AltCLIPTextConfig(**lowercase_ ) UpperCAmelCase_ : List[str] = AltCLIPVisionConfig(**lowercase_ ) UpperCAmelCase_ : Optional[int] = projection_dim UpperCAmelCase_ : str = logit_scale_init_value UpperCAmelCase_ : Union[str, Any] = 1.0 @classmethod def UpperCamelCase__ ( cls , lowercase_ , lowercase_ , **lowercase_ ): """simple docstring""" return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Tuple = copy.deepcopy(self.__dict__ ) UpperCAmelCase_ : int = self.text_config.to_dict() UpperCAmelCase_ : Union[str, Any] = self.vision_config.to_dict() UpperCAmelCase_ : Dict = self.__class__.model_type return output
code_codestyle: 61

style_context:
'''simple docstring''' import argparse import re import torch from CLAP import create_model from transformers import AutoFeatureExtractor, ClapConfig, ClapModel lowercase : Union[str, Any] = { 'text_branch': 'text_model', 'audio_branch': 'audio_model.audio_encoder', 'attn': 'attention.self', 'self.proj': 'output.dense', 'attention.self_mask': 'attn_mask', 'mlp.fc1': 'intermediate.dense', 'mlp.fc2': 'output.dense', 'norm1': 'layernorm_before', 'norm2': 'layernorm_after', 'bn0': 'batch_norm', } lowercase : Tuple = AutoFeatureExtractor.from_pretrained('laion/clap-htsat-unfused', truncation='rand_trunc') def lowerCAmelCase_ ( snake_case__ , snake_case__=False ): '''simple docstring''' A, A : Tuple = create_model( '''HTSAT-tiny''' , '''roberta''' , snake_case__ , precision='''fp32''' , device='''cuda:0''' if torch.cuda.is_available() else '''cpu''' , enable_fusion=snake_case__ , fusion_type='''aff_2d''' if enable_fusion else None , ) return model, model_cfg def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' A : Dict = {} A : str = R'''.*sequential.(\d+).*''' A : Union[str, Any] = R'''.*_projection.(\d+).*''' for key, value in state_dict.items(): # check if any key needs to be modified for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items(): if key_to_modify in key: A : Any = key.replace(snake_case__ , snake_case__ ) if re.match(snake_case__ , snake_case__ ): # replace sequential layers with list A : Any = re.match(snake_case__ , snake_case__ ).group(1 ) A : List[str] = key.replace(F'sequential.{sequential_layer}.' , F'layers.{int(snake_case__ )//3}.linear.' ) elif re.match(snake_case__ , snake_case__ ): A : Union[str, Any] = int(re.match(snake_case__ , snake_case__ ).group(1 ) ) # Because in CLAP they use `nn.Sequential`... A : str = 1 if projecton_layer == 0 else 2 A : Optional[Any] = key.replace(F'_projection.{projecton_layer}.' , F'_projection.linear{transformers_projection_layer}.' ) if "audio" and "qkv" in key: # split qkv into query key and value A : int = value A : List[Any] = mixed_qkv.size(0 ) // 3 A : Union[str, Any] = mixed_qkv[:qkv_dim] A : Optional[int] = mixed_qkv[qkv_dim : qkv_dim * 2] A : Optional[int] = mixed_qkv[qkv_dim * 2 :] A : Tuple = query_layer A : Union[str, Any] = key_layer A : Optional[int] = value_layer else: A : Dict = value return model_state_dict def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__=False ): '''simple docstring''' A, A : int = init_clap(snake_case__ , enable_fusion=snake_case__ ) clap_model.eval() A : str = clap_model.state_dict() A : Union[str, Any] = rename_state_dict(snake_case__ ) A : Tuple = ClapConfig() A : str = enable_fusion A : str = ClapModel(snake_case__ ) # ignore the spectrogram embedding layer model.load_state_dict(snake_case__ , strict=snake_case__ ) model.save_pretrained(snake_case__ ) transformers_config.save_pretrained(snake_case__ ) if __name__ == "__main__": lowercase : List[str] = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') parser.add_argument('--enable_fusion', action='store_true', help='Whether to enable fusion or not') lowercase : Tuple = parser.parse_args() convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
style_context_codestyle: 3
label: 0

--- Row 2 ---
code:
# Lint as: python3 import dataclasses import re from dataclasses import dataclass from functools import total_ordering from typing import Optional, Union _A = re.compile(R'^(?P<major>\d+)' R'\.(?P<minor>\d+)' R'\.(?P<patch>\d+)$') @total_ordering @dataclass class UpperCAmelCase__ : """simple docstring""" UpperCAmelCase__ : str UpperCAmelCase__ : Optional[str] = None UpperCAmelCase__ : Optional[Union[str, int]] = None UpperCAmelCase__ : Optional[Union[str, int]] = None UpperCAmelCase__ : Optional[Union[str, int]] = None def _a ( self ) -> Dict: __UpperCamelCase , __UpperCamelCase , __UpperCamelCase =_str_to_version_tuple(self.version_str ) def __repr__( self ) -> List[Any]: return f'{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}' @property def _a ( self ) -> List[str]: return self.major, self.minor, self.patch def _a ( self , A_ ) -> Optional[Any]: if isinstance(A_ , A_ ): return Version(A_ ) elif isinstance(A_ , A_ ): return other raise TypeError(f'{other} (type {type(A_ )}) cannot be compared to version.' ) def __eq__( self , A_ ) -> Union[str, Any]: try: __UpperCamelCase =self._validate_operand(A_ ) except (TypeError, ValueError): return False else: return self.tuple == other.tuple def __lt__( self , A_ ) -> Optional[int]: __UpperCamelCase =self._validate_operand(A_ ) return self.tuple < other.tuple def __hash__( self ) -> Optional[Any]: return hash(_version_tuple_to_str(self.tuple ) ) @classmethod def _a ( cls , A_ ) -> Tuple: __UpperCamelCase ={f.name for f in dataclasses.fields(cls )} return cls(**{k: v for k, v in dic.items() if k in field_names} ) def _a ( self ) -> str: return self.version_str def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int ): __UpperCamelCase =_VERSION_REG.match(SCREAMING_SNAKE_CASE__ ) if not res: raise ValueError(F'Invalid version \'{version_str}\'. Format should be x.y.z with {{x,y,z}} being digits.' ) return tuple(int(SCREAMING_SNAKE_CASE__ ) for v in [res.group('major' ), res.group('minor' ), res.group('patch' )] ) def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : List[Any] ): return ".".join(str(SCREAMING_SNAKE_CASE__ ) for v in version_tuple )
code_codestyle: 62

style_context:
'''simple docstring''' import os from pickle import UnpicklingError from typing import Dict, Tuple import jax import jax.numpy as jnp import numpy as np from flax.serialization import from_bytes from flax.traverse_util import flatten_dict, unflatten_dict import transformers from .utils import logging lowercase : Dict = logging.get_logger(__name__) def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__=False ): '''simple docstring''' try: import torch # noqa: F401 except ImportError: logger.error( '''Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see''' ''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation''' ''' instructions.''' ) raise if not is_sharded: A : Union[str, Any] = os.path.abspath(snake_case__ ) logger.info(F'Loading PyTorch weights from {pt_path}' ) A : Any = torch.load(snake_case__ , map_location='''cpu''' ) logger.info(F'PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.' ) A : List[str] = convert_pytorch_state_dict_to_flax(snake_case__ , snake_case__ ) else: # model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files A : Any = convert_pytorch_sharded_state_dict_to_flax(snake_case__ , snake_case__ ) return flax_state_dict def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , ): '''simple docstring''' def is_key_or_prefix_key_in_dict(snake_case__ ) -> bool: return len(set(snake_case__ ) & {key, (model_prefix,) + key} ) > 0 # layer norm A : Union[str, Any] = pt_tuple_key[:-1] + ('''scale''',) if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(snake_case__ ): return renamed_pt_tuple_key, pt_tensor # batch norm layer mean A : Tuple = pt_tuple_key[:-1] + ('''mean''',) if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(snake_case__ ): return renamed_pt_tuple_key, pt_tensor # batch norm layer var A : Dict = pt_tuple_key[:-1] + ('''var''',) if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(snake_case__ ): return renamed_pt_tuple_key, pt_tensor # embedding A : Any = pt_tuple_key[:-1] + ('''embedding''',) if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(snake_case__ ): return renamed_pt_tuple_key, pt_tensor # conv layer A : Optional[Any] = pt_tuple_key[:-1] + ('''kernel''',) if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(snake_case__ ): A : List[Any] = pt_tensor.transpose(2 , 3 , 1 , 0 ) return renamed_pt_tuple_key, pt_tensor # linear layer A : Optional[int] = pt_tuple_key[:-1] + ('''kernel''',) if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(snake_case__ ): A : str = pt_tensor.T return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm weight A : Dict = pt_tuple_key[:-1] + ('''weight''',) if pt_tuple_key[-1] == "gamma": return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm bias A : List[Any] = pt_tuple_key[:-1] + ('''bias''',) if pt_tuple_key[-1] == "beta": return renamed_pt_tuple_key, pt_tensor # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030 A : Dict = None if pt_tuple_key[-3::2] == ("parametrizations", "original0"): A : List[Any] = pt_tuple_key[-2] + '''_g''' elif pt_tuple_key[-3::2] == ("parametrizations", "original1"): A : List[str] = pt_tuple_key[-2] + '''_v''' if name is not None: A : int = pt_tuple_key[:-3] + (name,) return renamed_pt_tuple_key, pt_tensor return 
pt_tuple_key, pt_tensor def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' A : Dict = {k: v.numpy() for k, v in pt_state_dict.items()} A : int = flax_model.base_model_prefix # use params dict if the model contains batch norm layers if "params" in flax_model.params: A : List[str] = flax_model.params['''params'''] else: A : Dict = flax_model.params A : List[Any] = flatten_dict(snake_case__ ) # add batch_stats keys,values to dict if "batch_stats" in flax_model.params: A : List[str] = flatten_dict(flax_model.params['''batch_stats'''] ) random_flax_state_dict.update(snake_case__ ) A : int = {} A : List[str] = (model_prefix not in flax_model_params) and ( model_prefix in {k.split('''.''' )[0] for k in pt_state_dict.keys()} ) A : int = (model_prefix in flax_model_params) and ( model_prefix not in {k.split('''.''' )[0] for k in pt_state_dict.keys()} ) # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): A : str = tuple(pt_key.split('''.''' ) ) # remove base model prefix if necessary A : Union[str, Any] = pt_tuple_key[0] == model_prefix if load_model_with_head_into_base_model and has_base_model_prefix: A : Any = pt_tuple_key[1:] # Correctly rename weight parameters A, A : Dict = rename_key_and_reshape_tensor( snake_case__ , snake_case__ , snake_case__ , snake_case__ ) # add model prefix if necessary A : Any = (model_prefix,) + flax_key in random_flax_state_dict if load_base_model_into_model_with_head and require_base_model_prefix: A : int = (model_prefix,) + flax_key if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( F'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape ' F'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' 
) # add batch stats if the model contains batchnorm layers if "batch_stats" in flax_model.params: if "mean" in flax_key[-1] or "var" in flax_key[-1]: A : Tuple = jnp.asarray(snake_case__ ) continue # remove num_batches_tracked key if "num_batches_tracked" in flax_key[-1]: flax_state_dict.pop(snake_case__ , snake_case__ ) continue # also add unexpected weight so that warning is thrown A : List[str] = jnp.asarray(snake_case__ ) else: # also add unexpected weight so that warning is thrown A : Optional[Any] = jnp.asarray(snake_case__ ) return unflatten_dict(snake_case__ ) def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' import torch # Load the index A : Union[str, Any] = {} for shard_file in shard_filenames: # load using msgpack utils A : List[str] = torch.load(snake_case__ ) A : int = {k: v.numpy() for k, v in pt_state_dict.items()} A : Tuple = flax_model.base_model_prefix # use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict if "batch_stats" in flax_model.params: A : Optional[int] = flax_model.params['''params'''] A : List[Any] = flatten_dict(snake_case__ ) random_flax_state_dict.update(flatten_dict(flax_model.params['''batch_stats'''] ) ) else: A : Dict = flax_model.params A : Tuple = flatten_dict(snake_case__ ) A : List[str] = (model_prefix not in flax_model_params) and ( model_prefix in {k.split('''.''' )[0] for k in pt_state_dict.keys()} ) A : List[str] = (model_prefix in flax_model_params) and ( model_prefix not in {k.split('''.''' )[0] for k in pt_state_dict.keys()} ) # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): A : int = tuple(pt_key.split('''.''' ) ) # remove base model prefix if necessary A : List[str] = pt_tuple_key[0] == model_prefix if load_model_with_head_into_base_model and has_base_model_prefix: A : Union[str, Any] = pt_tuple_key[1:] # Correctly rename weight parameters A, A : Any = rename_key_and_reshape_tensor( snake_case__ , snake_case__ , snake_case__ , snake_case__ ) # add model prefix if necessary A : int = (model_prefix,) + flax_key in random_flax_state_dict if load_base_model_into_model_with_head and require_base_model_prefix: A : int = (model_prefix,) + flax_key if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( F'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape ' F'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' 
) # add batch stats if the model contains batchnorm layers if "batch_stats" in flax_model.params: if "mean" in flax_key[-1]: A : Optional[int] = jnp.asarray(snake_case__ ) continue if "var" in flax_key[-1]: A : Optional[int] = jnp.asarray(snake_case__ ) continue # remove num_batches_tracked key if "num_batches_tracked" in flax_key[-1]: flax_state_dict.pop(snake_case__ , snake_case__ ) continue # also add unexpected weight so that warning is thrown A : Optional[Any] = jnp.asarray(snake_case__ ) else: # also add unexpected weight so that warning is thrown A : Optional[Any] = jnp.asarray(snake_case__ ) return unflatten_dict(snake_case__ ) def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' A : Dict = os.path.abspath(snake_case__ ) logger.info(F'Loading Flax weights from {flax_checkpoint_path}' ) # import correct flax class A : List[str] = getattr(snake_case__ , '''Flax''' + model.__class__.__name__ ) # load flax weight dict with open(snake_case__ , '''rb''' ) as state_f: try: A : int = from_bytes(snake_case__ , state_f.read() ) except UnpicklingError: raise EnvironmentError(F'Unable to convert {flax_checkpoint_path} to Flax deserializable object. ' ) return load_flax_weights_in_pytorch_model(snake_case__ , snake_case__ ) def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' try: import torch # noqa: F401 except ImportError: logger.error( '''Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see''' ''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation''' ''' instructions.''' ) raise # check if we have bf16 weights A : List[str] = flatten_dict(jax.tree_util.tree_map(lambda snake_case__ : x.dtype == jnp.bfloataa , snake_case__ ) ).values() if any(snake_case__ ): # convert all weights to fp32 if the are bf16 since torch.from_numpy can-not handle bf16 # and bf16 is not fully supported in PT yet. logger.warning( '''Found ``bfloat16`` weights in Flax model. 
Casting all ``bfloat16`` weights to ``float32`` ''' '''before loading those in PyTorch model.''' ) A : Optional[Any] = jax.tree_util.tree_map( lambda snake_case__ : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , snake_case__ ) A : Union[str, Any] = flatten_dict(snake_case__ ) A : List[Any] = pt_model.state_dict() A : Union[str, Any] = (pt_model.base_model_prefix in flax_state) and ( pt_model.base_model_prefix not in {k.split('''.''' )[0] for k in pt_model_dict.keys()} ) A : Tuple = (pt_model.base_model_prefix not in flax_state) and ( pt_model.base_model_prefix in {k.split('''.''' )[0] for k in pt_model_dict.keys()} ) # keep track of unexpected & missing keys A : int = [] A : Any = set(pt_model_dict.keys() ) for flax_key_tuple, flax_tensor in flax_state_dict.items(): A : Union[str, Any] = flax_key_tuple[0] == pt_model.base_model_prefix A : int = '''.'''.join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict # adapt flax_key to prepare for loading from/to base model only if load_model_with_head_into_base_model and has_base_model_prefix: A : List[str] = flax_key_tuple[1:] elif load_base_model_into_model_with_head and require_base_model_prefix: A : Optional[Any] = (pt_model.base_model_prefix,) + flax_key_tuple # rename flax weights to PyTorch format if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(snake_case__ ) not in pt_model_dict: # conv layer A : Optional[int] = flax_key_tuple[:-1] + ('''weight''',) A : Optional[int] = jnp.transpose(snake_case__ , (3, 2, 0, 1) ) elif flax_key_tuple[-1] == "kernel" and ".".join(snake_case__ ) not in pt_model_dict: # linear layer A : Tuple = flax_key_tuple[:-1] + ('''weight''',) A : Tuple = flax_tensor.T elif flax_key_tuple[-1] in ["scale", "embedding"]: A : Optional[int] = flax_key_tuple[:-1] + ('''weight''',) # adding batch stats from flax batch norm to pt elif "mean" in flax_key_tuple[-1]: A : Tuple = flax_key_tuple[:-1] + ('''running_mean''',) elif "var" in flax_key_tuple[-1]: A : Tuple = flax_key_tuple[:-1] + ('''running_var''',) if "batch_stats" in flax_state: A : List[Any] = '''.'''.join(flax_key_tuple[1:] ) # Remove the params/batch_stats header else: A : Union[str, Any] = '''.'''.join(snake_case__ ) # We also need to look at `pt_model_dict` and see if there are keys requiring further transformation. A : int = {} # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030 for key in pt_model_dict: A : Optional[int] = key.split('''.''' ) A : Dict = None if key_components[-3::2] == ["parametrizations", "original0"]: A : List[str] = key_components[-2] + '''_g''' elif key_components[-3::2] == ["parametrizations", "original1"]: A : List[Any] = key_components[-2] + '''_v''' if name is not None: A : str = key_components[:-3] + [name] A : Optional[Any] = '''.'''.join(snake_case__ ) A : Optional[Any] = key if flax_key in special_pt_names: A : Optional[Any] = special_pt_names[flax_key] if flax_key in pt_model_dict: if flax_tensor.shape != pt_model_dict[flax_key].shape: raise ValueError( F'Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected ' F'to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.' 
) else: # add weight to pytorch dict A : Dict = np.asarray(snake_case__ ) if not isinstance(snake_case__ , np.ndarray ) else flax_tensor A : Dict = torch.from_numpy(snake_case__ ) # remove from missing keys missing_keys.remove(snake_case__ ) else: # weight is not expected by PyTorch model unexpected_keys.append(snake_case__ ) pt_model.load_state_dict(snake_case__ ) # re-transform missing_keys to list A : List[Any] = list(snake_case__ ) if len(snake_case__ ) > 0: logger.warning( '''Some weights of the Flax model were not used when initializing the PyTorch model''' F' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing' F' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture' ''' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This''' F' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect' ''' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a''' ''' FlaxBertForSequenceClassification model).''' ) else: logger.warning(F'All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n' ) if len(snake_case__ ) > 0: logger.warning( F'Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly' F' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to' ''' use it for predictions and inference.''' ) else: logger.warning( F'All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n' '''If your task is similar to the task the model of the checkpoint was trained on, ''' F'you can already use {pt_model.__class__.__name__} for predictions without further training.' ) return pt_model
style_context_codestyle: 3
label: 0

--- Row 3 ---
code:
'''simple docstring''' # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re from ..models.auto import AutoProcessor from ..models.vision_encoder_decoder import VisionEncoderDecoderModel from ..utils import is_vision_available from .base import PipelineTool if is_vision_available(): from PIL import Image class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ): """simple docstring""" __a ='naver-clova-ix/donut-base-finetuned-docvqa' __a =( 'This is a tool that answers a question about an document (pdf). It takes an input named `document` which ' 'should be the document containing the information, as well as a `question` that is the question about the ' 'document. It returns a text that contains the answer to the question.' ) __a ='document_qa' __a =AutoProcessor __a =VisionEncoderDecoderModel __a =['image', 'text'] __a =['text'] def __init__( self : Optional[Any] , *__a : Any , **__a : Union[str, Any] ): if not is_vision_available(): raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool." ) super().__init__(*__a , **__a ) def UpperCamelCase__ ( self : Union[str, Any] , __a : "Image" , __a : str ): _a = "<s_docvqa><s_question>{user_input}</s_question><s_answer>" _a = task_prompt.replace("{user_input}" , __a ) _a = self.pre_processor.tokenizer( __a , add_special_tokens=__a , return_tensors="pt" ).input_ids _a = self.pre_processor(__a , return_tensors="pt" ).pixel_values return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values} def UpperCamelCase__ ( self : List[Any] , __a : List[Any] ): return self.model.generate( inputs["pixel_values"].to(self.device ) , decoder_input_ids=inputs["decoder_input_ids"].to(self.device ) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=__a , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=__a , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=__a , ).sequences def UpperCamelCase__ ( self : str , __a : Tuple ): _a = self.pre_processor.batch_decode(__a )[0] _a = sequence.replace(self.pre_processor.tokenizer.eos_token , "" ) _a = sequence.replace(self.pre_processor.tokenizer.pad_token , "" ) _a = re.sub(r"<.*?>" , "" , __a , count=1 ).strip() # remove first task start token _a = self.pre_processor.tokenajson(__a ) return sequence["answer"]
code_codestyle: 63

style_context:
'''simple docstring''' import argparse import gc import json import os import re import torch from huggingface_hub import hf_hub_download from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint lowercase : Optional[int] = { '169M': 12, '430M': 24, '1B5': 24, '3B': 32, '7B': 32, '14B': 40, } lowercase : Optional[Any] = { '169M': 7_68, '430M': 10_24, '1B5': 20_48, '3B': 25_60, '7B': 40_96, '14B': 51_20, } def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' A : Optional[Any] = list(state_dict.keys() ) for name in state_dict_keys: A : str = state_dict.pop(snake_case__ ) # emb -> embedding if name.startswith('''emb.''' ): A : Optional[Any] = name.replace('''emb.''' , '''embeddings.''' ) # ln_0 -> pre_ln (only present at block 0) if name.startswith('''blocks.0.ln0''' ): A : Union[str, Any] = name.replace('''blocks.0.ln0''' , '''blocks.0.pre_ln''' ) # att -> attention A : int = re.sub(R'''blocks\.(\d+)\.att''' , R'''blocks.\1.attention''' , snake_case__ ) # ffn -> feed_forward A : List[Any] = re.sub(R'''blocks\.(\d+)\.ffn''' , R'''blocks.\1.feed_forward''' , snake_case__ ) # time_mix_k -> time_mix_key and reshape if name.endswith('''.time_mix_k''' ): A : List[str] = name.replace('''.time_mix_k''' , '''.time_mix_key''' ) # time_mix_v -> time_mix_value and reshape if name.endswith('''.time_mix_v''' ): A : Union[str, Any] = name.replace('''.time_mix_v''' , '''.time_mix_value''' ) # time_mix_r -> time_mix_key and reshape if name.endswith('''.time_mix_r''' ): A : Union[str, Any] = name.replace('''.time_mix_r''' , '''.time_mix_receptance''' ) if name != "head.weight": A : List[Any] = '''rwkv.''' + name A : Dict = weight return state_dict def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__=None , snake_case__=None , snake_case__=False , snake_case__=None ): '''simple docstring''' if tokenizer_file is None: print('''No `--tokenizer_file` provided, we will use the default tokenizer.''' ) A : int = 5_0277 A : Optional[int] = AutoTokenizer.from_pretrained('''EleutherAI/gpt-neox-20b''' ) else: A : str = PreTrainedTokenizerFast(tokenizer_file=snake_case__ ) A : Any = len(snake_case__ ) tokenizer.save_pretrained(snake_case__ ) # 2. Build the config A : List[str] = list(NUM_HIDDEN_LAYERS_MAPPING.keys() ) if size is None: # Try to infer size from the checkpoint name for candidate in possible_sizes: if candidate in checkpoint_file: A : List[str] = candidate break if size is None: raise ValueError('''Could not infer the size, please provide it with the `--size` argument.''' ) if size not in possible_sizes: raise ValueError(F'`size` should be one of {possible_sizes}, got {size}.' ) A : Any = RwkvConfig( vocab_size=snake_case__ , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDEN_SIZE_MAPPING[size] , ) config.save_pretrained(snake_case__ ) # 3. Download model file then convert state_dict A : Union[str, Any] = hf_hub_download(snake_case__ , snake_case__ ) A : Tuple = torch.load(snake_case__ , map_location='''cpu''' ) A : List[Any] = convert_state_dict(snake_case__ ) # 4. 
Split in shards and save A, A : List[str] = shard_checkpoint(snake_case__ ) for shard_file, shard in shards.items(): torch.save(snake_case__ , os.path.join(snake_case__ , snake_case__ ) ) if index is not None: A : Dict = os.path.join(snake_case__ , snake_case__ ) # Save the index as well with open(snake_case__ , '''w''' , encoding='''utf-8''' ) as f: A : List[Any] = json.dumps(snake_case__ , indent=2 , sort_keys=snake_case__ ) + '''\n''' f.write(snake_case__ ) # 5. Clean up shards (for some reason the file PyTorch saves take the same space as the whole state_dict print( '''Cleaning up shards. This may error with an OOM error, it this is the case don\'t worry you still have converted the model.''' ) A : List[Any] = list(shards.keys() ) del state_dict del shards gc.collect() for shard_file in shard_files: A : Union[str, Any] = torch.load(os.path.join(snake_case__ , snake_case__ ) ) torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(snake_case__ , snake_case__ ) ) del state_dict gc.collect() if push_to_hub: if model_name is None: raise ValueError('''Please provide a `model_name` to push the model to the Hub.''' ) A : int = AutoModelForCausalLM.from_pretrained(snake_case__ ) model.push_to_hub(snake_case__ , max_shard_size='''2GB''' ) tokenizer.push_to_hub(snake_case__ ) if __name__ == "__main__": lowercase : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--repo_id', default=None, type=str, required=True, help='Repo ID from which to pull the checkpoint.' ) parser.add_argument( '--checkpoint_file', default=None, type=str, required=True, help='Name of the checkpoint file in the repo.' ) parser.add_argument( '--output_dir', default=None, type=str, required=True, help='Where to save the converted model.' ) parser.add_argument( '--tokenizer_file', default=None, type=str, help='Path to the tokenizer file to use (if not provided, only the model is converted).', ) parser.add_argument( '--size', default=None, type=str, help='Size of the model. Will be inferred from the `checkpoint_file` if not passed.', ) parser.add_argument( '--push_to_hub', action='store_true', help='Push to the Hub the converted model.', ) parser.add_argument( '--model_name', default=None, type=str, help='Name of the pushed model on the Hub, including the username / organization.', ) lowercase : Union[str, Any] = parser.parse_args() convert_rmkv_checkpoint_to_hf_format( args.repo_id, args.checkpoint_file, args.output_dir, size=args.size, tokenizer_file=args.tokenizer_file, push_to_hub=args.push_to_hub, model_name=args.model_name, )
style_context_codestyle: 3
label: 0

--- Row 4 ---
code:
"""simple docstring""" import json from typing import TYPE_CHECKING, List, Optional, Tuple from tokenizers import pre_tokenizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation A_ = logging.get_logger(__name__) A_ = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''} A_ = { '''tokenizer_file''': { '''EleutherAI/gpt-neox-20b''': '''https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json''', }, } A_ = { '''gpt-neox-20b''': 20_48, } class lowercase( __a ): '''simple docstring''' lowercase__ = VOCAB_FILES_NAMES lowercase__ = PRETRAINED_VOCAB_FILES_MAP lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase__ = ["input_ids", "attention_mask"] def __init__( self: Dict, a_: Dict=None, a_: Tuple=None, a_: Dict=None, a_: str="<|endoftext|>", a_: Union[str, Any]="<|endoftext|>", a_: Tuple="<|endoftext|>", a_: Optional[int]=False, **a_: str, ): '''simple docstring''' super().__init__( a_, a_, tokenizer_file=a_, unk_token=a_, bos_token=a_, eos_token=a_, add_prefix_space=a_, **a_, ) _snake_case : int = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("""add_prefix_space""", a_ ) != add_prefix_space: _snake_case : Optional[int] = getattr(a_, pre_tok_state.pop("""type""" ) ) _snake_case : Any = add_prefix_space _snake_case : List[str] = pre_tok_class(**a_ ) _snake_case : Tuple = add_prefix_space def UpperCamelCase_ ( self: List[Any], a_: str, a_: Optional[str] = None ): '''simple docstring''' _snake_case : Dict = self._tokenizer.model.save(a_, name=a_ ) return tuple(a_ ) def UpperCamelCase_ ( self: List[Any], a_: "Conversation" ): '''simple docstring''' _snake_case : List[Any] = [] for is_user, text in conversation.iter_texts(): input_ids.extend(self.encode(a_, add_special_tokens=a_ ) + [self.eos_token_id] ) if len(a_ ) > self.model_max_length: _snake_case : int = input_ids[-self.model_max_length :] return input_ids
code_codestyle: 64

style_context:
'''simple docstring''' from typing import Union from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING lowercase : str = logging.get_logger(__name__) @add_end_docstrings(__snake_case ) class A ( __snake_case ): def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> List[Any]: """simple docstring""" super().__init__(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) self.check_model_type(SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , **SCREAMING_SNAKE_CASE ) -> Tuple: """simple docstring""" A, A : Dict = {}, {} if padding is not None: A : List[str] = padding if truncation is not None: A : Dict = truncation if top_k is not None: A : Optional[Any] = top_k return preprocess_params, {}, postprocess_params def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" if isinstance(SCREAMING_SNAKE_CASE , (Image.Image, str) ) and isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): A : int = {'''image''': image, '''question''': question} else: A : Any = image A : Any = super().__call__(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) return results def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=False ) -> Any: """simple docstring""" A : Union[str, Any] = load_image(inputs['''image'''] ) A : Optional[Any] = self.tokenizer( inputs['''question'''] , return_tensors=self.framework , padding=SCREAMING_SNAKE_CASE , truncation=SCREAMING_SNAKE_CASE ) A : Dict = self.image_processor(images=SCREAMING_SNAKE_CASE , return_tensors=self.framework ) model_inputs.update(SCREAMING_SNAKE_CASE ) return model_inputs def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> List[str]: """simple docstring""" A : List[Any] = self.model(**SCREAMING_SNAKE_CASE ) return model_outputs def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=5 ) -> int: """simple docstring""" if top_k > self.model.config.num_labels: A : Dict = self.model.config.num_labels if self.framework == "pt": A : Optional[int] = model_outputs.logits.sigmoid()[0] A, A : int = probs.topk(SCREAMING_SNAKE_CASE ) else: raise ValueError(F'Unsupported framework: {self.framework}' ) A : int = scores.tolist() A : List[str] = ids.tolist() return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )]
style_context_codestyle: 3
label: 0

--- Row 5 ---
code:
from __future__ import annotations from scipy.special import comb # type: ignore class A : def __init__(self : List[Any] , __UpperCAmelCase : list[tuple[float, float]] ) -> List[str]: """simple docstring""" UpperCAmelCase__ = list_of_points # Degree determines the flexibility of the curve. # Degree = 1 will produce a straight line. UpperCAmelCase__ = len(__UpperCAmelCase ) - 1 def lowercase_ (self : int , __UpperCAmelCase : float ) -> list[float]: """simple docstring""" assert 0 <= t <= 1, "Time t must be between 0 and 1." UpperCAmelCase__ = [] for i in range(len(self.list_of_points ) ): # basis function for each i output_values.append( comb(self.degree , __UpperCAmelCase ) * ((1 - t) ** (self.degree - i)) * (t**i) ) # the basis must sum up to 1 for it to produce a valid Bezier curve. assert round(sum(__UpperCAmelCase ) , 5 ) == 1 return output_values def lowercase_ (self : Dict , __UpperCAmelCase : float ) -> tuple[float, float]: """simple docstring""" assert 0 <= t <= 1, "Time t must be between 0 and 1." UpperCAmelCase__ = self.basis_function(__UpperCAmelCase ) UpperCAmelCase__ = 0.0 UpperCAmelCase__ = 0.0 for i in range(len(self.list_of_points ) ): # For all points, sum up the product of i-th basis function and i-th point. x += basis_function[i] * self.list_of_points[i][0] y += basis_function[i] * self.list_of_points[i][1] return (x, y) def lowercase_ (self : Optional[int] , __UpperCAmelCase : float = 0.01 ) -> Optional[int]: """simple docstring""" from matplotlib import pyplot as plt # type: ignore UpperCAmelCase__ = [] # x coordinates of points to plot UpperCAmelCase__ = [] # y coordinates of points to plot UpperCAmelCase__ = 0.0 while t <= 1: UpperCAmelCase__ = self.bezier_curve_function(__UpperCAmelCase ) to_plot_x.append(value[0] ) to_plot_y.append(value[1] ) t += step_size UpperCAmelCase__ = [i[0] for i in self.list_of_points] UpperCAmelCase__ = [i[1] for i in self.list_of_points] plt.plot( __UpperCAmelCase , __UpperCAmelCase , color="blue" , label="Curve of Degree " + str(self.degree ) , ) plt.scatter(__UpperCAmelCase , __UpperCAmelCase , color="red" , label="Control Points" ) plt.legend() plt.show() if __name__ == "__main__": import doctest doctest.testmod() BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1 BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2 BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
code_codestyle: 65

style_context:
'''simple docstring''' from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowercase : Union[str, Any] = logging.get_logger(__name__) lowercase : str = { 'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/config.json', 'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/config.json', 'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/config.json', 'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/config.json', 'bert-base-multilingual-uncased': 'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json', 'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json', 'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/config.json', 'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/config.json', 'bert-large-uncased-whole-word-masking': ( 'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json' ), 'bert-large-cased-whole-word-masking': ( 'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json' ), 'bert-large-uncased-whole-word-masking-finetuned-squad': ( 'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json' ), 'bert-large-cased-whole-word-masking-finetuned-squad': ( 'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json' ), 'bert-base-cased-finetuned-mrpc': 'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json', 'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json', 'bert-base-german-dbmdz-uncased': 'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json', 'cl-tohoku/bert-base-japanese': 'https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json', 'cl-tohoku/bert-base-japanese-whole-word-masking': ( 'https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json' ), 'cl-tohoku/bert-base-japanese-char': ( 'https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json' ), 'cl-tohoku/bert-base-japanese-char-whole-word-masking': ( 'https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json' ), 'TurkuNLP/bert-base-finnish-cased-v1': ( 'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json' ), 'TurkuNLP/bert-base-finnish-uncased-v1': ( 'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json' ), 'wietsedv/bert-base-dutch-cased': 'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json', # See all BERT models at https://huggingface.co/models?filter=bert } class A ( __snake_case ): __magic_name__ = '''bert''' def __init__( self , SCREAMING_SNAKE_CASE=30522 , SCREAMING_SNAKE_CASE=768 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=3072 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=512 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=1e-12 , SCREAMING_SNAKE_CASE=0 , SCREAMING_SNAKE_CASE="absolute" , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=None , **SCREAMING_SNAKE_CASE , ) -> 
Optional[int]: """simple docstring""" super().__init__(pad_token_id=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) A : Optional[int] = vocab_size A : Optional[Any] = hidden_size A : List[Any] = num_hidden_layers A : List[str] = num_attention_heads A : Dict = hidden_act A : Optional[Any] = intermediate_size A : List[Any] = hidden_dropout_prob A : List[Any] = attention_probs_dropout_prob A : Optional[Any] = max_position_embeddings A : List[str] = type_vocab_size A : Dict = initializer_range A : str = layer_norm_eps A : int = position_embedding_type A : Dict = use_cache A : str = classifier_dropout class A ( __snake_case ): @property def __lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" if self.task == "multiple-choice": A : Optional[Any] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: A : Optional[int] = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ('''token_type_ids''', dynamic_axis), ] )
style_context_codestyle: 3
label: 0

--- Row 6 ---
code:
"""simple docstring""" import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor from transformers.utils import logging logging.set_verbosity_info() __a = logging.get_logger(__name__) def A_ ( _lowercase, _lowercase=False ): '''simple docstring''' snake_case_ :List[Any] = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f"""blocks.{i}.norm1.weight""", f"""deit.encoder.layer.{i}.layernorm_before.weight""") ) rename_keys.append((f"""blocks.{i}.norm1.bias""", f"""deit.encoder.layer.{i}.layernorm_before.bias""") ) rename_keys.append((f"""blocks.{i}.attn.proj.weight""", f"""deit.encoder.layer.{i}.attention.output.dense.weight""") ) rename_keys.append((f"""blocks.{i}.attn.proj.bias""", f"""deit.encoder.layer.{i}.attention.output.dense.bias""") ) rename_keys.append((f"""blocks.{i}.norm2.weight""", f"""deit.encoder.layer.{i}.layernorm_after.weight""") ) rename_keys.append((f"""blocks.{i}.norm2.bias""", f"""deit.encoder.layer.{i}.layernorm_after.bias""") ) rename_keys.append((f"""blocks.{i}.mlp.fc1.weight""", f"""deit.encoder.layer.{i}.intermediate.dense.weight""") ) rename_keys.append((f"""blocks.{i}.mlp.fc1.bias""", f"""deit.encoder.layer.{i}.intermediate.dense.bias""") ) rename_keys.append((f"""blocks.{i}.mlp.fc2.weight""", f"""deit.encoder.layer.{i}.output.dense.weight""") ) rename_keys.append((f"""blocks.{i}.mlp.fc2.bias""", f"""deit.encoder.layer.{i}.output.dense.bias""") ) # projection layer + position embeddings rename_keys.extend( [ ("""cls_token""", """deit.embeddings.cls_token"""), ("""dist_token""", """deit.embeddings.distillation_token"""), ("""patch_embed.proj.weight""", """deit.embeddings.patch_embeddings.projection.weight"""), ("""patch_embed.proj.bias""", """deit.embeddings.patch_embeddings.projection.bias"""), ("""pos_embed""", """deit.embeddings.position_embeddings"""), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ("""norm.weight""", """layernorm.weight"""), ("""norm.bias""", """layernorm.bias"""), ("""pre_logits.fc.weight""", """pooler.dense.weight"""), ("""pre_logits.fc.bias""", """pooler.dense.bias"""), ] ) # if just the base model, we should remove "deit" from all keys that start with "deit" snake_case_ :List[Any] = [(pair[0], pair[1][4:]) if pair[1].startswith("""deit""" ) else pair for pair in rename_keys] else: # layernorm + classification heads rename_keys.extend( [ ("""norm.weight""", """deit.layernorm.weight"""), ("""norm.bias""", """deit.layernorm.bias"""), ("""head.weight""", """cls_classifier.weight"""), ("""head.bias""", """cls_classifier.bias"""), ("""head_dist.weight""", """distillation_classifier.weight"""), ("""head_dist.bias""", """distillation_classifier.bias"""), ] ) return rename_keys def A_ ( _lowercase, _lowercase, _lowercase=False ): '''simple docstring''' for i in range(config.num_hidden_layers ): if base_model: snake_case_ :Any = """""" else: snake_case_ :str = """deit.""" # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) snake_case_ :str = state_dict.pop(f"""blocks.{i}.attn.qkv.weight""" ) snake_case_ :Any = state_dict.pop(f"""blocks.{i}.attn.qkv.bias""" ) # next, add query, keys and values (in that order) to the state dict snake_case_ :Dict = in_proj_weight[ : config.hidden_size, : ] snake_case_ :int 
= in_proj_bias[: config.hidden_size] snake_case_ :str = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] snake_case_ :str = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] snake_case_ :Union[str, Any] = in_proj_weight[ -config.hidden_size :, : ] snake_case_ :Union[str, Any] = in_proj_bias[-config.hidden_size :] def A_ ( _lowercase, _lowercase, _lowercase ): '''simple docstring''' snake_case_ :int = dct.pop(_lowercase ) snake_case_ :Tuple = val def A_ ( ): '''simple docstring''' snake_case_ :Dict = """http://images.cocodataset.org/val2017/000000039769.jpg""" snake_case_ :Union[str, Any] = Image.open(requests.get(_lowercase, stream=_lowercase ).raw ) return im @torch.no_grad() def A_ ( _lowercase, _lowercase ): '''simple docstring''' snake_case_ :List[Any] = DeiTConfig() # all deit models have fine-tuned heads snake_case_ :Optional[int] = False # dataset (fine-tuned on ImageNet 2012), patch_size and image_size snake_case_ :int = 1000 snake_case_ :Optional[int] = """huggingface/label-files""" snake_case_ :List[Any] = """imagenet-1k-id2label.json""" snake_case_ :str = json.load(open(hf_hub_download(_lowercase, _lowercase, repo_type="""dataset""" ), """r""" ) ) snake_case_ :Dict = {int(_lowercase ): v for k, v in idalabel.items()} snake_case_ :Optional[Any] = idalabel snake_case_ :Union[str, Any] = {v: k for k, v in idalabel.items()} snake_case_ :Any = int(deit_name[-6:-4] ) snake_case_ :Any = int(deit_name[-3:] ) # size of the architecture if deit_name[9:].startswith("""tiny""" ): snake_case_ :Tuple = 192 snake_case_ :Optional[int] = 768 snake_case_ :Tuple = 12 snake_case_ :Tuple = 3 elif deit_name[9:].startswith("""small""" ): snake_case_ :List[Any] = 384 snake_case_ :Dict = 1536 snake_case_ :Optional[int] = 12 snake_case_ :str = 6 if deit_name[9:].startswith("""base""" ): pass elif deit_name[4:].startswith("""large""" ): snake_case_ :int = 1024 snake_case_ :List[Any] = 4096 snake_case_ :Any = 24 snake_case_ :Optional[int] = 16 # load original model from timm snake_case_ :int = timm.create_model(_lowercase, pretrained=_lowercase ) timm_model.eval() # load state_dict of original model, remove and rename some keys snake_case_ :Any = timm_model.state_dict() snake_case_ :Optional[Any] = create_rename_keys(_lowercase, _lowercase ) for src, dest in rename_keys: rename_key(_lowercase, _lowercase, _lowercase ) read_in_q_k_v(_lowercase, _lowercase, _lowercase ) # load HuggingFace model snake_case_ :Union[str, Any] = DeiTForImageClassificationWithTeacher(_lowercase ).eval() model.load_state_dict(_lowercase ) # Check outputs on an image, prepared by DeiTImageProcessor snake_case_ :Optional[Any] = int( (256 / 224) * config.image_size ) # to maintain same ratio w.r.t. 
224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103 snake_case_ :Any = DeiTImageProcessor(size=_lowercase, crop_size=config.image_size ) snake_case_ :List[str] = image_processor(images=prepare_img(), return_tensors="""pt""" ) snake_case_ :Optional[Any] = encoding["""pixel_values"""] snake_case_ :Optional[Any] = model(_lowercase ) snake_case_ :Dict = timm_model(_lowercase ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(_lowercase, outputs.logits, atol=1e-3 ) Path(_lowercase ).mkdir(exist_ok=_lowercase ) print(f"""Saving model {deit_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(_lowercase ) print(f"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(_lowercase ) if __name__ == "__main__": __a = argparse.ArgumentParser() # Required parameters parser.add_argument( "--deit_name", default="vit_deit_base_distilled_patch16_224", type=str, help="Name of the DeiT timm model you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) __a = parser.parse_args() convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
'''simple docstring'''
import requests
from bs4 import BeautifulSoup  # fixed: the package is bs4, not "bsa"


def get_citation(base_url: str, params: dict) -> str:
    '''Return the citation-count anchor text from a Google Scholar lookup page.'''
    soup = BeautifulSoup(requests.get(base_url, params=params).content, '''html.parser''')
    div = soup.find('''div''', attrs={'''class''': '''gs_ri'''})
    anchors = div.find('''div''', attrs={'''class''': '''gs_fl'''}).find_all('''a''')
    return anchors[2].get_text()


if __name__ == "__main__":
    params = {
        'title': (
            'Precisely geometry controlled microsupercapacitors for ultrahigh areal '
            'capacitance, volumetric capacitance, and energy density'
        ),
        'journal': 'Chem. Mater.',
        'volume': 30,
        'pages': '3979-3990',
        'year': 2018,
        'hl': 'en',
    }
    print(get_citation('https://scholar.google.com/scholar_lookup', params=params))
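# Added note: anchors[2] is the third link in Scholar's "gs_fl" row, which is
# typically the "Cited by N" link, so this returns a string such as
# "Cited by 123". This depends on Google Scholar's current markup and may
# break if the page layout changes; automated requests may also be blocked.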
'''simple docstring''' import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, EulerAncestralDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionPanoramaPipeline, UNetaDConditionModel, ) from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() @skip_mps class a__ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ): lowerCamelCase : int =StableDiffusionPanoramaPipeline lowerCamelCase : Dict =TEXT_TO_IMAGE_PARAMS lowerCamelCase : Union[str, Any] =TEXT_TO_IMAGE_BATCH_PARAMS lowerCamelCase : Dict =TEXT_TO_IMAGE_IMAGE_PARAMS lowerCamelCase : int =TEXT_TO_IMAGE_IMAGE_PARAMS def SCREAMING_SNAKE_CASE__ ( self : Dict ): """simple docstring""" torch.manual_seed(0 ) __lowerCamelCase = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , ) __lowerCamelCase = DDIMScheduler() torch.manual_seed(0 ) __lowerCamelCase = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , ) torch.manual_seed(0 ) __lowerCamelCase = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) __lowerCamelCase = CLIPTextModel(a ) __lowerCamelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) __lowerCamelCase = { '''unet''': unet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''safety_checker''': None, '''feature_extractor''': None, } return components def SCREAMING_SNAKE_CASE__ ( self : List[str] , a : Dict , a : Union[str, Any]=0 ): """simple docstring""" __lowerCamelCase = torch.manual_seed(a ) __lowerCamelCase = { '''prompt''': '''a photo of the dolomites''', '''generator''': generator, # Setting height and width to None to prevent OOMs on CPU. 
'''height''': None, '''width''': None, '''num_inference_steps''': 1, '''guidance_scale''': 6.0, '''output_type''': '''numpy''', } return inputs def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ): """simple docstring""" __lowerCamelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator __lowerCamelCase = self.get_dummy_components() __lowerCamelCase = StableDiffusionPanoramaPipeline(**a ) __lowerCamelCase = sd_pipe.to(a ) sd_pipe.set_progress_bar_config(disable=a ) __lowerCamelCase = self.get_dummy_inputs(a ) __lowerCamelCase = sd_pipe(**a ).images __lowerCamelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) __lowerCamelCase = np.array([0.61_86, 0.53_74, 0.49_15, 0.41_35, 0.41_14, 0.45_63, 0.51_28, 0.49_77, 0.47_57] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def SCREAMING_SNAKE_CASE__ ( self : Tuple ): """simple docstring""" super().test_inference_batch_consistent(batch_sizes=[1, 2] ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ): """simple docstring""" super().test_inference_batch_single_identical(batch_size=2 , expected_max_diff=3.2_5e-3 ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] ): """simple docstring""" __lowerCamelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator __lowerCamelCase = self.get_dummy_components() __lowerCamelCase = StableDiffusionPanoramaPipeline(**a ) __lowerCamelCase = sd_pipe.to(a ) sd_pipe.set_progress_bar_config(disable=a ) __lowerCamelCase = self.get_dummy_inputs(a ) __lowerCamelCase = '''french fries''' __lowerCamelCase = sd_pipe(**a , negative_prompt=a ) __lowerCamelCase = output.images __lowerCamelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) __lowerCamelCase = np.array([0.61_87, 0.53_75, 0.49_15, 0.41_36, 0.41_14, 0.45_63, 0.51_28, 0.49_76, 0.47_57] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def SCREAMING_SNAKE_CASE__ ( self : Dict ): """simple docstring""" __lowerCamelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator __lowerCamelCase = self.get_dummy_components() __lowerCamelCase = StableDiffusionPanoramaPipeline(**a ) __lowerCamelCase = sd_pipe.to(a ) sd_pipe.set_progress_bar_config(disable=a ) __lowerCamelCase = self.get_dummy_inputs(a ) __lowerCamelCase = sd_pipe(**a , view_batch_size=2 ) __lowerCamelCase = output.images __lowerCamelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) __lowerCamelCase = np.array([0.61_87, 0.53_75, 0.49_15, 0.41_36, 0.41_14, 0.45_63, 0.51_28, 0.49_76, 0.47_57] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def SCREAMING_SNAKE_CASE__ ( self : Any ): """simple docstring""" __lowerCamelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator __lowerCamelCase = self.get_dummy_components() __lowerCamelCase = EulerAncestralDiscreteScheduler( beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='''scaled_linear''' ) __lowerCamelCase = StableDiffusionPanoramaPipeline(**a ) __lowerCamelCase = sd_pipe.to(a ) sd_pipe.set_progress_bar_config(disable=a ) __lowerCamelCase = self.get_dummy_inputs(a ) __lowerCamelCase = sd_pipe(**a ).images __lowerCamelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) __lowerCamelCase = np.array([0.40_24, 0.65_10, 0.49_01, 0.53_78, 0.58_13, 0.56_22, 0.47_95, 0.44_67, 0.49_52] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ): """simple docstring""" __lowerCamelCase = 
'''cpu''' # ensure determinism for the device-dependent torch.Generator __lowerCamelCase = self.get_dummy_components() __lowerCamelCase = PNDMScheduler( beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='''scaled_linear''' , skip_prk_steps=a ) __lowerCamelCase = StableDiffusionPanoramaPipeline(**a ) __lowerCamelCase = sd_pipe.to(a ) sd_pipe.set_progress_bar_config(disable=a ) __lowerCamelCase = self.get_dummy_inputs(a ) __lowerCamelCase = sd_pipe(**a ).images __lowerCamelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) __lowerCamelCase = np.array([0.63_91, 0.62_91, 0.48_61, 0.51_34, 0.55_52, 0.45_78, 0.50_32, 0.50_23, 0.45_39] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 @slow @require_torch_gpu class a__ ( unittest.TestCase ): def SCREAMING_SNAKE_CASE__ ( self : Any ): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def SCREAMING_SNAKE_CASE__ ( self : str , a : str=0 ): """simple docstring""" __lowerCamelCase = torch.manual_seed(a ) __lowerCamelCase = { '''prompt''': '''a photo of the dolomites''', '''generator''': generator, '''num_inference_steps''': 3, '''guidance_scale''': 7.5, '''output_type''': '''numpy''', } return inputs def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ): """simple docstring""" __lowerCamelCase = '''stabilityai/stable-diffusion-2-base''' __lowerCamelCase = DDIMScheduler.from_pretrained(a , subfolder='''scheduler''' ) __lowerCamelCase = StableDiffusionPanoramaPipeline.from_pretrained(a , scheduler=a , safety_checker=a ) pipe.to(a ) pipe.set_progress_bar_config(disable=a ) pipe.enable_attention_slicing() __lowerCamelCase = self.get_inputs() __lowerCamelCase = pipe(**a ).images __lowerCamelCase = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 5_12, 20_48, 3) __lowerCamelCase = np.array( [ 0.36_96_83_92, 0.27_02_53_72, 0.32_44_67_66, 0.28_37_93_87, 0.36_36_32_74, 0.30_73_33_47, 0.27_10_00_27, 0.27_05_41_25, 0.25_53_60_96, ] ) assert np.abs(expected_slice - image_slice ).max() < 1e-2 def SCREAMING_SNAKE_CASE__ ( self : Any ): """simple docstring""" __lowerCamelCase = StableDiffusionPanoramaPipeline.from_pretrained( '''stabilityai/stable-diffusion-2-base''' , safety_checker=a ) __lowerCamelCase = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.to(a ) pipe.set_progress_bar_config(disable=a ) pipe.enable_attention_slicing() __lowerCamelCase = self.get_inputs() __lowerCamelCase = pipe(**a ).images __lowerCamelCase = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 5_12, 20_48, 3) __lowerCamelCase = np.array( [ [ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ] ] ) assert np.abs(expected_slice - image_slice ).max() < 1e-3 def SCREAMING_SNAKE_CASE__ ( self : Dict ): """simple docstring""" __lowerCamelCase = 0 def callback_fn(a : int , a : int , a : torch.FloatTensor ) -> None: __lowerCamelCase = True nonlocal number_of_steps number_of_steps += 1 if step == 1: __lowerCamelCase = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 2_56) __lowerCamelCase = latents[0, -3:, -3:, -1] __lowerCamelCase = np.array( [ 0.18_68_18_69, 0.33_90_78_16, 0.5_36_12_76, 0.14_43_28_65, -0.02_85_66_11, -0.73_94_11_23, 0.23_39_79_87, 0.47_32_26_82, -0.37_82_31_64, ] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2 elif step == 2: __lowerCamelCase = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 2_56) __lowerCamelCase = latents[0, -3:, -3:, -1] __lowerCamelCase = np.array( [ 0.18_53_96_45, 0.33_98_72_48, 0.5_37_85_59, 
0.14_43_71_42, -0.02_45_52_61, -0.7_33_83_17, 0.23_99_07_55, 0.47_35_62_72, -0.3_78_65_05, ] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2 __lowerCamelCase = False __lowerCamelCase = '''stabilityai/stable-diffusion-2-base''' __lowerCamelCase = DDIMScheduler.from_pretrained(a , subfolder='''scheduler''' ) __lowerCamelCase = StableDiffusionPanoramaPipeline.from_pretrained(a , scheduler=a , safety_checker=a ) __lowerCamelCase = pipe.to(a ) pipe.set_progress_bar_config(disable=a ) pipe.enable_attention_slicing() __lowerCamelCase = self.get_inputs() pipe(**a , callback=a , callback_steps=1 ) assert callback_fn.has_been_called assert number_of_steps == 3 def SCREAMING_SNAKE_CASE__ ( self : Tuple ): """simple docstring""" torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() __lowerCamelCase = '''stabilityai/stable-diffusion-2-base''' __lowerCamelCase = DDIMScheduler.from_pretrained(a , subfolder='''scheduler''' ) __lowerCamelCase = StableDiffusionPanoramaPipeline.from_pretrained(a , scheduler=a , safety_checker=a ) __lowerCamelCase = pipe.to(a ) pipe.set_progress_bar_config(disable=a ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() __lowerCamelCase = self.get_inputs() __lowerCamelCase = pipe(**a ) __lowerCamelCase = torch.cuda.max_memory_allocated() # make sure that less than 5.2 GB is allocated assert mem_bytes < 5.5 * 10**9
'''simple docstring'''


class FlowNetwork:
    def __init__(self, graph, sources, sinks):
        self.source_index = None
        self.sink_index = None
        self.graph = graph

        self._normalize_graph(sources, sinks)
        self.vertices_count = len(graph)
        self.maximum_flow_algorithm = None

    # make only one source and one sink
    def _normalize_graph(self, sources, sinks):
        # fixed: "sources is int" can never be true; isinstance is the intended check
        if isinstance(sources, int):
            sources = [sources]
        if isinstance(sinks, int):
            sinks = [sinks]

        if len(sources) == 0 or len(sinks) == 0:
            return

        self.source_index = sources[0]
        self.sink_index = sinks[0]

        # make fake vertex if there are more
        # than one source or sink
        if len(sources) > 1 or len(sinks) > 1:
            max_input_flow = 0
            for i in sources:
                max_input_flow += sum(self.graph[i])

            size = len(self.graph) + 1
            for room in self.graph:
                room.insert(0, 0)
            self.graph.insert(0, [0] * size)
            for i in sources:
                self.graph[0][i + 1] = max_input_flow
            self.source_index = 0

            size = len(self.graph) + 1
            for room in self.graph:
                room.append(0)
            self.graph.append([0] * size)
            for i in sinks:
                self.graph[i + 1][size - 1] = max_input_flow
            self.sink_index = size - 1

    def find_maximum_flow(self):
        if self.maximum_flow_algorithm is None:
            raise Exception('''You need to set maximum flow algorithm before.''')
        if self.source_index is None or self.sink_index is None:
            return 0

        self.maximum_flow_algorithm.execute()
        return self.maximum_flow_algorithm.getMaximumFlow()

    def set_maximum_flow_algorithm(self, algorithm):
        self.maximum_flow_algorithm = algorithm(self)


class FlowNetworkAlgorithmExecutor:
    def __init__(self, flow_network):
        self.flow_network = flow_network
        self.verticies_count = flow_network.vertices_count
        self.source_index = flow_network.source_index
        self.sink_index = flow_network.sink_index
        # it's just a reference, so you shouldn't change
        # it in your algorithms, use deep copy before doing that
        self.graph = flow_network.graph
        self.executed = False

    def execute(self):
        if not self.executed:
            self._algorithm()
            self.executed = True

    # You should override it
    def _algorithm(self):
        pass


class MaximumFlowAlgorithmExecutor(FlowNetworkAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)
        # use this to save your result
        self.maximum_flow = -1

    def getMaximumFlow(self):
        if not self.executed:
            raise Exception('''You should execute algorithm before using its result!''')
        return self.maximum_flow


class PushRelabelExecutor(MaximumFlowAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)

        self.preflow = [[0] * self.verticies_count for i in range(self.verticies_count)]
        self.heights = [0] * self.verticies_count
        self.excesses = [0] * self.verticies_count

    def _algorithm(self):
        self.heights[self.source_index] = self.verticies_count

        # push some substance to graph
        for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index]):
            self.preflow[self.source_index][nextvertex_index] += bandwidth
            self.preflow[nextvertex_index][self.source_index] -= bandwidth
            self.excesses[nextvertex_index] += bandwidth

        # Relabel-to-front selection rule
        vertices_list = [
            i
            for i in range(self.verticies_count)
            if i != self.source_index and i != self.sink_index
        ]

        # move through list
        i = 0
        while i < len(vertices_list):
            vertex_index = vertices_list[i]
            previous_height = self.heights[vertex_index]
            self.process_vertex(vertex_index)
            if self.heights[vertex_index] > previous_height:
                # if it was relabeled, swap elements
                # and start from 0 index
                vertices_list.insert(0, vertices_list.pop(i))
                i = 0
            else:
                i += 1

        self.maximum_flow = sum(self.preflow[self.source_index])

    def process_vertex(self, vertex_index):
        while self.excesses[vertex_index] > 0:
            for neighbour_index in range(self.verticies_count):
                # if it's neighbour and current vertex is higher
                if (
                    self.graph[vertex_index][neighbour_index]
                    - self.preflow[vertex_index][neighbour_index]
                    > 0
                    and self.heights[vertex_index] > self.heights[neighbour_index]
                ):
                    self.push(vertex_index, neighbour_index)

            self.relabel(vertex_index)

    def push(self, from_index, to_index):
        preflow_delta = min(
            self.excesses[from_index],
            self.graph[from_index][to_index] - self.preflow[from_index][to_index],
        )
        self.preflow[from_index][to_index] += preflow_delta
        self.preflow[to_index][from_index] -= preflow_delta
        self.excesses[from_index] -= preflow_delta
        self.excesses[to_index] += preflow_delta

    def relabel(self, vertex_index):
        min_height = None
        for to_index in range(self.verticies_count):
            if (
                self.graph[vertex_index][to_index]
                - self.preflow[vertex_index][to_index]
                > 0
            ) and (min_height is None or self.heights[to_index] < min_height):
                min_height = self.heights[to_index]

        if min_height is not None:
            self.heights[vertex_index] = min_height + 1


if __name__ == "__main__":
    entrances = [0]
    exits = [3]
    # graph = [
    #     [0, 0, 4, 6, 0, 0],
    #     [0, 0, 5, 2, 0, 0],
    #     [0, 0, 0, 0, 4, 4],
    #     [0, 0, 0, 0, 6, 6],
    #     [0, 0, 0, 0, 0, 0],
    #     [0, 0, 0, 0, 0, 0],
    # ]
    graph = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]

    # prepare our network
    flow_network = FlowNetwork(graph, entrances, exits)
    # set algorithm
    flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
    # and calculate
    maximum_flow = flow_network.find_maximum_flow()
    print(f'''maximum flow is {maximum_flow}''')
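# Added note: for the 4-vertex sample graph above, the only path from the
# source (0) to the sink (3) is 0 -> 1 -> 2 -> 3, with edge capacities 7, 6
# and 8, so the bottleneck, and hence the printed maximum flow, is 6.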
def solution(n: int = 600851475143) -> int:
    '''Return the largest prime factor of n (Project Euler problem 3).'''
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans)


if __name__ == "__main__":
    print(f"""{solution() = }""")
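# Added sanity check (Project Euler's worked example): the largest prime
# factor of 13195 is 29.
assert solution(13195) == 29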
'''simple docstring'''


def solution(n: int = 10) -> str:
    '''Return the last n digits of 28433 * 2**7830457 + 1 (Project Euler problem 97).'''
    if not isinstance(n, int) or n < 0:
        raise ValueError('''Invalid input''')
    modulus = 10**n
    # pow with a modulus argument performs fast modular exponentiation,
    # so the huge power is never materialized in full
    number = 28433 * (pow(2, 7830457, modulus)) + 1
    return str(number % modulus)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print(f'''{solution(10) = }''')
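# Added note: the widely published answer to Project Euler problem 97 is
# 8739992577, i.e. solution(10) should return "8739992577".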
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from tokenizers import processors from ...tokenization_utils import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_nllb import NllbTokenizer else: __UpperCamelCase = None __UpperCamelCase = logging.get_logger(__name__) __UpperCamelCase = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''} __UpperCamelCase = { '''vocab_file''': { '''facebook/nllb-200-distilled-600M''': ( '''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model''' ), }, '''tokenizer_file''': { '''facebook/nllb-200-distilled-600M''': ( '''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json''' ), }, } __UpperCamelCase = { '''facebook/nllb-large-en-ro''': 1024, '''facebook/nllb-200-distilled-600M''': 1024, } # fmt: off __UpperCamelCase = ['''ace_Arab''', '''ace_Latn''', '''acm_Arab''', '''acq_Arab''', '''aeb_Arab''', '''afr_Latn''', '''ajp_Arab''', '''aka_Latn''', '''amh_Ethi''', '''apc_Arab''', '''arb_Arab''', '''ars_Arab''', '''ary_Arab''', '''arz_Arab''', '''asm_Beng''', '''ast_Latn''', '''awa_Deva''', '''ayr_Latn''', '''azb_Arab''', '''azj_Latn''', '''bak_Cyrl''', '''bam_Latn''', '''ban_Latn''', '''bel_Cyrl''', '''bem_Latn''', '''ben_Beng''', '''bho_Deva''', '''bjn_Arab''', '''bjn_Latn''', '''bod_Tibt''', '''bos_Latn''', '''bug_Latn''', '''bul_Cyrl''', '''cat_Latn''', '''ceb_Latn''', '''ces_Latn''', '''cjk_Latn''', '''ckb_Arab''', '''crh_Latn''', '''cym_Latn''', '''dan_Latn''', '''deu_Latn''', '''dik_Latn''', '''dyu_Latn''', '''dzo_Tibt''', '''ell_Grek''', '''eng_Latn''', '''epo_Latn''', '''est_Latn''', '''eus_Latn''', '''ewe_Latn''', '''fao_Latn''', '''pes_Arab''', '''fij_Latn''', '''fin_Latn''', '''fon_Latn''', '''fra_Latn''', '''fur_Latn''', '''fuv_Latn''', '''gla_Latn''', '''gle_Latn''', '''glg_Latn''', '''grn_Latn''', '''guj_Gujr''', '''hat_Latn''', '''hau_Latn''', '''heb_Hebr''', '''hin_Deva''', '''hne_Deva''', '''hrv_Latn''', '''hun_Latn''', '''hye_Armn''', '''ibo_Latn''', '''ilo_Latn''', '''ind_Latn''', '''isl_Latn''', '''ita_Latn''', '''jav_Latn''', '''jpn_Jpan''', '''kab_Latn''', '''kac_Latn''', '''kam_Latn''', '''kan_Knda''', '''kas_Arab''', '''kas_Deva''', '''kat_Geor''', '''knc_Arab''', '''knc_Latn''', '''kaz_Cyrl''', '''kbp_Latn''', '''kea_Latn''', '''khm_Khmr''', '''kik_Latn''', '''kin_Latn''', '''kir_Cyrl''', '''kmb_Latn''', '''kon_Latn''', '''kor_Hang''', '''kmr_Latn''', '''lao_Laoo''', '''lvs_Latn''', '''lij_Latn''', '''lim_Latn''', '''lin_Latn''', '''lit_Latn''', '''lmo_Latn''', '''ltg_Latn''', '''ltz_Latn''', '''lua_Latn''', '''lug_Latn''', '''luo_Latn''', '''lus_Latn''', '''mag_Deva''', '''mai_Deva''', '''mal_Mlym''', '''mar_Deva''', '''min_Latn''', '''mkd_Cyrl''', '''plt_Latn''', '''mlt_Latn''', '''mni_Beng''', '''khk_Cyrl''', '''mos_Latn''', '''mri_Latn''', '''zsm_Latn''', '''mya_Mymr''', '''nld_Latn''', '''nno_Latn''', '''nob_Latn''', '''npi_Deva''', '''nso_Latn''', '''nus_Latn''', '''nya_Latn''', '''oci_Latn''', '''gaz_Latn''', '''ory_Orya''', '''pag_Latn''', '''pan_Guru''', '''pap_Latn''', '''pol_Latn''', '''por_Latn''', '''prs_Arab''', '''pbt_Arab''', '''quy_Latn''', '''ron_Latn''', '''run_Latn''', '''rus_Cyrl''', '''sag_Latn''', '''san_Deva''', '''sat_Beng''', '''scn_Latn''', '''shn_Mymr''', '''sin_Sinh''', '''slk_Latn''', 
'''slv_Latn''', '''smo_Latn''', '''sna_Latn''', '''snd_Arab''', '''som_Latn''', '''sot_Latn''', '''spa_Latn''', '''als_Latn''', '''srd_Latn''', '''srp_Cyrl''', '''ssw_Latn''', '''sun_Latn''', '''swe_Latn''', '''swh_Latn''', '''szl_Latn''', '''tam_Taml''', '''tat_Cyrl''', '''tel_Telu''', '''tgk_Cyrl''', '''tgl_Latn''', '''tha_Thai''', '''tir_Ethi''', '''taq_Latn''', '''taq_Tfng''', '''tpi_Latn''', '''tsn_Latn''', '''tso_Latn''', '''tuk_Latn''', '''tum_Latn''', '''tur_Latn''', '''twi_Latn''', '''tzm_Tfng''', '''uig_Arab''', '''ukr_Cyrl''', '''umb_Latn''', '''urd_Arab''', '''uzn_Latn''', '''vec_Latn''', '''vie_Latn''', '''war_Latn''', '''wol_Latn''', '''xho_Latn''', '''ydd_Hebr''', '''yor_Latn''', '''yue_Hant''', '''zho_Hans''', '''zho_Hant''', '''zul_Latn'''] class UpperCamelCase ( lowerCAmelCase__ ): SCREAMING_SNAKE_CASE_ = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE_ = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE_ = ["input_ids", "attention_mask"] SCREAMING_SNAKE_CASE_ = NllbTokenizer SCREAMING_SNAKE_CASE_ = [] SCREAMING_SNAKE_CASE_ = [] def __init__( self, lowerCAmelCase__=None, lowerCAmelCase__=None, lowerCAmelCase__="<s>", lowerCAmelCase__="</s>", lowerCAmelCase__="</s>", lowerCAmelCase__="<s>", lowerCAmelCase__="<unk>", lowerCAmelCase__="<pad>", lowerCAmelCase__="<mask>", lowerCAmelCase__=None, lowerCAmelCase__=None, lowerCAmelCase__=None, lowerCAmelCase__=False, **lowerCAmelCase__, ) -> List[str]: # Mask token behave like a normal word, i.e. include the space before it snake_case_ = AddedToken(lowerCAmelCase__, lstrip=lowerCAmelCase__, rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__, lowerCAmelCase__) else mask_token snake_case_ = legacy_behaviour super().__init__( vocab_file=lowerCAmelCase__, tokenizer_file=lowerCAmelCase__, bos_token=lowerCAmelCase__, eos_token=lowerCAmelCase__, sep_token=lowerCAmelCase__, cls_token=lowerCAmelCase__, unk_token=lowerCAmelCase__, pad_token=lowerCAmelCase__, mask_token=lowerCAmelCase__, src_lang=lowerCAmelCase__, tgt_lang=lowerCAmelCase__, additional_special_tokens=lowerCAmelCase__, legacy_behaviour=lowerCAmelCase__, **lowerCAmelCase__, ) snake_case_ = vocab_file snake_case_ = False if not self.vocab_file else True snake_case_ = FAIRSEQ_LANGUAGE_CODES.copy() if additional_special_tokens is not None: # Only add those special tokens if they are not already there. 
_additional_special_tokens.extend( [t for t in additional_special_tokens if t not in _additional_special_tokens]) self.add_special_tokens({'additional_special_tokens': _additional_special_tokens}) snake_case_ = { lang_code: self.convert_tokens_to_ids(lowerCAmelCase__) for lang_code in FAIRSEQ_LANGUAGE_CODES } snake_case_ = src_lang if src_lang is not None else 'eng_Latn' snake_case_ = self.convert_tokens_to_ids(self._src_lang) snake_case_ = tgt_lang self.set_src_lang_special_tokens(self._src_lang) @property def a_ ( self) -> str: return self._src_lang @src_lang.setter def a_ ( self, lowerCAmelCase__) -> None: snake_case_ = new_src_lang self.set_src_lang_special_tokens(self._src_lang) def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> List[int]: if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> List[int]: snake_case_ = [self.sep_token_id] snake_case_ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0] def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, **lowerCAmelCase__) -> str: if src_lang is None or tgt_lang is None: raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model') snake_case_ = src_lang snake_case_ = self(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__, return_tensors=lowerCAmelCase__, **lowerCAmelCase__) snake_case_ = self.convert_tokens_to_ids(lowerCAmelCase__) snake_case_ = tgt_lang_id return inputs def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = "eng_Latn", lowerCAmelCase__ = None, lowerCAmelCase__ = "fra_Latn", **lowerCAmelCase__, ) -> BatchEncoding: snake_case_ = src_lang snake_case_ = tgt_lang return super().prepare_seqaseq_batch(lowerCAmelCase__, lowerCAmelCase__, **lowerCAmelCase__) def a_ ( self) -> List[Any]: return self.set_src_lang_special_tokens(self.src_lang) def a_ ( self) -> Tuple: return self.set_tgt_lang_special_tokens(self.tgt_lang) def a_ ( self, lowerCAmelCase__) -> None: snake_case_ = self.convert_tokens_to_ids(lowerCAmelCase__) if self.legacy_behaviour: snake_case_ = [] snake_case_ = [self.eos_token_id, self.cur_lang_code] else: snake_case_ = [self.cur_lang_code] snake_case_ = [self.eos_token_id] snake_case_ = self.convert_ids_to_tokens(self.prefix_tokens) snake_case_ = self.convert_ids_to_tokens(self.suffix_tokens) snake_case_ = processors.TemplateProcessing( single=prefix_tokens_str + ['$A'] + suffix_tokens_str, pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)), ) def a_ ( self, lowerCAmelCase__) -> None: snake_case_ = self.convert_tokens_to_ids(lowerCAmelCase__) if self.legacy_behaviour: snake_case_ = [] snake_case_ = [self.eos_token_id, self.cur_lang_code] else: snake_case_ = [self.cur_lang_code] snake_case_ = [self.eos_token_id] snake_case_ = self.convert_ids_to_tokens(self.prefix_tokens) snake_case_ = self.convert_ids_to_tokens(self.suffix_tokens) snake_case_ = processors.TemplateProcessing( single=prefix_tokens_str + ['$A'] + suffix_tokens_str, pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + 
self.suffix_tokens)), ) def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> Tuple[str]: if not self.can_save_slow_tokenizer: raise ValueError( 'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ' 'tokenizer.') if not os.path.isdir(lowerCAmelCase__): logger.error(f'Vocabulary path ({save_directory}) should be a directory.') return snake_case_ = os.path.join( lowerCAmelCase__, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']) if os.path.abspath(self.vocab_file) != os.path.abspath(lowerCAmelCase__): copyfile(self.vocab_file, lowerCAmelCase__) return (out_vocab_file,)
'''simple docstring''' from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast from ...utils import logging lowercase : List[str] = logging.get_logger(__name__) lowercase : str = { 'EleutherAI/gpt-neo-1.3B': 'https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json', # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo } class A ( __snake_case ): __magic_name__ = '''gpt_neo''' __magic_name__ = ['''past_key_values'''] __magic_name__ = {'''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''} def __init__( self , SCREAMING_SNAKE_CASE=50257 , SCREAMING_SNAKE_CASE=2048 , SCREAMING_SNAKE_CASE=2048 , SCREAMING_SNAKE_CASE=24 , SCREAMING_SNAKE_CASE=[[["global", "local"], 12]] , SCREAMING_SNAKE_CASE=16 , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=256 , SCREAMING_SNAKE_CASE="gelu_new" , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=1e-5 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=50256 , SCREAMING_SNAKE_CASE=50256 , **SCREAMING_SNAKE_CASE , ) -> int: """simple docstring""" A : Union[str, Any] = vocab_size A : Optional[Any] = max_position_embeddings A : Dict = hidden_size A : Optional[Any] = num_layers A : Tuple = num_heads A : int = intermediate_size A : Optional[Any] = window_size A : List[Any] = activation_function A : Union[str, Any] = resid_dropout A : Any = embed_dropout A : List[Any] = attention_dropout A : str = classifier_dropout A : List[Any] = layer_norm_epsilon A : str = initializer_range A : List[str] = use_cache A : Optional[int] = bos_token_id A : List[Any] = eos_token_id A : int = attention_types A : int = self.expand_attention_types_params(SCREAMING_SNAKE_CASE ) if len(self.attention_layers ) != self.num_layers: raise ValueError( '''Configuration for convolutional module is incorrect. ''' '''It is required that `len(config.attention_layers)` == `config.num_layers` ''' F'but is `len(config.attention_layers) = {len(self.attention_layers )}`, ' F'`config.num_layers = {self.num_layers}`. ' '''`config.attention_layers` is prepared using `config.attention_types`. 
''' '''Please verify the value of `config.attention_types` argument.''' ) super().__init__(bos_token_id=SCREAMING_SNAKE_CASE , eos_token_id=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) @staticmethod def __lowerCAmelCase ( SCREAMING_SNAKE_CASE ) -> int: """simple docstring""" A : List[str] = [] for item in attention_types: for _ in range(item[1] ): attentions.extend(item[0] ) return attentions def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' import torch A : Tuple = input.size() A : Union[str, Any] = len(snake_case__ ) A : List[str] = shape[dimension] A : Union[str, Any] = torch.arange(0 , snake_case__ , snake_case__ ) A : List[str] = torch.div(sizedim - size , snake_case__ , rounding_mode='''floor''' ) + 1 A : Optional[int] = torch.arange(snake_case__ ) + low_indices[:min_length][:, None] A : str = [slice(snake_case__ )] * rank A : List[Any] = indices A : Union[str, Any] = input[s] A : List[str] = list(range(0 , rank + 1 ) ) perm.append(perm.pop(dimension + 1 ) ) return sliced.permute(snake_case__ ) def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' import torch A : List[str] = torch.arange(1 , snake_case__ ) A : Optional[int] = torch.remainder(snake_case__ , snake_case__ ) A : Optional[int] = remainders == 0 A : Optional[Any] = candidates[divisor_indices] A : Optional[int] = torch.max(snake_case__ ) return largest_divisor, torch.div(snake_case__ , snake_case__ , rounding_mode='''floor''' ) class A ( __snake_case ): @property def __lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" A : Tuple = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} ) if self.use_past: self.fill_with_past_key_values_(SCREAMING_SNAKE_CASE , direction='''inputs''' ) A : Optional[Any] = {0: '''batch''', 1: '''past_sequence + sequence'''} else: A : Dict = {0: '''batch''', 1: '''sequence'''} return common_inputs @property def __lowerCAmelCase ( self ) -> int: """simple docstring""" return self._config.num_heads def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = -1 , SCREAMING_SNAKE_CASE = -1 , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = None , ) -> Mapping[str, Any]: """simple docstring""" A : List[str] = super(SCREAMING_SNAKE_CASE , self ).generate_dummy_inputs( SCREAMING_SNAKE_CASE , batch_size=SCREAMING_SNAKE_CASE , seq_length=SCREAMING_SNAKE_CASE , is_pair=SCREAMING_SNAKE_CASE , framework=SCREAMING_SNAKE_CASE ) # We need to order the input in the way they appears in the forward() A : Any = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch A, A : Dict = common_inputs['''input_ids'''].shape # Not using the same length for past_key_values A : str = seqlen + 2 A : List[Any] = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) A : Any = [ (torch.zeros(SCREAMING_SNAKE_CASE ), torch.zeros(SCREAMING_SNAKE_CASE )) for _ in range(self.num_layers ) ] A : str = common_inputs['''attention_mask'''] if self.use_past: A : Optional[int] = ordered_inputs['''attention_mask'''].dtype A : List[str] = torch.cat( [ordered_inputs['''attention_mask'''], torch.ones(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , dtype=SCREAMING_SNAKE_CASE )] , dim=1 ) return ordered_inputs @property def __lowerCAmelCase ( self ) -> 
int: """simple docstring""" return 13
'''simple docstring''' import inspect import unittest import numpy as np from tests.test_modeling_common import floats_tensor from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel if is_vision_available(): from transformers import MaskFormerImageProcessor if is_vision_available(): from PIL import Image class UpperCAmelCase : def __init__( self : Optional[int] , __snake_case : int , __snake_case : Optional[Any]=2 , __snake_case : int=True , __snake_case : str=False , __snake_case : List[str]=10 , __snake_case : Union[str, Any]=3 , __snake_case : List[Any]=32 * 4 , __snake_case : str=32 * 6 , __snake_case : int=4 , __snake_case : str=32 , ) -> str: _lowerCAmelCase = parent _lowerCAmelCase = batch_size _lowerCAmelCase = is_training _lowerCAmelCase = use_auxiliary_loss _lowerCAmelCase = num_queries _lowerCAmelCase = num_channels _lowerCAmelCase = min_size _lowerCAmelCase = max_size _lowerCAmelCase = num_labels _lowerCAmelCase = mask_feature_size def lowercase__ ( self : Optional[int] ) -> Optional[int]: _lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to( __snake_case ) _lowerCAmelCase = torch.ones([self.batch_size, self.min_size, self.max_size] , device=__snake_case ) _lowerCAmelCase = ( torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=__snake_case ) > 0.5 ).float() _lowerCAmelCase = (torch.rand((self.batch_size, self.num_labels) , device=__snake_case ) > 0.5).long() _lowerCAmelCase = self.get_config() return config, pixel_values, pixel_mask, mask_labels, class_labels def lowercase__ ( self : Any ) -> Union[str, Any]: return MaskFormerConfig.from_backbone_and_decoder_configs( backbone_config=SwinConfig( depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig( decoder_ffn_dim=1_28 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , ) def lowercase__ ( self : Tuple ) -> Tuple: _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = self.prepare_config_and_inputs() _lowerCAmelCase = {"""pixel_values""": pixel_values, """pixel_mask""": pixel_mask} return config, inputs_dict def lowercase__ ( self : List[Any] , __snake_case : str , __snake_case : Optional[int] ) -> List[Any]: _lowerCAmelCase = output.encoder_hidden_states _lowerCAmelCase = output.pixel_decoder_hidden_states _lowerCAmelCase = output.transformer_decoder_hidden_states self.parent.assertTrue(len(__snake_case ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(__snake_case ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(__snake_case ) , config.decoder_config.decoder_layers ) def lowercase__ ( self : str , __snake_case : Any , __snake_case : Union[str, Any] , __snake_case : Optional[int] , __snake_case : Dict=False ) -> Dict: with torch.no_grad(): _lowerCAmelCase = MaskFormerModel(config=__snake_case ) 
model.to(__snake_case ) model.eval() _lowerCAmelCase = model(pixel_values=__snake_case , pixel_mask=__snake_case ) _lowerCAmelCase = model(__snake_case , output_hidden_states=__snake_case ) # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the # encoder and pixel decoder self.parent.assertEqual( output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , ) # let's ensure the other two hidden state exists self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(output.encoder_last_hidden_state is not None ) if output_hidden_states: self.check_output_hidden_state(__snake_case , __snake_case ) def lowercase__ ( self : Union[str, Any] , __snake_case : int , __snake_case : int , __snake_case : Dict , __snake_case : Dict , __snake_case : str ) -> str: _lowerCAmelCase = MaskFormerForInstanceSegmentation(config=__snake_case ) model.to(__snake_case ) model.eval() def comm_check_on_output(__snake_case : List[str] ): # let's still check that all the required stuff is there self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.encoder_last_hidden_state is not None ) # okay, now we need to check the logits shape # due to the encoder compression, masks have a //4 spatial size self.parent.assertEqual( result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , ) # + 1 for null class self.parent.assertEqual( result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) ) with torch.no_grad(): _lowerCAmelCase = model(pixel_values=__snake_case , pixel_mask=__snake_case ) _lowerCAmelCase = model(__snake_case ) comm_check_on_output(__snake_case ) _lowerCAmelCase = model( pixel_values=__snake_case , pixel_mask=__snake_case , mask_labels=__snake_case , class_labels=__snake_case ) comm_check_on_output(__snake_case ) self.parent.assertTrue(result.loss is not None ) self.parent.assertEqual(result.loss.shape , torch.Size([1] ) ) @require_torch class UpperCAmelCase ( snake_case_ , snake_case_ , unittest.TestCase ): _lowercase: Any = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else () _lowercase: int = ( {'''feature-extraction''': MaskFormerModel, '''image-segmentation''': MaskFormerForInstanceSegmentation} if is_torch_available() else {} ) _lowercase: Optional[int] = False _lowercase: Union[str, Any] = False _lowercase: Dict = False _lowercase: Union[str, Any] = False def lowercase__ ( self : Tuple ) -> List[Any]: _lowerCAmelCase = MaskFormerModelTester(self ) _lowerCAmelCase = ConfigTester(self , config_class=__snake_case , has_text_modality=__snake_case ) def lowercase__ ( self : Dict ) -> Dict: self.config_tester.run_common_tests() def lowercase__ ( self : Optional[Any] ) -> str: _lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(__snake_case , **__snake_case , output_hidden_states=__snake_case ) def lowercase__ ( self : str ) -> Optional[int]: _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*__snake_case ) @unittest.skip(reason="""MaskFormer does not use inputs_embeds""" ) def lowercase__ ( self : List[Any] ) -> List[str]: pass 
@unittest.skip(reason="""MaskFormer does not have a get_input_embeddings method""" ) def lowercase__ ( self : Optional[Any] ) -> Optional[int]: pass @unittest.skip(reason="""MaskFormer is not a generative model""" ) def lowercase__ ( self : Union[str, Any] ) -> List[str]: pass @unittest.skip(reason="""MaskFormer does not use token embeddings""" ) def lowercase__ ( self : List[str] ) -> Any: pass @require_torch_multi_gpu @unittest.skip( reason="""MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" ) def lowercase__ ( self : Optional[int] ) -> Optional[int]: pass @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def lowercase__ ( self : Optional[Any] ) -> Dict: pass def lowercase__ ( self : Tuple ) -> Union[str, Any]: _lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCAmelCase = model_class(__snake_case ) _lowerCAmelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _lowerCAmelCase = [*signature.parameters.keys()] _lowerCAmelCase = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , __snake_case ) @slow def lowercase__ ( self : Optional[Any] ) -> str: for model_name in ["facebook/maskformer-swin-small-coco"]: _lowerCAmelCase = MaskFormerModel.from_pretrained(__snake_case ) self.assertIsNotNone(__snake_case ) def lowercase__ ( self : str ) -> int: _lowerCAmelCase = (self.model_tester.min_size,) * 2 _lowerCAmelCase = { """pixel_values""": torch.randn((2, 3, *size) , device=__snake_case ), """mask_labels""": torch.randn((2, 10, *size) , device=__snake_case ), """class_labels""": torch.zeros(2 , 10 , device=__snake_case ).long(), } _lowerCAmelCase = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(__snake_case ) _lowerCAmelCase = model(**__snake_case ) self.assertTrue(outputs.loss is not None ) def lowercase__ ( self : Optional[Any] ) -> List[Any]: _lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(__snake_case , **__snake_case , output_hidden_states=__snake_case ) def lowercase__ ( self : str ) -> Optional[int]: _lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCAmelCase = model_class(__snake_case ).to(__snake_case ) _lowerCAmelCase = model(**__snake_case , output_attentions=__snake_case ) self.assertTrue(outputs.attentions is not None ) def lowercase__ ( self : Tuple ) -> str: if not self.model_tester.is_training: return # only MaskFormerForInstanceSegmentation has the loss _lowerCAmelCase = self.all_model_classes[1] _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() _lowerCAmelCase = model_class(__snake_case ) model.to(__snake_case ) model.train() _lowerCAmelCase = model(__snake_case , mask_labels=__snake_case , class_labels=__snake_case ).loss loss.backward() def lowercase__ ( self : Dict ) -> int: # only MaskFormerForInstanceSegmentation has the loss _lowerCAmelCase = self.all_model_classes[1] _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() _lowerCAmelCase = True _lowerCAmelCase = True _lowerCAmelCase = model_class(__snake_case ) model.to(__snake_case ) model.train() 
_lowerCAmelCase = model(__snake_case , mask_labels=__snake_case , class_labels=__snake_case ) _lowerCAmelCase = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() _lowerCAmelCase = outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't _lowerCAmelCase = outputs.transformer_decoder_hidden_states[0] transformer_decoder_hidden_states.retain_grad() _lowerCAmelCase = outputs.attentions[0] attentions.retain_grad() outputs.loss.backward(retain_graph=__snake_case ) self.assertIsNotNone(encoder_hidden_states.grad ) self.assertIsNotNone(pixel_decoder_hidden_states.grad ) self.assertIsNotNone(transformer_decoder_hidden_states.grad ) self.assertIsNotNone(attentions.grad ) A__ : int =1e-4 def UpperCamelCase__ ( ): """simple docstring""" _lowerCAmelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_vision @slow class UpperCAmelCase ( unittest.TestCase ): @cached_property def lowercase__ ( self : Dict ) -> Dict: return ( MaskFormerImageProcessor.from_pretrained("""facebook/maskformer-swin-small-coco""" ) if is_vision_available() else None ) def lowercase__ ( self : str ) -> Union[str, Any]: _lowerCAmelCase = MaskFormerModel.from_pretrained("""facebook/maskformer-swin-small-coco""" ).to(__snake_case ) _lowerCAmelCase = self.default_image_processor _lowerCAmelCase = prepare_img() _lowerCAmelCase = image_processor(__snake_case , return_tensors="""pt""" ).to(__snake_case ) _lowerCAmelCase = inputs["""pixel_values"""].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(__snake_case , (1, 3, 8_00, 10_88) ) with torch.no_grad(): _lowerCAmelCase = model(**__snake_case ) _lowerCAmelCase = torch.tensor( [[-0.04_82, 0.92_28, 0.49_51], [-0.25_47, 0.80_17, 0.85_27], [-0.00_69, 0.33_85, -0.00_89]] ).to(__snake_case ) self.assertTrue( torch.allclose( outputs.encoder_last_hidden_state[0, 0, :3, :3] , __snake_case , atol=__snake_case ) ) _lowerCAmelCase = torch.tensor( [[-0.84_22, -0.84_34, -0.97_18], [-1.01_44, -0.55_65, -0.41_95], [-1.00_38, -0.44_84, -0.19_61]] ).to(__snake_case ) self.assertTrue( torch.allclose( outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , __snake_case , atol=__snake_case ) ) _lowerCAmelCase = torch.tensor( [[0.28_52, -0.01_59, 0.97_35], [0.62_54, 0.18_58, 0.85_29], [-0.06_80, -0.41_16, 1.84_13]] ).to(__snake_case ) self.assertTrue( torch.allclose( outputs.transformer_decoder_last_hidden_state[0, :3, :3] , __snake_case , atol=__snake_case ) ) def lowercase__ ( self : Any ) -> Tuple: _lowerCAmelCase = ( MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-swin-small-coco""" ) .to(__snake_case ) .eval() ) _lowerCAmelCase = self.default_image_processor _lowerCAmelCase = prepare_img() _lowerCAmelCase = image_processor(__snake_case , return_tensors="""pt""" ).to(__snake_case ) _lowerCAmelCase = inputs["""pixel_values"""].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(__snake_case , (1, 3, 8_00, 10_88) ) with torch.no_grad(): _lowerCAmelCase = model(**__snake_case ) # masks_queries_logits _lowerCAmelCase = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) _lowerCAmelCase = [ 
[-1.3_73_71_24, -1.7_72_49_37, -1.9_36_42_33], [-1.5_97_72_81, -1.9_86_79_39, -2.1_52_36_95], [-1.5_79_53_98, -1.9_26_98_32, -2.09_39_42], ] _lowerCAmelCase = torch.tensor(__snake_case ).to(__snake_case ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __snake_case , atol=__snake_case ) ) # class_queries_logits _lowerCAmelCase = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) _lowerCAmelCase = torch.tensor( [ [1.6_5_1_2E0_0, -5.2_5_7_2E0_0, -3.3_5_1_9E0_0], [3.6_1_6_9E-0_2, -5.9_0_2_5E0_0, -2.9_3_1_3E0_0], [1.0_7_6_6E-0_4, -7.7_6_3_0E0_0, -5.1_2_6_3E0_0], ] ).to(__snake_case ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __snake_case , atol=__snake_case ) ) def lowercase__ ( self : Tuple ) -> Optional[Any]: _lowerCAmelCase = ( MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-resnet101-coco-stuff""" ) .to(__snake_case ) .eval() ) _lowerCAmelCase = self.default_image_processor _lowerCAmelCase = prepare_img() _lowerCAmelCase = image_processor(__snake_case , return_tensors="""pt""" ).to(__snake_case ) _lowerCAmelCase = inputs["""pixel_values"""].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(__snake_case , (1, 3, 8_00, 10_88) ) with torch.no_grad(): _lowerCAmelCase = model(**__snake_case ) # masks_queries_logits _lowerCAmelCase = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) _lowerCAmelCase = [[-0.90_46, -2.63_66, -4.60_62], [-3.41_79, -5.78_90, -8.80_57], [-4.91_79, -7.65_60, -10.77_11]] _lowerCAmelCase = torch.tensor(__snake_case ).to(__snake_case ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __snake_case , atol=__snake_case ) ) # class_queries_logits _lowerCAmelCase = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) _lowerCAmelCase = torch.tensor( [[4.71_88, -3.25_85, -2.88_57], [6.68_71, -2.91_81, -1.24_87], [7.24_49, -2.27_64, -2.18_74]] ).to(__snake_case ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __snake_case , atol=__snake_case ) ) def lowercase__ ( self : Optional[int] ) -> Union[str, Any]: _lowerCAmelCase = ( MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-swin-small-coco""" ) .to(__snake_case ) .eval() ) _lowerCAmelCase = self.default_image_processor _lowerCAmelCase = image_processor( [np.zeros((3, 8_00, 13_33) ), np.zeros((3, 8_00, 13_33) )] , segmentation_maps=[np.zeros((3_84, 3_84) ).astype(np.floataa ), np.zeros((3_84, 3_84) ).astype(np.floataa )] , return_tensors="""pt""" , ) _lowerCAmelCase = inputs["""pixel_values"""].to(__snake_case ) _lowerCAmelCase = [el.to(__snake_case ) for el in inputs["""mask_labels"""]] _lowerCAmelCase = [el.to(__snake_case ) for el in inputs["""class_labels"""]] with torch.no_grad(): _lowerCAmelCase = model(**__snake_case ) self.assertTrue(outputs.loss is not None )
'''simple docstring''' import flax.linen as nn import jax.numpy as jnp from .attention_flax import FlaxTransformeraDModel from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD class A ( nn.Module ): __magic_name__ = 42 __magic_name__ = 42 __magic_name__ = 0.0 __magic_name__ = 1 __magic_name__ = 1 __magic_name__ = True __magic_name__ = False __magic_name__ = False __magic_name__ = False __magic_name__ = jnp.floataa def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" A : Union[str, Any] = [] A : Union[str, Any] = [] for i in range(self.num_layers ): A : Any = self.in_channels if i == 0 else self.out_channels A : Optional[Any] = FlaxResnetBlockaD( in_channels=SCREAMING_SNAKE_CASE , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(SCREAMING_SNAKE_CASE ) A : Optional[int] = FlaxTransformeraDModel( in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(SCREAMING_SNAKE_CASE ) A : Union[str, Any] = resnets A : Union[str, Any] = attentions if self.add_downsample: A : int = FlaxDownsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=True ) -> Union[str, Any]: """simple docstring""" A : Optional[Any] = () for resnet, attn in zip(self.resnets , self.attentions ): A : int = resnet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE ) A : Dict = attn(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE ) output_states += (hidden_states,) if self.add_downsample: A : Optional[Any] = self.downsamplers_a(SCREAMING_SNAKE_CASE ) output_states += (hidden_states,) return hidden_states, output_states class A ( nn.Module ): __magic_name__ = 42 __magic_name__ = 42 __magic_name__ = 0.0 __magic_name__ = 1 __magic_name__ = True __magic_name__ = jnp.floataa def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" A : Optional[Any] = [] for i in range(self.num_layers ): A : Optional[Any] = self.in_channels if i == 0 else self.out_channels A : List[str] = FlaxResnetBlockaD( in_channels=SCREAMING_SNAKE_CASE , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(SCREAMING_SNAKE_CASE ) A : Dict = resnets if self.add_downsample: A : Dict = FlaxDownsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=True ) -> Optional[Any]: """simple docstring""" A : str = () for resnet in self.resnets: A : Optional[int] = resnet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE ) output_states += (hidden_states,) if self.add_downsample: A : Optional[int] = self.downsamplers_a(SCREAMING_SNAKE_CASE ) output_states += (hidden_states,) return hidden_states, output_states class A ( nn.Module ): __magic_name__ = 42 __magic_name__ = 42 __magic_name__ = 42 __magic_name__ = 0.0 __magic_name__ = 1 __magic_name__ = 1 __magic_name__ = True __magic_name__ = False __magic_name__ = False __magic_name__ = False __magic_name__ = jnp.floataa def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" A : Optional[Any] = [] A : Optional[int] = [] for i in 
range(self.num_layers ): A : str = self.in_channels if (i == self.num_layers - 1) else self.out_channels A : Dict = self.prev_output_channel if i == 0 else self.out_channels A : List[str] = FlaxResnetBlockaD( in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(SCREAMING_SNAKE_CASE ) A : int = FlaxTransformeraDModel( in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(SCREAMING_SNAKE_CASE ) A : Dict = resnets A : Optional[Any] = attentions if self.add_upsample: A : Optional[int] = FlaxUpsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=True ) -> Optional[int]: """simple docstring""" for resnet, attn in zip(self.resnets , self.attentions ): # pop res hidden states A : List[str] = res_hidden_states_tuple[-1] A : int = res_hidden_states_tuple[:-1] A : List[str] = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 ) A : Union[str, Any] = resnet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE ) A : Tuple = attn(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE ) if self.add_upsample: A : Dict = self.upsamplers_a(SCREAMING_SNAKE_CASE ) return hidden_states class A ( nn.Module ): __magic_name__ = 42 __magic_name__ = 42 __magic_name__ = 42 __magic_name__ = 0.0 __magic_name__ = 1 __magic_name__ = True __magic_name__ = jnp.floataa def __lowerCAmelCase ( self ) -> Dict: """simple docstring""" A : int = [] for i in range(self.num_layers ): A : List[Any] = self.in_channels if (i == self.num_layers - 1) else self.out_channels A : List[str] = self.prev_output_channel if i == 0 else self.out_channels A : str = FlaxResnetBlockaD( in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(SCREAMING_SNAKE_CASE ) A : List[Any] = resnets if self.add_upsample: A : Optional[Any] = FlaxUpsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=True ) -> Tuple: """simple docstring""" for resnet in self.resnets: # pop res hidden states A : Optional[int] = res_hidden_states_tuple[-1] A : Optional[Any] = res_hidden_states_tuple[:-1] A : List[Any] = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 ) A : Optional[Any] = resnet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE ) if self.add_upsample: A : List[str] = self.upsamplers_a(SCREAMING_SNAKE_CASE ) return hidden_states class A ( nn.Module ): __magic_name__ = 42 __magic_name__ = 0.0 __magic_name__ = 1 __magic_name__ = 1 __magic_name__ = False __magic_name__ = False __magic_name__ = jnp.floataa def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" A : str = [ FlaxResnetBlockaD( in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , ) ] A : List[Any] = [] for _ in range(self.num_layers ): A : int = FlaxTransformeraDModel( in_channels=self.in_channels , 
n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(SCREAMING_SNAKE_CASE ) A : Union[str, Any] = FlaxResnetBlockaD( in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(SCREAMING_SNAKE_CASE ) A : List[str] = resnets A : List[str] = attentions def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=True ) -> Dict: """simple docstring""" A : Optional[Any] = self.resnets[0](SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for attn, resnet in zip(self.attentions , self.resnets[1:] ): A : Optional[int] = attn(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE ) A : Union[str, Any] = resnet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE ) return hidden_states
3
0
from urllib.parse import quote

import pytest

from datasets.utils.hub import hf_hub_url


@pytest.mark.parametrize('repo_id', ['canonical_dataset_name', 'org-name/dataset-name'])
@pytest.mark.parametrize('path', ['filename.csv', 'filename with blanks.csv'])
@pytest.mark.parametrize('revision', [None, 'v2'])
def test_hf_hub_url(repo_id, path, revision):
    url = hf_hub_url(repo_id=repo_id, path=path, revision=revision)
    assert url == f'https://huggingface.co/datasets/{repo_id}/resolve/{revision or "main"}/{quote(path)}'
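# Illustrative sketch: expands the URL pattern the test above asserts, using example
# values; the f-string mirrors the assertion, not the source of `hf_hub_url` itself.
from urllib.parse import quote

repo_id, path, revision = 'org-name/dataset-name', 'filename with blanks.csv', None
url = f'https://huggingface.co/datasets/{repo_id}/resolve/{revision or "main"}/{quote(path)}'
print(url)  # ...resolve/main/filename%20with%20blanks.csv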
71
'''simple docstring'''
import os


def solution() -> str:
    '''simple docstring'''
    # Sum the numbers in num.txt and return the first ten digits of the total.
    num_path = os.path.join(os.path.dirname(__file__), '''num.txt''')
    with open(num_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]


if __name__ == "__main__":
    print(solution())
3
0
"""simple docstring""" from __future__ import annotations import math def snake_case_ ( A_ : float, A_ : int ): '''simple docstring''' _lowerCamelCase : Tuple = u for i in range(1, A_ ): _lowerCamelCase : int = temp * (u - i) return temp def snake_case_ ( ): '''simple docstring''' _lowerCamelCase : Tuple = int(input('''enter the numbers of values: ''' ) ) _lowerCamelCase : list[list[float]] = [] for _ in range(A_ ): y.append([] ) for i in range(A_ ): for j in range(A_ ): y[i].append(A_ ) _lowerCamelCase : Optional[int] = 0 print('''enter the values of parameters in a list: ''' ) _lowerCamelCase : Optional[int] = list(map(A_, input().split() ) ) print('''enter the values of corresponding parameters: ''' ) for i in range(A_ ): _lowerCamelCase : Dict = float(input() ) _lowerCamelCase : Tuple = int(input('''enter the value to interpolate: ''' ) ) _lowerCamelCase : List[Any] = (value - x[0]) / (x[1] - x[0]) # for calculating forward difference table for i in range(1, A_ ): for j in range(n - i ): _lowerCamelCase : Optional[Any] = y[j + 1][i - 1] - y[j][i - 1] _lowerCamelCase : Any = y[0][0] for i in range(1, A_ ): summ += (ucal(A_, A_ ) * y[0][i]) / math.factorial(A_ ) print(F'''the value at {value} is {summ}''' ) if __name__ == "__main__": main()
72
'''simple docstring'''
import pytest

import datasets.config
from datasets.utils.info_utils import is_small_dataset


@pytest.mark.parametrize('''dataset_size''', [None, 400 * 2**20, 600 * 2**20])
@pytest.mark.parametrize('''input_in_memory_max_size''', ['''default''', 0, 100 * 2**20, 900 * 2**20])
def test_is_small_dataset(dataset_size, input_in_memory_max_size, monkeypatch):
    '''simple docstring'''
    if input_in_memory_max_size != "default":
        monkeypatch.setattr(datasets.config, '''IN_MEMORY_MAX_SIZE''', input_in_memory_max_size)
    in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE
    if input_in_memory_max_size == "default":
        assert in_memory_max_size == 0
    else:
        assert in_memory_max_size == input_in_memory_max_size
    if dataset_size and in_memory_max_size:
        expected = dataset_size < in_memory_max_size
    else:
        expected = False
    result = is_small_dataset(dataset_size)
    assert result == expected
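# Minimal sketch of the rule the test above encodes; this mirrors the expected value
# computed in the test, not the actual source of datasets.utils.info_utils.is_small_dataset.
def is_small_dataset_sketch(dataset_size, in_memory_max_size):
    return bool(dataset_size and in_memory_max_size and dataset_size < in_memory_max_size)


assert is_small_dataset_sketch(400 * 2**20, 900 * 2**20) is True
assert is_small_dataset_sketch(600 * 2**20, 100 * 2**20) is False
assert is_small_dataset_sketch(None, 900 * 2**20) is False  # falsy size never counts as small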
3
0
import logging

from transformers.configuration_utils import PretrainedConfig

logger = logging.getLogger(__name__)


class MaskedBertConfig(PretrainedConfig):
    _UpperCAmelCase = '''masked_bert'''
    model_type = '''masked_bert'''

    def __init__(
        self,
        vocab_size=3_0_5_2_2,
        hidden_size=7_6_8,
        num_hidden_layers=1_2,
        num_attention_heads=1_2,
        intermediate_size=3_0_7_2,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=5_1_2,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        pad_token_id=0,
        pruning_method="topK",
        mask_init="constant",
        mask_scale=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
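# Brief usage sketch of the config above; the override values are arbitrary examples,
# not recommended settings.
config = MaskedBertConfig(pruning_method="magnitude", mask_scale=0.5)
print(config.pruning_method, config.mask_scale)  # magnitude 0.5
print(config.to_json_string()[:40])              # serializes like any PretrainedConfig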
73
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union

import numpy as np
import PIL

from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
    fast27_timesteps,
    smart27_timesteps,
    smart50_timesteps,
    smart100_timesteps,
    smart185_timesteps,
    super27_timesteps,
    super40_timesteps,
    super100_timesteps,
)


@dataclass
class IFPipelineOutput(BaseOutput):
    '''simple docstring'''
    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_detected: Optional[List[bool]]
    watermark_detected: Optional[List[bool]]


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .pipeline_if import IFPipeline
    from .pipeline_if_img2img import IFImg2ImgPipeline
    from .pipeline_if_img2img_superresolution import IFImg2ImgSuperResolutionPipeline
    from .pipeline_if_inpainting import IFInpaintingPipeline
    from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
    from .pipeline_if_superresolution import IFSuperResolutionPipeline
    from .safety_checker import IFSafetyChecker
    from .watermark import IFWatermarker
3
0
"""simple docstring""" import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging _lowercase = logging.get_logger(__name__) _lowercase = { '''microsoft/unispeech-sat-base-100h-libri-ft''': ( '''https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json''' ), # See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat } class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: Optional[Any] = '''unispeech-sat''' def __init__( self : int ,A_ : Optional[int]=32 ,A_ : Optional[int]=768 ,A_ : Union[str, Any]=12 ,A_ : Optional[int]=12 ,A_ : Optional[int]=3072 ,A_ : Union[str, Any]="gelu" ,A_ : Union[str, Any]=0.1 ,A_ : List[str]=0.1 ,A_ : Any=0.1 ,A_ : Optional[int]=0.0 ,A_ : Union[str, Any]=0.0 ,A_ : str=0.1 ,A_ : Union[str, Any]=0.1 ,A_ : int=0.02 ,A_ : int=1e-5 ,A_ : int="group" ,A_ : List[str]="gelu" ,A_ : Dict=(512, 512, 512, 512, 512, 512, 512) ,A_ : Optional[int]=(5, 2, 2, 2, 2, 2, 2) ,A_ : Any=(10, 3, 3, 3, 3, 2, 2) ,A_ : Optional[Any]=False ,A_ : str=128 ,A_ : Union[str, Any]=16 ,A_ : List[str]=False ,A_ : str=True ,A_ : Dict=0.05 ,A_ : Optional[Any]=10 ,A_ : Optional[int]=2 ,A_ : List[Any]=0.0 ,A_ : Union[str, Any]=10 ,A_ : str=0 ,A_ : str=320 ,A_ : Any=2 ,A_ : Dict=0.1 ,A_ : Optional[Any]=100 ,A_ : Optional[int]=256 ,A_ : Union[str, Any]=256 ,A_ : Tuple=0.1 ,A_ : int="mean" ,A_ : List[Any]=False ,A_ : Tuple=False ,A_ : Dict=256 ,A_ : List[str]=(512, 512, 512, 512, 1500) ,A_ : Optional[Any]=(5, 3, 3, 1, 1) ,A_ : Optional[Any]=(1, 2, 3, 1, 1) ,A_ : List[Any]=512 ,A_ : Any=0 ,A_ : Tuple=1 ,A_ : Optional[Any]=2 ,A_ : Optional[int]=504 ,**A_ : Dict ,) -> List[str]: super().__init__(**A_ ,pad_token_id=A_ ,bos_token_id=A_ ,eos_token_id=A_ ) A = hidden_size A = feat_extract_norm A = feat_extract_activation A = list(A_ ) A = list(A_ ) A = list(A_ ) A = conv_bias A = num_conv_pos_embeddings A = num_conv_pos_embedding_groups A = len(self.conv_dim ) A = num_hidden_layers A = intermediate_size A = hidden_act A = num_attention_heads A = hidden_dropout A = attention_dropout A = activation_dropout A = feat_proj_dropout A = final_dropout A = layerdrop A = layer_norm_eps A = initializer_range A = vocab_size A = num_clusters A = do_stable_layer_norm A = use_weighted_layer_sum if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( 'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` ==' ' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =' F' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,' F' `len(config.conv_kernel) = {len(self.conv_kernel )}`.' ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 A = apply_spec_augment A = mask_time_prob A = mask_time_length A = mask_time_min_masks A = mask_feature_prob A = mask_feature_length A = mask_feature_min_masks # parameters for pretraining with codevector quantized representations A = num_codevectors_per_group A = num_codevector_groups A = contrastive_logits_temperature A = feat_quantizer_dropout A = num_negatives A = codevector_dim A = proj_codevector_dim A = diversity_loss_weight # ctc loss A = ctc_loss_reduction A = ctc_zero_infinity # SequenceClassification-specific parameter. Feel free to ignore for other classes. 
A = classifier_proj_size # XVector-specific parameters. Feel free to ignore for other classes. A = list(A_ ) A = list(A_ ) A = list(A_ ) A = xvector_output_dim @property def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]: return functools.reduce(operator.mul ,self.conv_stride ,1 )
74
'''simple docstring''' from scipy.stats import pearsonr import datasets lowercase : Optional[int] = '\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n' lowercase : Optional[Any] = '\nArgs:\n predictions (`list` of `int`): Predicted class labels, as returned by a model.\n references (`list` of `int`): Ground truth labels.\n return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n Example 1-A simple example using only predictions and references.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n\n Example 2-The same as Example 1, but that also returns the `p-value`.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n >>> print(sorted(list(results.keys())))\n [\'p-value\', \'pearsonr\']\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n >>> print(round(results[\'p-value\'], 2))\n 0.15\n' lowercase : str = '\n@article{2020SciPy-NMeth,\nauthor = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Antonio H. 
and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\njournal = {Nature Methods},\nyear = {2020},\nvolume = {17},\npages = {261--272},\nadsurl = {https://rdcu.be/b08Wh},\ndoi = {10.1038/s41592-019-0686-2},\n}\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class A ( datasets.Metric ): def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''float''' ), '''references''': datasets.Value('''float''' ), } ) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'''] , ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False ) -> Union[str, Any]: """simple docstring""" if return_pvalue: A : Union[str, Any] = pearsonr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) return {"pearsonr": results[0], "p-value": results[1]} else: return {"pearsonr": float(pearsonr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )[0] )}
3
0
'''simple docstring''' import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized, parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , ) @pytest.mark.usefixtures('sm_env' ) @parameterized_class( [ { 'framework': 'pytorch', 'script': 'run_glue.py', 'model_name_or_path': 'distilbert-base-cased', 'instance_type': 'ml.p3.16xlarge', 'results': {'train_runtime': 6_50, 'eval_accuracy': 0.7, 'eval_loss': 0.6}, }, { 'framework': 'pytorch', 'script': 'run_ddp.py', 'model_name_or_path': 'distilbert-base-cased', 'instance_type': 'ml.p3.16xlarge', 'results': {'train_runtime': 6_00, 'eval_accuracy': 0.7, 'eval_loss': 0.6}, }, { 'framework': 'tensorflow', 'script': 'run_tf_dist.py', 'model_name_or_path': 'distilbert-base-cased', 'instance_type': 'ml.p3.16xlarge', 'results': {'train_runtime': 6_00, 'eval_accuracy': 0.6, 'eval_loss': 0.7}, }, ] ) class __UpperCamelCase ( unittest.TestCase ): def lowercase__ ( self ): """simple docstring""" if self.framework == "pytorch": subprocess.run( f'''cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'''.split(), encoding='''utf-8''', check=lowerCAmelCase, ) assert hasattr(self, '''env''' ) def lowercase__ ( self, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =f'''{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}''' # distributed data settings lowerCamelCase_ ={'''smdistributed''': {'''dataparallel''': {'''enabled''': True}}} if self.script != '''run_ddp.py''' else None # creates estimator return HuggingFace( entry_point=self.script, source_dir=self.env.test_path, role=self.env.role, image_uri=self.env.image_uri, base_job_name=lowerCAmelCase, instance_count=lowerCAmelCase, instance_type=self.instance_type, debugger_hook_config=lowerCAmelCase, hyperparameters={**self.env.distributed_hyperparameters, '''model_name_or_path''': self.model_name_or_path}, metric_definitions=self.env.metric_definitions, distribution=lowerCAmelCase, py_version='''py36''', ) def lowercase__ ( self, lowerCAmelCase ): """simple docstring""" TrainingJobAnalytics(lowerCAmelCase ).export_csv(f'''{self.env.test_path}/{job_name}_metrics.csv''' ) @parameterized.expand([(2,)] ) def lowercase__ ( self, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =self.create_estimator(lowerCAmelCase ) # run training estimator.fit() # result dataframe lowerCamelCase_ =TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe() # extract kpis lowerCamelCase_ =list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] ) lowerCamelCase_ =list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] ) # get train time from SageMaker job, this includes starting, preprocessing, stopping lowerCamelCase_ =( Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''', 999_999 ) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy ) assert all(t <= self.results['''eval_loss'''] for t in eval_loss ) # dump tests result into json file to share in PR with 
open(f'''{estimator.latest_training_job.name}.json''', '''w''' ) as outfile: json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss}, lowerCAmelCase )
75
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_speech_available, is_tf_available, is_torch_available, ) lowercase : Dict = { 'configuration_speech_to_text': ['SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Speech2TextConfig'], 'processing_speech_to_text': ['Speech2TextProcessor'], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase : List[Any] = ['Speech2TextTokenizer'] try: if not is_speech_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase : List[str] = ['Speech2TextFeatureExtractor'] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase : Dict = [ 'TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFSpeech2TextForConditionalGeneration', 'TFSpeech2TextModel', 'TFSpeech2TextPreTrainedModel', ] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase : Any = [ 'SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST', 'Speech2TextForConditionalGeneration', 'Speech2TextModel', 'Speech2TextPreTrainedModel', ] if TYPE_CHECKING: from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig from .processing_speech_to_text import SpeechaTextProcessor try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_speech_to_text import SpeechaTextTokenizer try: if not is_speech_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_speech_to_text import ( TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST, TFSpeechaTextForConditionalGeneration, TFSpeechaTextModel, TFSpeechaTextPreTrainedModel, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_speech_to_text import ( SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST, SpeechaTextForConditionalGeneration, SpeechaTextModel, SpeechaTextPreTrainedModel, ) else: import sys lowercase : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
3
0
import unittest from transformers import TrOCRConfig from transformers.testing_utils import is_torch_available, require_torch, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM @require_torch class _UpperCamelCase : '''simple docstring''' def __init__( self : List[Any] , a : Optional[Any] , a : List[Any]=99 , a : Union[str, Any]=13 , a : List[str]=16 , a : Tuple=7 , a : Dict=True , a : List[str]=True , a : Union[str, Any]=True , a : Optional[int]=False , a : Union[str, Any]=True , a : List[str]=2 , a : Dict=32 , a : List[Any]=4 , a : Optional[Any]=4 , a : List[str]=30 , a : str=0 , a : str=1 , a : Tuple=2 , a : Optional[Any]=None , ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = parent SCREAMING_SNAKE_CASE : Tuple = batch_size SCREAMING_SNAKE_CASE : Dict = decoder_seq_length # For common tests SCREAMING_SNAKE_CASE : Dict = self.decoder_seq_length SCREAMING_SNAKE_CASE : Tuple = is_training SCREAMING_SNAKE_CASE : int = use_attention_mask SCREAMING_SNAKE_CASE : str = use_labels SCREAMING_SNAKE_CASE : List[str] = vocab_size SCREAMING_SNAKE_CASE : Union[str, Any] = d_model SCREAMING_SNAKE_CASE : List[Any] = d_model SCREAMING_SNAKE_CASE : Tuple = decoder_layers SCREAMING_SNAKE_CASE : Any = decoder_layers SCREAMING_SNAKE_CASE : Tuple = decoder_ffn_dim SCREAMING_SNAKE_CASE : int = decoder_attention_heads SCREAMING_SNAKE_CASE : int = decoder_attention_heads SCREAMING_SNAKE_CASE : Any = eos_token_id SCREAMING_SNAKE_CASE : int = bos_token_id SCREAMING_SNAKE_CASE : Optional[int] = pad_token_id SCREAMING_SNAKE_CASE : str = decoder_start_token_id SCREAMING_SNAKE_CASE : Optional[Any] = use_cache SCREAMING_SNAKE_CASE : List[Any] = max_position_embeddings SCREAMING_SNAKE_CASE : int = None SCREAMING_SNAKE_CASE : Optional[int] = decoder_seq_length SCREAMING_SNAKE_CASE : Union[str, Any] = 2 SCREAMING_SNAKE_CASE : Tuple = 1 def __UpperCamelCase ( self : Optional[int] ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE : Optional[int] = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size ) SCREAMING_SNAKE_CASE : List[Any] = None if self.use_attention_mask: SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 ) SCREAMING_SNAKE_CASE : Optional[Any] = None if self.use_labels: SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size ) SCREAMING_SNAKE_CASE : Optional[int] = TrOCRConfig( vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , ) return (config, input_ids, attention_mask, lm_labels) def __UpperCamelCase ( self : Tuple , a : Optional[Any] , a : int , a : Tuple , a : List[Any] , ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE : Optional[int] = True SCREAMING_SNAKE_CASE : Optional[int] = TrOCRDecoder(config=a ).to(a ).eval() SCREAMING_SNAKE_CASE : Any = input_ids[:2] 
input_ids[input_ids == 0] += 1 # first forward pass SCREAMING_SNAKE_CASE : Union[str, Any] = model(a , use_cache=a ) SCREAMING_SNAKE_CASE : Tuple = model(a ) SCREAMING_SNAKE_CASE : Tuple = model(a , use_cache=a ) self.parent.assertTrue(len(a ) == len(a ) ) self.parent.assertTrue(len(a ) == len(a ) + 1 ) SCREAMING_SNAKE_CASE : Any = outputs["past_key_values"] # create hypothetical next token and extent to next_input_ids SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor((2, 1) , config.vocab_size - 1 ) + 1 # append to next input_ids and SCREAMING_SNAKE_CASE : Union[str, Any] = torch.cat([input_ids, next_tokens] , dim=-1 ) SCREAMING_SNAKE_CASE : Any = model(a )["last_hidden_state"] SCREAMING_SNAKE_CASE : List[str] = model(a , past_key_values=a )["last_hidden_state"] # select random slice SCREAMING_SNAKE_CASE : Tuple = ids_tensor((1,) , output_from_past.shape[-1] ).item() SCREAMING_SNAKE_CASE : Optional[int] = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach() SCREAMING_SNAKE_CASE : Union[str, Any] = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice assert torch.allclose(a , a , atol=1e-3 ) def __UpperCamelCase ( self : Dict ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE : Union[str, Any] = self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Optional[Any] = config_and_inputs SCREAMING_SNAKE_CASE : Tuple = {"input_ids": input_ids, "attention_mask": attention_mask} return config, inputs_dict @require_torch class _UpperCamelCase ( __A , __A , __A , unittest.TestCase ): '''simple docstring''' lowerCamelCase__ =(TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else () lowerCamelCase__ =(TrOCRForCausalLM,) if is_torch_available() else () lowerCamelCase__ ={'text-generation': TrOCRForCausalLM} if is_torch_available() else {} lowerCamelCase__ =True lowerCamelCase__ =False def __UpperCamelCase ( self : int ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE : Union[str, Any] = TrOCRStandaloneDecoderModelTester(self , is_training=a ) SCREAMING_SNAKE_CASE : str = ConfigTester(self , config_class=a ) def __UpperCamelCase ( self : Optional[int] ) -> str: """simple docstring""" pass def __UpperCamelCase ( self : Dict ) -> Any: """simple docstring""" pass def __UpperCamelCase ( self : Optional[Any] ) -> Tuple: """simple docstring""" pass def __UpperCamelCase ( self : Tuple ) -> List[Any]: """simple docstring""" self.config_tester.run_common_tests() def __UpperCamelCase ( self : int ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past(*a ) def __UpperCamelCase ( self : List[Any] ) -> Any: """simple docstring""" return @unittest.skip("The model doesn't support left padding" ) # and it's not used enough to be worth fixing :) def __UpperCamelCase ( self : List[str] ) -> int: """simple docstring""" pass
76
'''simple docstring''' import os import sys import unittest lowercase : Dict = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, 'utils')) import get_test_info # noqa: E402 from get_test_info import ( # noqa: E402 get_model_to_test_mapping, get_model_to_tester_mapping, get_test_to_tester_mapping, ) lowercase : Any = os.path.join('tests', 'models', 'bert', 'test_modeling_bert.py') lowercase : Optional[int] = os.path.join('tests', 'models', 'blip', 'test_modeling_blip.py') class A ( unittest.TestCase ): def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" A : Tuple = get_test_to_tester_mapping(SCREAMING_SNAKE_CASE ) A : Any = get_test_to_tester_mapping(SCREAMING_SNAKE_CASE ) A : List[Any] = {'''BertModelTest''': '''BertModelTester'''} A : int = { '''BlipModelTest''': '''BlipModelTester''', '''BlipTextImageModelTest''': '''BlipTextImageModelsModelTester''', '''BlipTextModelTest''': '''BlipTextModelTester''', '''BlipTextRetrievalModelTest''': '''BlipTextRetrievalModelTester''', '''BlipVQAModelTest''': '''BlipVQAModelTester''', '''BlipVisionModelTest''': '''BlipVisionModelTester''', } self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Any: """simple docstring""" A : Tuple = get_model_to_test_mapping(SCREAMING_SNAKE_CASE ) A : Tuple = get_model_to_test_mapping(SCREAMING_SNAKE_CASE ) A : List[str] = { '''BertForMaskedLM''': ['''BertModelTest'''], '''BertForMultipleChoice''': ['''BertModelTest'''], '''BertForNextSentencePrediction''': ['''BertModelTest'''], '''BertForPreTraining''': ['''BertModelTest'''], '''BertForQuestionAnswering''': ['''BertModelTest'''], '''BertForSequenceClassification''': ['''BertModelTest'''], '''BertForTokenClassification''': ['''BertModelTest'''], '''BertLMHeadModel''': ['''BertModelTest'''], '''BertModel''': ['''BertModelTest'''], } A : Union[str, Any] = { '''BlipForConditionalGeneration''': ['''BlipTextImageModelTest'''], '''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTest'''], '''BlipForQuestionAnswering''': ['''BlipVQAModelTest'''], '''BlipModel''': ['''BlipModelTest'''], '''BlipTextModel''': ['''BlipTextModelTest'''], '''BlipVisionModel''': ['''BlipVisionModelTest'''], } self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" A : int = get_model_to_tester_mapping(SCREAMING_SNAKE_CASE ) A : Union[str, Any] = get_model_to_tester_mapping(SCREAMING_SNAKE_CASE ) A : Dict = { '''BertForMaskedLM''': ['''BertModelTester'''], '''BertForMultipleChoice''': ['''BertModelTester'''], '''BertForNextSentencePrediction''': ['''BertModelTester'''], '''BertForPreTraining''': ['''BertModelTester'''], '''BertForQuestionAnswering''': ['''BertModelTester'''], '''BertForSequenceClassification''': ['''BertModelTester'''], '''BertForTokenClassification''': ['''BertModelTester'''], '''BertLMHeadModel''': ['''BertModelTester'''], '''BertModel''': ['''BertModelTester'''], } A : str = { '''BlipForConditionalGeneration''': ['''BlipTextImageModelsModelTester'''], '''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTester'''], '''BlipForQuestionAnswering''': ['''BlipVQAModelTester'''], '''BlipModel''': ['''BlipModelTester'''], '''BlipTextModel''': 
['''BlipTextModelTester'''], '''BlipVisionModel''': ['''BlipVisionModelTester'''], } self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
3
0
"""simple docstring""" import math from typing import List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils import SchedulerMixin, SchedulerOutput class UpperCAmelCase_ ( _a , _a): lowerCamelCase__ : Any = 1 @register_to_config def __init__( self , a = 1_0_0_0 , a = None ) -> Dict: # set `betas`, `alphas`, `timesteps` self.set_timesteps(a ) # standard deviation of the initial noise distribution lowercase__ : Optional[int] = 1.0 # For now we only support F-PNDM, i.e. the runge-kutta method # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf # mainly at formula (9), (12), (13) and the Algorithm 2. lowercase__ : List[Any] = 4 # running values lowercase__ : Dict = [] def _UpperCAmelCase ( self , a , a = None ) -> Optional[int]: lowercase__ : int = num_inference_steps lowercase__ : Optional[int] = torch.linspace(1 , 0 , num_inference_steps + 1 )[:-1] lowercase__ : str = torch.cat([steps, torch.tensor([0.0] )] ) if self.config.trained_betas is not None: lowercase__ : Optional[int] = torch.tensor(self.config.trained_betas , dtype=torch.floataa ) else: lowercase__ : Optional[int] = torch.sin(steps * math.pi / 2 ) ** 2 lowercase__ : int = (1.0 - self.betas**2) ** 0.5 lowercase__ : Optional[int] = (torch.atana(self.betas , self.alphas ) / math.pi * 2)[:-1] lowercase__ : int = timesteps.to(a ) lowercase__ : Any = [] def _UpperCAmelCase ( self , a , a , a , a = True , ) -> Union[SchedulerOutput, Tuple]: if self.num_inference_steps is None: raise ValueError( 'Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler' ) lowercase__ : Optional[int] = (self.timesteps == timestep).nonzero().item() lowercase__ : int = timestep_index + 1 lowercase__ : Tuple = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index] self.ets.append(a ) if len(self.ets ) == 1: lowercase__ : Tuple = self.ets[-1] elif len(self.ets ) == 2: lowercase__ : Dict = (3 * self.ets[-1] - self.ets[-2]) / 2 elif len(self.ets ) == 3: lowercase__ : List[Any] = (2_3 * self.ets[-1] - 1_6 * self.ets[-2] + 5 * self.ets[-3]) / 1_2 else: lowercase__ : Dict = (1 / 2_4) * (5_5 * self.ets[-1] - 5_9 * self.ets[-2] + 3_7 * self.ets[-3] - 9 * self.ets[-4]) lowercase__ : str = self._get_prev_sample(a , a , a , a ) if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=a ) def _UpperCAmelCase ( self , a , *a , **a ) -> torch.FloatTensor: return sample def _UpperCAmelCase ( self , a , a , a , a ) -> int: lowercase__ : Dict = self.alphas[timestep_index] lowercase__ : Tuple = self.betas[timestep_index] lowercase__ : Dict = self.alphas[prev_timestep_index] lowercase__ : Tuple = self.betas[prev_timestep_index] lowercase__ : Optional[Any] = (sample - sigma * ets) / max(a , 1e-8 ) lowercase__ : Optional[int] = next_alpha * pred + ets * next_sigma return prev_sample def __len__( self ) -> Union[str, Any]: return self.config.num_train_timesteps
77
'''simple docstring'''
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow

from ..bert.test_tokenization_bert import BertTokenizationTest


@require_tokenizers
class DistilBertTokenizationTest(BertTokenizationTest):
    tokenizer_class = DistilBertTokenizer
    rust_tokenizer_class = DistilBertTokenizerFast
    test_rust_tokenizer = True

    @slow
    def test_sequence_builders(self):
        '''simple docstring'''
        tokenizer = DistilBertTokenizer.from_pretrained('''distilbert-base-uncased''')

        text = tokenizer.encode('''sequence builders''', add_special_tokens=False)
        text_2 = tokenizer.encode('''multi-sequence build''', add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
3
0
"""simple docstring""" import random class A_ : """simple docstring""" @staticmethod def UpperCAmelCase__ ( lowercase_ :str ) -> tuple[list[int], list[int]]: UpperCAmelCase = [ord(lowercase_ ) for i in text] UpperCAmelCase = [] UpperCAmelCase = [] for i in plain: UpperCAmelCase = random.randint(1 , 3_00 ) UpperCAmelCase = (i + k) * k cipher.append(lowercase_ ) key.append(lowercase_ ) return cipher, key @staticmethod def UpperCAmelCase__ ( lowercase_ :list[int] , lowercase_ :list[int] ) -> str: UpperCAmelCase = [] for i in range(len(lowercase_ ) ): UpperCAmelCase = int((cipher[i] - (key[i]) ** 2) / key[i] ) plain.append(chr(lowercase_ ) ) return "".join(lowercase_ ) if __name__ == "__main__": snake_case_ , snake_case_ = Onepad().encrypt("""Hello""") print(c, k) print(Onepad().decrypt(c, k))
78
'''simple docstring''' from typing import List, Optional, Union import numpy as np import torch import torchaudio.compliance.kaldi as ta_kaldi from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging lowercase : Optional[int] = logging.get_logger(__name__) class A ( __snake_case ): __magic_name__ = ['''input_features''', '''attention_mask'''] def __init__( self , SCREAMING_SNAKE_CASE=80 , SCREAMING_SNAKE_CASE=16000 , SCREAMING_SNAKE_CASE=80 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , **SCREAMING_SNAKE_CASE , ) -> int: """simple docstring""" super().__init__(feature_size=SCREAMING_SNAKE_CASE , sampling_rate=SCREAMING_SNAKE_CASE , padding_value=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) A : Optional[int] = num_mel_bins A : Tuple = do_ceptral_normalize A : Dict = normalize_means A : List[Any] = normalize_vars A : List[str] = True def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , ) -> np.ndarray: """simple docstring""" A : List[Any] = waveform * (2**15) # Kaldi compliance: 16-bit signed integers A : Any = torch.from_numpy(SCREAMING_SNAKE_CASE ).unsqueeze(0 ) A : Any = ta_kaldi.fbank(SCREAMING_SNAKE_CASE , num_mel_bins=self.num_mel_bins , sample_frequency=self.sampling_rate ) return features.numpy() @staticmethod def __lowerCAmelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = 0.0 , ) -> np.ndarray: """simple docstring""" if normalize_means: A : Dict = x[:input_length].mean(axis=0 ) A : Optional[Any] = np.subtract(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) if normalize_vars: A : str = x[:input_length].std(axis=0 ) A : int = np.divide(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) if input_length < x.shape[0]: A : List[str] = padding_value # make sure array is in float32 A : Tuple = x.astype(np.floataa ) return x def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None ) -> List[np.ndarray]: """simple docstring""" A : List[Any] = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features] return [ self.utterance_cmvn(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , self.normalize_means , self.normalize_vars , self.padding_value ) for x, n in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ] def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ) -> BatchFeature: """simple docstring""" if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F'The model corresponding to this feature extractor: {self} was trained using a sampling rate of' F' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with' F' {self.sampling_rate} and not {sampling_rate}.' ) else: logger.warning( '''It is strongly recommended to pass the `sampling_rate` argument to this function. 
''' '''Failing to do so can result in silent errors that might be hard to debug.''' ) A : List[Any] = isinstance(SCREAMING_SNAKE_CASE , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(F'Only mono-channel audio is supported for input to {self}' ) A : Tuple = is_batched_numpy or ( isinstance(SCREAMING_SNAKE_CASE , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: A : Union[str, Any] = [np.asarray(SCREAMING_SNAKE_CASE , dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(SCREAMING_SNAKE_CASE , np.ndarray ): A : Union[str, Any] = np.asarray(SCREAMING_SNAKE_CASE , dtype=np.floataa ) elif isinstance(SCREAMING_SNAKE_CASE , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): A : Optional[int] = raw_speech.astype(np.floataa ) # always return batch if not is_batched: A : Any = [raw_speech] # extract fbank features A : List[str] = [self._extract_fbank_features(SCREAMING_SNAKE_CASE ) for waveform in raw_speech] # convert into correct format for padding A : str = BatchFeature({'''input_features''': features} ) A : Union[str, Any] = self.pad( SCREAMING_SNAKE_CASE , padding=SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , truncation=SCREAMING_SNAKE_CASE , pad_to_multiple_of=SCREAMING_SNAKE_CASE , return_attention_mask=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , ) # make sure list is in array format A : List[str] = padded_inputs.get('''input_features''' ) if isinstance(input_features[0] , SCREAMING_SNAKE_CASE ): A : str = [np.asarray(SCREAMING_SNAKE_CASE , dtype=np.floataa ) for feature in input_features] A : Union[str, Any] = padded_inputs.get('''attention_mask''' ) if attention_mask is not None: A : Union[str, Any] = [np.asarray(SCREAMING_SNAKE_CASE , dtype=np.intaa ) for array in attention_mask] # Utterance-level cepstral mean and variance normalization if self.do_ceptral_normalize: A : Dict = ( np.array(SCREAMING_SNAKE_CASE , dtype=np.intaa ) if self._get_padding_strategies(SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE ) is not PaddingStrategy.DO_NOT_PAD else None ) A : List[Any] = self.normalize( padded_inputs['''input_features'''] , attention_mask=SCREAMING_SNAKE_CASE ) if return_tensors is not None: A : int = padded_inputs.convert_to_tensors(SCREAMING_SNAKE_CASE ) return padded_inputs
3
0
'''simple docstring''' import argparse import json from collections import OrderedDict from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SegformerConfig, SegformerForImageClassification, SegformerForSemanticSegmentation, SegformerImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() lowerCamelCase_ = logging.get_logger(__name__) def __lowercase ( __lowercase , __lowercase=False ) -> List[str]: '''simple docstring''' _A = OrderedDict() for key, value in state_dict.items(): if encoder_only and not key.startswith("head" ): _A = "segformer.encoder." + key if key.startswith("backbone" ): _A = key.replace("backbone" , "segformer.encoder" ) if "patch_embed" in key: # replace for example patch_embed1 by patch_embeddings.0 _A = key[key.find("patch_embed" ) + len("patch_embed" )] _A = key.replace(F'''patch_embed{idx}''' , F'''patch_embeddings.{int(__lowercase )-1}''' ) if "norm" in key: _A = key.replace("norm" , "layer_norm" ) if "segformer.encoder.layer_norm" in key: # replace for example layer_norm1 by layer_norm.0 _A = key[key.find("segformer.encoder.layer_norm" ) + len("segformer.encoder.layer_norm" )] _A = key.replace(F'''layer_norm{idx}''' , F'''layer_norm.{int(__lowercase )-1}''' ) if "layer_norm1" in key: _A = key.replace("layer_norm1" , "layer_norm_1" ) if "layer_norm2" in key: _A = key.replace("layer_norm2" , "layer_norm_2" ) if "block" in key: # replace for example block1 by block.0 _A = key[key.find("block" ) + len("block" )] _A = key.replace(F'''block{idx}''' , F'''block.{int(__lowercase )-1}''' ) if "attn.q" in key: _A = key.replace("attn.q" , "attention.self.query" ) if "attn.proj" in key: _A = key.replace("attn.proj" , "attention.output.dense" ) if "attn" in key: _A = key.replace("attn" , "attention.self" ) if "fc1" in key: _A = key.replace("fc1" , "dense1" ) if "fc2" in key: _A = key.replace("fc2" , "dense2" ) if "linear_pred" in key: _A = key.replace("linear_pred" , "classifier" ) if "linear_fuse" in key: _A = key.replace("linear_fuse.conv" , "linear_fuse" ) _A = key.replace("linear_fuse.bn" , "batch_norm" ) if "linear_c" in key: # replace for example linear_c4 by linear_c.3 _A = key[key.find("linear_c" ) + len("linear_c" )] _A = key.replace(F'''linear_c{idx}''' , F'''linear_c.{int(__lowercase )-1}''' ) if key.startswith("head" ): _A = key.replace("head" , "classifier" ) _A = value return new_state_dict def __lowercase ( __lowercase , __lowercase ) -> Optional[Any]: '''simple docstring''' for i in range(config.num_encoder_blocks ): for j in range(config.depths[i] ): # read in weights + bias of keys and values (which is a single matrix in the original implementation) _A = state_dict.pop(F'''segformer.encoder.block.{i}.{j}.attention.self.kv.weight''' ) _A = state_dict.pop(F'''segformer.encoder.block.{i}.{j}.attention.self.kv.bias''' ) # next, add keys and values (in that order) to the state dict _A = kv_weight[ : config.hidden_sizes[i], : ] _A = kv_bias[: config.hidden_sizes[i]] _A = kv_weight[ config.hidden_sizes[i] :, : ] _A = kv_bias[ config.hidden_sizes[i] : ] def __lowercase ( ) -> List[str]: '''simple docstring''' _A = "http://images.cocodataset.org/val2017/000000039769.jpg" _A = Image.open(requests.get(__lowercase , stream=__lowercase ).raw ) return image @torch.no_grad() def __lowercase ( __lowercase , __lowercase , __lowercase ) -> str: '''simple docstring''' _A = SegformerConfig() _A = False # set attributes based on model_name _A = 
"huggingface/label-files" if "segformer" in model_name: _A = model_name[len("segformer." ) : len("segformer." ) + 2] if "ade" in model_name: _A = 150 _A = "ade20k-id2label.json" _A = (1, 150, 128, 128) elif "city" in model_name: _A = 19 _A = "cityscapes-id2label.json" _A = (1, 19, 128, 128) else: raise ValueError(F'''Model {model_name} not supported''' ) elif "mit" in model_name: _A = True _A = model_name[4:6] _A = 1000 _A = "imagenet-1k-id2label.json" _A = (1, 1000) else: raise ValueError(F'''Model {model_name} not supported''' ) # set config attributes _A = json.load(open(hf_hub_download(__lowercase , __lowercase , repo_type="dataset" ) , "r" ) ) _A = {int(__lowercase ): v for k, v in idalabel.items()} _A = idalabel _A = {v: k for k, v in idalabel.items()} if size == "b0": pass elif size == "b1": _A = [64, 128, 320, 512] _A = 256 elif size == "b2": _A = [64, 128, 320, 512] _A = 768 _A = [3, 4, 6, 3] elif size == "b3": _A = [64, 128, 320, 512] _A = 768 _A = [3, 4, 18, 3] elif size == "b4": _A = [64, 128, 320, 512] _A = 768 _A = [3, 8, 27, 3] elif size == "b5": _A = [64, 128, 320, 512] _A = 768 _A = [3, 6, 40, 3] else: raise ValueError(F'''Size {size} not supported''' ) # load image processor (only resize + normalize) _A = SegformerImageProcessor( image_scale=(512, 512) , keep_ratio=__lowercase , align=__lowercase , do_random_crop=__lowercase ) # prepare image _A = prepare_img() _A = image_processor(images=__lowercase , return_tensors="pt" ).pixel_values logger.info(F'''Converting model {model_name}...''' ) # load original state dict if encoder_only: _A = torch.load(__lowercase , map_location=torch.device("cpu" ) ) else: _A = torch.load(__lowercase , map_location=torch.device("cpu" ) )["state_dict"] # rename keys _A = rename_keys(__lowercase , encoder_only=__lowercase ) if not encoder_only: del state_dict["decode_head.conv_seg.weight"] del state_dict["decode_head.conv_seg.bias"] # key and value matrices need special treatment read_in_k_v(__lowercase , __lowercase ) # create HuggingFace model and load state dict if encoder_only: _A = False _A = SegformerForImageClassification(__lowercase ) else: _A = SegformerForSemanticSegmentation(__lowercase ) model.load_state_dict(__lowercase ) model.eval() # forward pass _A = model(__lowercase ) _A = outputs.logits # set expected_slice based on model name # ADE20k checkpoints if model_name == "segformer.b0.512x512.ade.160k": _A = torch.tensor( [ [[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]], [[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]], [[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]], ] ) elif model_name == "segformer.b1.512x512.ade.160k": _A = torch.tensor( [ [[-7.5820, -8.7231, -8.3215], [-8.0600, -10.3529, -10.0304], [-7.5208, -9.4103, -9.6239]], [[-12.6918, -13.8994, -13.7137], [-13.3196, -15.7523, -15.4789], [-12.9343, -14.8757, -14.9689]], [[-11.1911, -11.9421, -11.3243], [-11.3342, -13.6839, -13.3581], [-10.3909, -12.1832, -12.4858]], ] ) elif model_name == "segformer.b2.512x512.ade.160k": _A = torch.tensor( [ [[-11.8173, -14.3850, -16.3128], [-14.5648, -16.5804, -18.6568], [-14.7223, -15.7387, -18.4218]], [[-15.7290, -17.9171, -19.4423], [-18.3105, -19.9448, -21.4661], [-17.9296, -18.6497, -20.7910]], [[-15.0783, -17.0336, -18.2789], [-16.8771, -18.6870, -20.1612], [-16.2454, -17.1426, -19.5055]], ] ) elif model_name == "segformer.b3.512x512.ade.160k": _A = torch.tensor( [ [[-9.0878, -10.2081, -10.1891], 
[-9.3144, -10.7941, -10.9843], [-9.2294, -10.3855, -10.5704]], [[-12.2316, -13.9068, -13.6102], [-12.9161, -14.3702, -14.3235], [-12.5233, -13.7174, -13.7932]], [[-14.6275, -15.2490, -14.9727], [-14.3400, -15.9687, -16.2827], [-14.1484, -15.4033, -15.8937]], ] ) elif model_name == "segformer.b4.512x512.ade.160k": _A = torch.tensor( [ [[-12.3144, -13.2447, -14.0802], [-13.3614, -14.5816, -15.6117], [-13.3340, -14.4433, -16.2219]], [[-19.2781, -20.4128, -20.7506], [-20.6153, -21.6566, -22.0998], [-19.9800, -21.0430, -22.1494]], [[-18.8739, -19.7804, -21.1834], [-20.1233, -21.6765, -23.2944], [-20.0315, -21.2641, -23.6944]], ] ) elif model_name == "segformer.b5.640x640.ade.160k": _A = torch.tensor( [ [[-9.5524, -12.0835, -11.7348], [-10.5229, -13.6446, -14.5662], [-9.5842, -12.8851, -13.9414]], [[-15.3432, -17.5323, -17.0818], [-16.3330, -18.9255, -19.2101], [-15.1340, -17.7848, -18.3971]], [[-12.6072, -14.9486, -14.6631], [-13.7629, -17.0907, -17.7745], [-12.7899, -16.1695, -17.1671]], ] ) # Cityscapes checkpoints elif model_name == "segformer.b0.1024x1024.city.160k": _A = torch.tensor( [ [[-11.9295, -13.4057, -14.8106], [-13.3431, -14.8179, -15.3781], [-14.2836, -15.5942, -16.1588]], [[-11.4906, -12.8067, -13.6564], [-13.1189, -14.0500, -14.1543], [-13.8748, -14.5136, -14.8789]], [[0.5374, 0.1067, -0.4742], [0.1141, -0.2255, -0.7099], [-0.3000, -0.5924, -1.3105]], ] ) elif model_name == "segformer.b0.512x1024.city.160k": _A = torch.tensor( [ [[-7.8217, -9.8767, -10.1717], [-9.4438, -10.9058, -11.4047], [-9.7939, -12.3495, -12.1079]], [[-7.1514, -9.5336, -10.0860], [-9.7776, -11.6822, -11.8439], [-10.1411, -12.7655, -12.8972]], [[0.3021, 0.0805, -0.2310], [-0.0328, -0.1605, -0.2714], [-0.1408, -0.5477, -0.6976]], ] ) elif model_name == "segformer.b0.640x1280.city.160k": _A = torch.tensor( [ [ [-1.1372e01, -1.2787e01, -1.3477e01], [-1.2536e01, -1.4194e01, -1.4409e01], [-1.3217e01, -1.4888e01, -1.5327e01], ], [ [-1.4791e01, -1.7122e01, -1.8277e01], [-1.7163e01, -1.9192e01, -1.9533e01], [-1.7897e01, -1.9991e01, -2.0315e01], ], [ [7.6723e-01, 4.1921e-01, -7.7878e-02], [4.7772e-01, 9.5557e-03, -2.8082e-01], [3.6032e-01, -2.4826e-01, -5.1168e-01], ], ] ) elif model_name == "segformer.b0.768x768.city.160k": _A = torch.tensor( [ [[-9.4959, -11.3087, -11.7479], [-11.0025, -12.6540, -12.3319], [-11.4064, -13.0487, -12.9905]], [[-9.8905, -11.3084, -12.0854], [-11.1726, -12.7698, -12.9583], [-11.5985, -13.3278, -14.1774]], [[0.2213, 0.0192, -0.2466], [-0.1731, -0.4213, -0.4874], [-0.3126, -0.6541, -1.1389]], ] ) elif model_name == "segformer.b1.1024x1024.city.160k": _A = torch.tensor( [ [[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]], [[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]], [[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]], ] ) elif model_name == "segformer.b2.1024x1024.city.160k": _A = torch.tensor( [ [[-16.0976, -16.4856, -17.3962], [-16.6234, -19.0342, -19.7685], [-16.0900, -18.0661, -19.1180]], [[-18.4750, -18.8488, -19.5074], [-19.4030, -22.1570, -22.5977], [-19.1191, -20.8486, -22.3783]], [[-4.5178, -5.5037, -6.5109], [-5.0884, -7.2174, -8.0334], [-4.4156, -5.8117, -7.2970]], ] ) elif model_name == "segformer.b3.1024x1024.city.160k": _A = torch.tensor( [ [[-14.2081, -14.4732, -14.1977], [-14.5867, -16.4423, -16.6356], [-13.4441, -14.9685, -16.8696]], [[-14.4576, -14.7073, -15.0451], [-15.0816, -17.6237, -17.9873], [-14.4213, -16.0199, -18.5992]], 
[[-4.7349, -4.9588, -5.0966], [-4.3210, -6.9325, -7.2591], [-3.4312, -4.7484, -7.1917]], ] ) elif model_name == "segformer.b4.1024x1024.city.160k": _A = torch.tensor( [ [[-11.7737, -11.9526, -11.3273], [-13.6692, -14.4574, -13.8878], [-13.8937, -14.6924, -15.9345]], [[-14.6706, -14.5330, -14.1306], [-16.1502, -16.8180, -16.4269], [-16.8338, -17.8939, -20.1746]], [[1.0491, 0.8289, 1.0310], [1.1044, 0.5219, 0.8055], [1.0899, 0.6926, 0.5590]], ] ) elif model_name == "segformer.b5.1024x1024.city.160k": _A = torch.tensor( [ [[-12.5641, -13.4777, -13.0684], [-13.9587, -15.8983, -16.6557], [-13.3109, -15.7350, -16.3141]], [[-14.7074, -15.4352, -14.5944], [-16.6353, -18.1663, -18.6120], [-15.1702, -18.0329, -18.1547]], [[-1.7990, -2.0951, -1.7784], [-2.6397, -3.8245, -3.9686], [-1.5264, -2.8126, -2.9316]], ] ) else: _A = logits.argmax(-1 ).item() print("Predicted class:" , model.config.idalabel[predicted_class_idx] ) # verify logits if not encoder_only: assert logits.shape == expected_shape assert torch.allclose(logits[0, :3, :3, :3] , __lowercase , atol=1e-2 ) # finally, save model and image processor logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' ) Path(__lowercase ).mkdir(exist_ok=__lowercase ) model.save_pretrained(__lowercase ) image_processor.save_pretrained(__lowercase ) if __name__ == "__main__": lowerCamelCase_ = argparse.ArgumentParser() parser.add_argument( '''--model_name''', default='''segformer.b0.512x512.ade.160k''', type=str, help='''Name of the model you\'d like to convert.''', ) parser.add_argument( '''--checkpoint_path''', default=None, type=str, help='''Path to the original PyTorch checkpoint (.pth file).''' ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.''' ) lowerCamelCase_ = parser.parse_args() convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
79
'''simple docstring''' import json import os import sys import tempfile import unittest from pathlib import Path from shutil import copyfile from huggingface_hub import HfFolder, Repository, create_repo, delete_repo from requests.exceptions import HTTPError import transformers from transformers import ( CONFIG_MAPPING, FEATURE_EXTRACTOR_MAPPING, PROCESSOR_MAPPING, TOKENIZER_MAPPING, AutoConfig, AutoFeatureExtractor, AutoProcessor, AutoTokenizer, BertTokenizer, ProcessorMixin, WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaProcessor, ) from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils')) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402 from test_module.custom_processing import CustomProcessor # noqa E402 from test_module.custom_tokenization import CustomTokenizer # noqa E402 lowercase : str = get_tests_dir('fixtures/dummy_feature_extractor_config.json') lowercase : str = get_tests_dir('fixtures/vocab.json') lowercase : int = get_tests_dir('fixtures') class A ( unittest.TestCase ): __magic_name__ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou'''] def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" A : Tuple = 0 def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" A : List[Any] = AutoProcessor.from_pretrained('''facebook/wav2vec2-base-960h''' ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: A : Union[str, Any] = WavaVecaConfig() A : List[str] = AutoProcessor.from_pretrained('''facebook/wav2vec2-base-960h''' ) # save in new folder model_config.save_pretrained(SCREAMING_SNAKE_CASE ) processor.save_pretrained(SCREAMING_SNAKE_CASE ) A : List[str] = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> int: """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: # copy relevant files copyfile(SCREAMING_SNAKE_CASE , os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ) copyfile(SCREAMING_SNAKE_CASE , os.path.join(SCREAMING_SNAKE_CASE , '''vocab.json''' ) ) A : Optional[Any] = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: A : Dict = WavaVecaFeatureExtractor() A : List[str] = AutoTokenizer.from_pretrained('''facebook/wav2vec2-base-960h''' ) A : str = WavaVecaProcessor(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # save in new folder processor.save_pretrained(SCREAMING_SNAKE_CASE ) # drop `processor_class` in tokenizer with open(os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , '''r''' ) as f: A : Dict = json.load(SCREAMING_SNAKE_CASE ) config_dict.pop('''processor_class''' ) with open(os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , '''w''' ) as f: f.write(json.dumps(SCREAMING_SNAKE_CASE ) ) A : Optional[Any] = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE ) 
self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> int: """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: A : List[Any] = WavaVecaFeatureExtractor() A : List[Any] = AutoTokenizer.from_pretrained('''facebook/wav2vec2-base-960h''' ) A : str = WavaVecaProcessor(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # save in new folder processor.save_pretrained(SCREAMING_SNAKE_CASE ) # drop `processor_class` in feature extractor with open(os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , '''r''' ) as f: A : str = json.load(SCREAMING_SNAKE_CASE ) config_dict.pop('''processor_class''' ) with open(os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , '''w''' ) as f: f.write(json.dumps(SCREAMING_SNAKE_CASE ) ) A : str = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: A : str = WavaVecaConfig(processor_class='''Wav2Vec2Processor''' ) model_config.save_pretrained(SCREAMING_SNAKE_CASE ) # copy relevant files copyfile(SCREAMING_SNAKE_CASE , os.path.join(SCREAMING_SNAKE_CASE , '''vocab.json''' ) ) # create emtpy sample processor with open(os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , '''w''' ) as f: f.write('''{}''' ) A : List[str] = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Any: """simple docstring""" with self.assertRaises(SCREAMING_SNAKE_CASE ): A : Optional[Any] = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' ) # If remote code is disabled, we can't load this config. 
with self.assertRaises(SCREAMING_SNAKE_CASE ): A : Union[str, Any] = AutoProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=SCREAMING_SNAKE_CASE ) A : Union[str, Any] = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=SCREAMING_SNAKE_CASE ) self.assertTrue(processor.special_attribute_present ) self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' ) A : List[str] = processor.feature_extractor self.assertTrue(feature_extractor.special_attribute_present ) self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' ) A : Tuple = processor.tokenizer self.assertTrue(tokenizer.special_attribute_present ) if is_tokenizers_available(): self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' ) # Test we can also load the slow version A : List[str] = AutoProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=SCREAMING_SNAKE_CASE , use_fast=SCREAMING_SNAKE_CASE ) A : int = new_processor.tokenizer self.assertTrue(new_tokenizer.special_attribute_present ) self.assertEqual(new_tokenizer.__class__.__name__ , '''NewTokenizer''' ) else: self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' ) def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" try: AutoConfig.register('''custom''' , SCREAMING_SNAKE_CASE ) AutoFeatureExtractor.register(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) AutoTokenizer.register(SCREAMING_SNAKE_CASE , slow_tokenizer_class=SCREAMING_SNAKE_CASE ) AutoProcessor.register(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(SCREAMING_SNAKE_CASE ): AutoProcessor.register(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # Now that the config is registered, it can be used as any other config with the auto-API A : List[Any] = CustomFeatureExtractor.from_pretrained(SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmp_dir: A : Tuple = os.path.join(SCREAMING_SNAKE_CASE , '''vocab.txt''' ) with open(SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) ) A : Optional[int] = CustomTokenizer(SCREAMING_SNAKE_CASE ) A : Any = CustomProcessor(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmp_dir: processor.save_pretrained(SCREAMING_SNAKE_CASE ) A : List[str] = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] if CustomConfig in PROCESSOR_MAPPING._extra_content: del PROCESSOR_MAPPING._extra_content[CustomConfig] def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" class A ( __snake_case ): __magic_name__ = False class A ( __snake_case ): __magic_name__ = False class A ( __snake_case ): __magic_name__ = '''AutoFeatureExtractor''' __magic_name__ = '''AutoTokenizer''' __magic_name__ = False try: AutoConfig.register('''custom''' , SCREAMING_SNAKE_CASE ) AutoFeatureExtractor.register(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) 
AutoTokenizer.register(SCREAMING_SNAKE_CASE , slow_tokenizer_class=SCREAMING_SNAKE_CASE ) AutoProcessor.register(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # If remote code is not set, the default is to use local classes. A : Union[str, Any] = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' ) self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' ) self.assertFalse(processor.special_attribute_present ) self.assertFalse(processor.feature_extractor.special_attribute_present ) self.assertFalse(processor.tokenizer.special_attribute_present ) # If remote code is disabled, we load the local ones. A : Optional[int] = AutoProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=SCREAMING_SNAKE_CASE ) self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' ) self.assertFalse(processor.special_attribute_present ) self.assertFalse(processor.feature_extractor.special_attribute_present ) self.assertFalse(processor.tokenizer.special_attribute_present ) # If remote is enabled, we load from the Hub. A : Tuple = AutoProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=SCREAMING_SNAKE_CASE ) self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' ) self.assertTrue(processor.special_attribute_present ) self.assertTrue(processor.feature_extractor.special_attribute_present ) self.assertTrue(processor.tokenizer.special_attribute_present ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] if CustomConfig in PROCESSOR_MAPPING._extra_content: del PROCESSOR_MAPPING._extra_content[CustomConfig] def __lowerCAmelCase ( self ) -> str: """simple docstring""" A : int = AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-bert''' ) self.assertEqual(processor.__class__.__name__ , '''BertTokenizerFast''' ) def __lowerCAmelCase ( self ) -> str: """simple docstring""" A : Optional[int] = AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-convnext''' ) self.assertEqual(processor.__class__.__name__ , '''ConvNextImageProcessor''' ) @is_staging_test class A ( unittest.TestCase ): __magic_name__ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou'''] @classmethod def __lowerCAmelCase ( cls ) -> Dict: """simple docstring""" A : Optional[int] = TOKEN HfFolder.save_token(SCREAMING_SNAKE_CASE ) @classmethod def __lowerCAmelCase ( cls ) -> Any: """simple docstring""" try: delete_repo(token=cls._token , repo_id='''test-processor''' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''valid_org/test-processor-org''' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''test-dynamic-processor''' ) except HTTPError: pass def __lowerCAmelCase ( self ) -> Dict: """simple docstring""" A : Union[str, Any] = WavaVecaProcessor.from_pretrained(SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmp_dir: processor.save_pretrained( os.path.join(SCREAMING_SNAKE_CASE , '''test-processor''' ) , push_to_hub=SCREAMING_SNAKE_CASE , use_auth_token=self._token ) A : int = WavaVecaProcessor.from_pretrained(F'{USER}/test-processor' ) for k, v in processor.feature_extractor.__dict__.items(): self.assertEqual(SCREAMING_SNAKE_CASE , 
getattr(new_processor.feature_extractor , SCREAMING_SNAKE_CASE ) ) self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() ) def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" A : Tuple = WavaVecaProcessor.from_pretrained(SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmp_dir: processor.save_pretrained( os.path.join(SCREAMING_SNAKE_CASE , '''test-processor-org''' ) , push_to_hub=SCREAMING_SNAKE_CASE , use_auth_token=self._token , organization='''valid_org''' , ) A : int = WavaVecaProcessor.from_pretrained('''valid_org/test-processor-org''' ) for k, v in processor.feature_extractor.__dict__.items(): self.assertEqual(SCREAMING_SNAKE_CASE , getattr(new_processor.feature_extractor , SCREAMING_SNAKE_CASE ) ) self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() ) def __lowerCAmelCase ( self ) -> str: """simple docstring""" CustomFeatureExtractor.register_for_auto_class() CustomTokenizer.register_for_auto_class() CustomProcessor.register_for_auto_class() A : Any = CustomFeatureExtractor.from_pretrained(SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmp_dir: A : Union[str, Any] = os.path.join(SCREAMING_SNAKE_CASE , '''vocab.txt''' ) with open(SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) ) A : str = CustomTokenizer(SCREAMING_SNAKE_CASE ) A : Union[str, Any] = CustomProcessor(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmp_dir: create_repo(F'{USER}/test-dynamic-processor' , token=self._token ) A : List[str] = Repository(SCREAMING_SNAKE_CASE , clone_from=F'{USER}/test-dynamic-processor' , token=self._token ) processor.save_pretrained(SCREAMING_SNAKE_CASE ) # This has added the proper auto_map field to the feature extractor config self.assertDictEqual( processor.feature_extractor.auto_map , { '''AutoFeatureExtractor''': '''custom_feature_extraction.CustomFeatureExtractor''', '''AutoProcessor''': '''custom_processing.CustomProcessor''', } , ) # This has added the proper auto_map field to the tokenizer config with open(os.path.join(SCREAMING_SNAKE_CASE , '''tokenizer_config.json''' ) ) as f: A : Dict = json.load(SCREAMING_SNAKE_CASE ) self.assertDictEqual( tokenizer_config['''auto_map'''] , { '''AutoTokenizer''': ['''custom_tokenization.CustomTokenizer''', None], '''AutoProcessor''': '''custom_processing.CustomProcessor''', } , ) # The code has been copied from fixtures self.assertTrue(os.path.isfile(os.path.join(SCREAMING_SNAKE_CASE , '''custom_feature_extraction.py''' ) ) ) self.assertTrue(os.path.isfile(os.path.join(SCREAMING_SNAKE_CASE , '''custom_tokenization.py''' ) ) ) self.assertTrue(os.path.isfile(os.path.join(SCREAMING_SNAKE_CASE , '''custom_processing.py''' ) ) ) repo.push_to_hub() A : Optional[int] = AutoProcessor.from_pretrained(F'{USER}/test-dynamic-processor' , trust_remote_code=SCREAMING_SNAKE_CASE ) # Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module self.assertEqual(new_processor.__class__.__name__ , '''CustomProcessor''' )
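# The save/reload round trip these tests exercise, as a standalone sketch against
# the same public checkpoint the tests use:
import tempfile

from transformers import AutoProcessor

wav2vec2_processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h")
with tempfile.TemporaryDirectory() as tmp_dir:
    wav2vec2_processor.save_pretrained(tmp_dir)        # writes feature-extractor + tokenizer configs
    reloaded = AutoProcessor.from_pretrained(tmp_dir)  # resolved again from the saved processor class
assert type(reloaded) is type(wav2vec2_processor)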
'''simple docstring''' from __future__ import annotations import unittest import numpy as np from transformers import LayoutLMConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.layoutlm.modeling_tf_layoutlm import ( TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFLayoutLMForMaskedLM, TFLayoutLMForQuestionAnswering, TFLayoutLMForSequenceClassification, TFLayoutLMForTokenClassification, TFLayoutLMModel, ) class lowercase_ : def __init__( self , a , a=13 , a=7 , a=True , a=True , a=True , a=True , a=99 , a=32 , a=2 , a=4 , a=37 , a="gelu" , a=0.1 , a=0.1 , a=5_12 , a=16 , a=2 , a=0.02 , a=3 , a=4 , a=None , a=10_00 , ): UpperCamelCase__ = parent UpperCamelCase__ = batch_size UpperCamelCase__ = seq_length UpperCamelCase__ = is_training UpperCamelCase__ = use_input_mask UpperCamelCase__ = use_token_type_ids UpperCamelCase__ = use_labels UpperCamelCase__ = vocab_size UpperCamelCase__ = hidden_size UpperCamelCase__ = num_hidden_layers UpperCamelCase__ = num_attention_heads UpperCamelCase__ = intermediate_size UpperCamelCase__ = hidden_act UpperCamelCase__ = hidden_dropout_prob UpperCamelCase__ = attention_probs_dropout_prob UpperCamelCase__ = max_position_embeddings UpperCamelCase__ = type_vocab_size UpperCamelCase__ = type_sequence_label_size UpperCamelCase__ = initializer_range UpperCamelCase__ = num_labels UpperCamelCase__ = num_choices UpperCamelCase__ = scope UpperCamelCase__ = range_bbox def __a ( self ): UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) # convert bbox to numpy since TF does not support item assignment UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ).numpy() # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: UpperCamelCase__ = bbox[i, j, 3] UpperCamelCase__ = bbox[i, j, 1] UpperCamelCase__ = t if bbox[i, j, 2] < bbox[i, j, 0]: UpperCamelCase__ = bbox[i, j, 2] UpperCamelCase__ = bbox[i, j, 0] UpperCamelCase__ = t UpperCamelCase__ = tf.convert_to_tensor(a ) UpperCamelCase__ = None if self.use_input_mask: UpperCamelCase__ = random_attention_mask([self.batch_size, self.seq_length] ) UpperCamelCase__ = None if self.use_token_type_ids: UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) UpperCamelCase__ = None UpperCamelCase__ = None UpperCamelCase__ = None if self.use_labels: UpperCamelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCamelCase__ = ids_tensor([self.batch_size] , self.num_choices ) UpperCamelCase__ = LayoutLMConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, 
choice_labels def __a ( self , a , a , a , a , a , a , a , a ): UpperCamelCase__ = TFLayoutLMModel(config=a ) UpperCamelCase__ = model(a , a , attention_mask=a , token_type_ids=a ) UpperCamelCase__ = model(a , a , token_type_ids=a ) UpperCamelCase__ = model(a , a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def __a ( self , a , a , a , a , a , a , a , a ): UpperCamelCase__ = TFLayoutLMForMaskedLM(config=a ) UpperCamelCase__ = model(a , a , attention_mask=a , token_type_ids=a , labels=a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __a ( self , a , a , a , a , a , a , a , a ): UpperCamelCase__ = self.num_labels UpperCamelCase__ = TFLayoutLMForSequenceClassification(config=a ) UpperCamelCase__ = model(a , a , attention_mask=a , token_type_ids=a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __a ( self , a , a , a , a , a , a , a , a ): UpperCamelCase__ = self.num_labels UpperCamelCase__ = TFLayoutLMForTokenClassification(config=a ) UpperCamelCase__ = model(a , a , attention_mask=a , token_type_ids=a , labels=a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __a ( self , a , a , a , a , a , a , a , a ): UpperCamelCase__ = TFLayoutLMForQuestionAnswering(config=a ) UpperCamelCase__ = model(a , a , attention_mask=a , token_type_ids=a ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __a ( self ): UpperCamelCase__ = self.prepare_config_and_inputs() ( ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ) = config_and_inputs UpperCamelCase__ = { "input_ids": input_ids, "bbox": bbox, "token_type_ids": token_type_ids, "attention_mask": input_mask, } return config, inputs_dict @require_tf class lowercase_ ( a__ , a__ , unittest.TestCase ): __UpperCAmelCase = ( ( TFLayoutLMModel, TFLayoutLMForMaskedLM, TFLayoutLMForTokenClassification, TFLayoutLMForSequenceClassification, TFLayoutLMForQuestionAnswering, ) if is_tf_available() else () ) __UpperCAmelCase = ( { 'feature-extraction': TFLayoutLMModel, 'fill-mask': TFLayoutLMForMaskedLM, 'text-classification': TFLayoutLMForSequenceClassification, 'token-classification': TFLayoutLMForTokenClassification, 'zero-shot': TFLayoutLMForSequenceClassification, } if is_tf_available() else {} ) __UpperCAmelCase = False __UpperCAmelCase = True __UpperCAmelCase = 10 def __a ( self ): UpperCamelCase__ = TFLayoutLMModelTester(self ) UpperCamelCase__ = ConfigTester(self , config_class=a , hidden_size=37 ) def __a ( self ): self.config_tester.run_common_tests() def __a ( self ): UpperCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a ) def __a ( self ): UpperCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*a ) def __a ( self ): UpperCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*a ) def __a ( self ): UpperCamelCase__ = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_for_token_classification(*a ) def __a ( self ): UpperCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*a ) @slow def __a ( self ): for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCamelCase__ = TFLayoutLMModel.from_pretrained(a ) self.assertIsNotNone(a ) @unittest.skip("Onnx compliancy broke with TF 2.10" ) def __a ( self ): pass def _UpperCamelCase ( ) -> int: '''simple docstring''' UpperCamelCase__ = tf.convert_to_tensor([[101,1019,1014,1016,1037,12849,4747,1004,14246,2278,5439,4524,5002,2930,2193,2930,4341,3208,1005,1055,2171,2848,11300,3531,102],[101,4070,4034,7020,1024,3058,1015,1013,2861,1013,6070,19274,2772,6205,27814,16147,16147,4343,2047,10283,10969,14389,1012,2338,102]] ) # noqa: E231 UpperCamelCase__ = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] ) # noqa: E231 UpperCamelCase__ = tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1000,1000,1000,1000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1000,1000,1000,1000]]] ) # noqa: E231 UpperCamelCase__ = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] ) # noqa: E231 # these are sequence labels (i.e. 
at the token level) UpperCamelCase__ = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]] ) # noqa: E231 # fmt: on return input_ids, attention_mask, bbox, token_type_ids, labels @require_tf class lowercase_ ( unittest.TestCase ): @slow def __a ( self ): UpperCamelCase__ = TFLayoutLMModel.from_pretrained("microsoft/layoutlm-base-uncased" ) UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = prepare_layoutlm_batch_inputs() # forward pass UpperCamelCase__ = model(input_ids=a , bbox=a , attention_mask=a , token_type_ids=a ) # test the sequence output on [0, :3, :3] UpperCamelCase__ = tf.convert_to_tensor( [[0.1785, -0.1947, -0.0425], [-0.3254, -0.2807, 0.2553], [-0.5391, -0.3322, 0.3364]] , ) self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , a , atol=1e-3 ) ) # test the pooled output on [1, :3] UpperCamelCase__ = tf.convert_to_tensor([-0.6580, -0.0214, 0.8552] ) self.assertTrue(np.allclose(outputs.pooler_output[1, :3] , a , atol=1e-3 ) ) @slow def __a ( self ): # initialize model with randomly initialized sequence classification head UpperCamelCase__ = TFLayoutLMForSequenceClassification.from_pretrained("microsoft/layoutlm-base-uncased" , num_labels=2 ) UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = prepare_layoutlm_batch_inputs() # forward pass UpperCamelCase__ = model( input_ids=a , bbox=a , attention_mask=a , token_type_ids=a , labels=tf.convert_to_tensor([1, 1] ) , ) # test whether we get a loss as a scalar UpperCamelCase__ = outputs.loss UpperCamelCase__ = (2,) self.assertEqual(loss.shape , a ) # test the shape of the logits UpperCamelCase__ = outputs.logits UpperCamelCase__ = (2, 2) self.assertEqual(logits.shape , a ) @slow def __a ( self ): # initialize model with randomly initialized token classification head UpperCamelCase__ = TFLayoutLMForTokenClassification.from_pretrained("microsoft/layoutlm-base-uncased" , num_labels=13 ) UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = prepare_layoutlm_batch_inputs() # forward pass UpperCamelCase__ = model( input_ids=a , bbox=a , attention_mask=a , token_type_ids=a , labels=a ) # test the shape of the logits UpperCamelCase__ = outputs.logits UpperCamelCase__ = tf.convert_to_tensor((2, 25, 13) ) self.assertEqual(logits.shape , a ) @slow def __a ( self ): # initialize model with randomly initialized token classification head UpperCamelCase__ = TFLayoutLMForQuestionAnswering.from_pretrained("microsoft/layoutlm-base-uncased" ) UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = prepare_layoutlm_batch_inputs() # forward pass UpperCamelCase__ = model(input_ids=a , bbox=a , attention_mask=a , token_type_ids=a ) # test the shape of the logits UpperCamelCase__ = tf.convert_to_tensor((2, 25) ) self.assertEqual(outputs.start_logits.shape , a ) self.assertEqual(outputs.end_logits.shape , a )
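# The bbox tensors in the integration test above hold (x0, y0, x1, y1) boxes
# normalized to LayoutLM's 0-1000 grid. A minimal normalization helper (the
# function and argument names are illustrative, not from the test file; width and
# height are the page dimensions in pixels):
def normalize_box(box, width, height):
    x0, y0, x1, y1 = box
    return [
        int(1000 * x0 / width),
        int(1000 * y0 / height),
        int(1000 * x1 / width),
        int(1000 * y1 / height),
    ]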
'''simple docstring''' import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_rembert import RemBertTokenizer else: lowercase : Optional[Any] = None lowercase : Tuple = logging.get_logger(__name__) lowercase : Dict = {'vocab_file': 'sentencepiece.model', 'tokenizer_file': 'tokenizer.json'} lowercase : Tuple = { 'vocab_file': { 'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/sentencepiece.model', }, 'tokenizer_file': { 'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/tokenizer.json', }, } lowercase : List[str] = { 'google/rembert': 2_56, } lowercase : Dict = '▁' class A ( __snake_case ): __magic_name__ = VOCAB_FILES_NAMES __magic_name__ = PRETRAINED_VOCAB_FILES_MAP __magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __magic_name__ = RemBertTokenizer def __init__( self , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE="[CLS]" , SCREAMING_SNAKE_CASE="[SEP]" , SCREAMING_SNAKE_CASE="<unk>" , SCREAMING_SNAKE_CASE="[SEP]" , SCREAMING_SNAKE_CASE="<pad>" , SCREAMING_SNAKE_CASE="[CLS]" , SCREAMING_SNAKE_CASE="[MASK]" , **SCREAMING_SNAKE_CASE , ) -> List[str]: """simple docstring""" A : Optional[Any] = AddedToken(SCREAMING_SNAKE_CASE , lstrip=SCREAMING_SNAKE_CASE , rstrip=SCREAMING_SNAKE_CASE ) if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else mask_token super().__init__( SCREAMING_SNAKE_CASE , tokenizer_file=SCREAMING_SNAKE_CASE , do_lower_case=SCREAMING_SNAKE_CASE , remove_space=SCREAMING_SNAKE_CASE , keep_accents=SCREAMING_SNAKE_CASE , bos_token=SCREAMING_SNAKE_CASE , eos_token=SCREAMING_SNAKE_CASE , unk_token=SCREAMING_SNAKE_CASE , sep_token=SCREAMING_SNAKE_CASE , pad_token=SCREAMING_SNAKE_CASE , cls_token=SCREAMING_SNAKE_CASE , mask_token=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , ) A : List[Any] = do_lower_case A : str = remove_space A : int = keep_accents A : Union[str, Any] = vocab_file A : List[Any] = False if not self.vocab_file else True def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None ) -> List[int]: """simple docstring""" A : List[Any] = [self.sep_token_id] A : Tuple = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = False ) -> List[int]: """simple docstring""" if already_has_special_tokens: if token_ids_a is not None: raise ValueError( '''You should not supply a second sequence if the provided sequence of ''' '''ids is already formatted with special tokens for the model.''' ) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is not None: return [1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1] return [1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1] def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None ) -> List[int]: """simple docstring""" A : Tuple = [self.sep_token_id] A : Dict = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def 
__lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None ) -> Tuple[str]: """simple docstring""" if not os.path.isdir(SCREAMING_SNAKE_CASE ): logger.error('''Vocabulary path ({}) should be a directory'''.format(SCREAMING_SNAKE_CASE ) ) return A : Any = os.path.join( SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE ): copyfile(self.vocab_file , SCREAMING_SNAKE_CASE ) return (out_vocab_file,)
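# What the helpers above produce for a RemBERT sequence pair, checked with
# symbolic ids (101/102 stand in for the real [CLS]/[SEP] ids):
cls_ids, sep_ids = [101], [102]
token_ids_a, token_ids_b = [7, 8], [9]
# build_inputs_with_special_tokens: [CLS] A [SEP] B [SEP]
assert cls_ids + token_ids_a + sep_ids + token_ids_b + sep_ids == [101, 7, 8, 102, 9, 102]
# create_token_type_ids_from_sequences: 0s over "[CLS] A [SEP]", 1s over "B [SEP]"
assert len(cls_ids + token_ids_a + sep_ids) * [0] + len(token_ids_b + sep_ids) * [1] == [0, 0, 0, 0, 1, 1]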
"""simple docstring""" def _A ( lowercase = 2_00 ): """simple docstring""" a =[1, 2, 5, 10, 20, 50, 1_00, 2_00] a =[0] * (pence + 1) a =1 # base case: 1 way to make 0 pence for coin in coins: for i in range(lowercase , pence + 1 , 1 ): number_of_ways[i] += number_of_ways[i - coin] return number_of_ways[pence] if __name__ == "__main__": assert solution(2_0_0) == 7_3_6_8_2
'''simple docstring''' from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL lowercase : Optional[Any] = logging.get_logger(__name__) class A ( __snake_case ): __magic_name__ = ['''pixel_values'''] def __init__( self , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = PILImageResampling.BILINEAR , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = 1 / 255 , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ) -> None: """simple docstring""" super().__init__(**SCREAMING_SNAKE_CASE ) A : str = size if size is not None else {'''shortest_edge''': 384} A : Tuple = get_size_dict(SCREAMING_SNAKE_CASE , default_to_square=SCREAMING_SNAKE_CASE ) A : str = do_resize A : List[Any] = size # Default value set here for backwards compatibility where the value in config is None A : List[Any] = crop_pct if crop_pct is not None else 224 / 256 A : Optional[int] = resample A : Union[str, Any] = do_rescale A : List[str] = rescale_factor A : Union[str, Any] = do_normalize A : Any = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN A : Optional[int] = image_std if image_std is not None else IMAGENET_STANDARD_STD def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = PILImageResampling.BICUBIC , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ) -> np.ndarray: """simple docstring""" A : str = get_size_dict(SCREAMING_SNAKE_CASE , default_to_square=SCREAMING_SNAKE_CASE ) if "shortest_edge" not in size: raise ValueError(F'Size dictionary must contain \'shortest_edge\' key. 
Got {size.keys()}' ) A : Any = size['''shortest_edge'''] if shortest_edge < 384: # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct A : Dict = int(shortest_edge / crop_pct ) A : str = get_resize_output_image_size(SCREAMING_SNAKE_CASE , size=SCREAMING_SNAKE_CASE , default_to_square=SCREAMING_SNAKE_CASE ) A : int = resize(image=SCREAMING_SNAKE_CASE , size=SCREAMING_SNAKE_CASE , resample=SCREAMING_SNAKE_CASE , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) # then crop to (shortest_edge, shortest_edge) return center_crop(image=SCREAMING_SNAKE_CASE , size=(shortest_edge, shortest_edge) , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) else: # warping (no cropping) when evaluated at 384 or larger return resize( SCREAMING_SNAKE_CASE , size=(shortest_edge, shortest_edge) , resample=SCREAMING_SNAKE_CASE , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ) -> List[str]: """simple docstring""" return rescale(SCREAMING_SNAKE_CASE , scale=SCREAMING_SNAKE_CASE , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ) -> np.ndarray: """simple docstring""" return normalize(SCREAMING_SNAKE_CASE , mean=SCREAMING_SNAKE_CASE , std=SCREAMING_SNAKE_CASE , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE , ) -> PIL.Image.Image: """simple docstring""" A : int = do_resize if do_resize is not None else self.do_resize A : Tuple = crop_pct if crop_pct is not None else self.crop_pct A : Optional[Any] = resample if resample is not None else self.resample A : List[Any] = do_rescale if do_rescale is not None else self.do_rescale A : List[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor A : Union[str, Any] = do_normalize if do_normalize is not None else self.do_normalize A : Union[str, Any] = image_mean if image_mean is not None else self.image_mean A : List[str] = image_std if image_std is not None else self.image_std A : Union[str, Any] = size if size is not None else self.size A : List[Any] = get_size_dict(SCREAMING_SNAKE_CASE , default_to_square=SCREAMING_SNAKE_CASE ) A : Any = make_list_of_images(SCREAMING_SNAKE_CASE ) if not valid_images(SCREAMING_SNAKE_CASE ): raise ValueError( '''Invalid image type. 
Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None or resample is None: raise ValueError('''Size and resample must be specified if do_resize is True.''' ) if do_resize and size["shortest_edge"] < 384 and crop_pct is None: raise ValueError('''crop_pct must be specified if size < 384.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # All transformations expect numpy arrays. A : Optional[int] = [to_numpy_array(SCREAMING_SNAKE_CASE ) for image in images] if do_resize: A : Any = [self.resize(image=SCREAMING_SNAKE_CASE , size=SCREAMING_SNAKE_CASE , crop_pct=SCREAMING_SNAKE_CASE , resample=SCREAMING_SNAKE_CASE ) for image in images] if do_rescale: A : str = [self.rescale(image=SCREAMING_SNAKE_CASE , scale=SCREAMING_SNAKE_CASE ) for image in images] if do_normalize: A : Dict = [self.normalize(image=SCREAMING_SNAKE_CASE , mean=SCREAMING_SNAKE_CASE , std=SCREAMING_SNAKE_CASE ) for image in images] A : Any = [to_channel_dimension_format(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for image in images] A : Optional[int] = {'''pixel_values''': images} return BatchFeature(data=SCREAMING_SNAKE_CASE , tensor_type=SCREAMING_SNAKE_CASE )
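# Usage sketch for the processor above (which implements ConvNeXT-style
# shortest-edge/crop_pct preprocessing). The checkpoint id and image path are
# assumptions, used only to obtain a configured instance:
from PIL import Image

from transformers import AutoImageProcessor

processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-384")  # assumed checkpoint id
inputs = processor(images=Image.open("sample.jpg"), return_tensors="pt")      # placeholder image path
# with size={"shortest_edge": 384}, pixel_values comes out as a (1, 3, 384, 384) tensor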
from collections import defaultdict
from math import ceil, sqrt


def solution(t_limit: int = 1_000_000, n_limit: int = 10) -> int:
    """Project Euler 174: count tile totals t <= t_limit that form between 1 and
    n_limit distinct hollow square laminae."""
    count: defaultdict = defaultdict(int)
    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width * outer_width - t_limit)), 1)
        else:
            hole_width_lower_bound = 1
        # the hole must share the outer square's parity so the border has uniform width
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1
    return sum(1 for n in count.values() if 1 <= n <= n_limit)


if __name__ == "__main__":
    print(f"{solution() = }")
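# Sanity check on the enumeration above: the smallest lamina is a 3x3 square with
# a 1x1 hole, i.e. t = 3*3 - 1*1 = 8 tiles; larger holes step by 2 so the border
# stays uniform.
assert 3 * 3 - 1 * 1 == 8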
'''simple docstring''' import unittest from transformers import AutoConfig, AutoTokenizer, BertConfig, TensorType, is_flax_available from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow if is_flax_available(): import jax from transformers.models.auto.modeling_flax_auto import FlaxAutoModel from transformers.models.bert.modeling_flax_bert import FlaxBertModel from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel @require_flax class A ( unittest.TestCase ): @slow def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" for model_name in ["bert-base-cased", "bert-large-uncased"]: with self.subTest(SCREAMING_SNAKE_CASE ): A : int = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsNotNone(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) A : List[str] = FlaxAutoModel.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsNotNone(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) @slow def __lowerCAmelCase ( self ) -> int: """simple docstring""" for model_name in ["roberta-base", "roberta-large"]: with self.subTest(SCREAMING_SNAKE_CASE ): A : Any = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsNotNone(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) A : Any = FlaxAutoModel.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsNotNone(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) @slow def __lowerCAmelCase ( self ) -> Any: """simple docstring""" for model_name in ["bert-base-cased", "bert-large-uncased"]: A : Optional[int] = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE ) A : List[str] = FlaxBertModel.from_pretrained(SCREAMING_SNAKE_CASE ) A : Optional[Any] = tokenizer('''Do you support jax jitted function?''' , return_tensors=TensorType.JAX ) @jax.jit def eval(**SCREAMING_SNAKE_CASE ): return model(**SCREAMING_SNAKE_CASE ) eval(**SCREAMING_SNAKE_CASE ).block_until_ready() @slow def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" for model_name in ["roberta-base", "roberta-large"]: A : List[str] = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE ) A : Union[str, Any] = FlaxRobertaModel.from_pretrained(SCREAMING_SNAKE_CASE ) A : int = tokenizer('''Do you support jax jitted function?''' , return_tensors=TensorType.JAX ) @jax.jit def eval(**SCREAMING_SNAKE_CASE ): return model(**SCREAMING_SNAKE_CASE ) eval(**SCREAMING_SNAKE_CASE ).block_until_ready() def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" with self.assertRaisesRegex( SCREAMING_SNAKE_CASE , '''bert-base is not a local folder and is not a valid model identifier''' ): A : List[Any] = FlaxAutoModel.from_pretrained('''bert-base''' ) def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" with self.assertRaisesRegex( SCREAMING_SNAKE_CASE , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ): A : Optional[int] = FlaxAutoModel.from_pretrained(SCREAMING_SNAKE_CASE , revision='''aaaaaa''' ) def __lowerCAmelCase ( self ) -> str: """simple docstring""" with self.assertRaisesRegex( SCREAMING_SNAKE_CASE , '''hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack''' , ): A : List[str] = FlaxAutoModel.from_pretrained('''hf-internal-testing/config-no-model''' ) def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" with self.assertRaisesRegex(SCREAMING_SNAKE_CASE , 
'''Use `from_pt=True` to load this model''' ): A : Any = FlaxAutoModel.from_pretrained('''hf-internal-testing/tiny-bert-pt-only''' )
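# The jit pattern these tests check, as a standalone sketch against the same
# public checkpoint (mirrors the test body above):
import jax

from transformers import AutoTokenizer, FlaxAutoModel

bert_tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
flax_model = FlaxAutoModel.from_pretrained("bert-base-cased")
encoded = bert_tokenizer("Do you support jax jitted function?", return_tensors="jax")

@jax.jit
def forward(**kwargs):
    return flax_model(**kwargs)

forward(**encoded).block_until_ready()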
'''simple docstring''' from manim import * class lowercase__ ( lowercase ): def UpperCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' _UpperCamelCase : Any = Rectangle(height=0.5 ,width=0.5 ) _UpperCamelCase : Tuple = Rectangle(height=0.4_6 ,width=0.4_6 ).set_stroke(width=0 ) _UpperCamelCase : Union[str, Any] = [mem.copy() for i in range(6 )] _UpperCamelCase : Dict = [mem.copy() for i in range(6 )] _UpperCamelCase : Any = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__ ,buff=0 ) _UpperCamelCase : Tuple = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__ ,buff=0 ) _UpperCamelCase : List[Any] = VGroup(lowerCamelCase__ ,lowerCamelCase__ ).arrange(lowerCamelCase__ ,buff=0 ) _UpperCamelCase : Tuple = Text('CPU' ,font_size=24 ) _UpperCamelCase : Optional[Any] = Group(lowerCamelCase__ ,lowerCamelCase__ ).arrange(lowerCamelCase__ ,buff=0.5 ,aligned_edge=lowerCamelCase__ ) cpu.move_to([-2.5, -0.5, 0] ) self.add(lowerCamelCase__ ) _UpperCamelCase : Optional[Any] = [mem.copy() for i in range(1 )] _UpperCamelCase : int = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__ ,buff=0 ) _UpperCamelCase : Any = Text('GPU' ,font_size=24 ) _UpperCamelCase : Dict = Group(lowerCamelCase__ ,lowerCamelCase__ ).arrange(lowerCamelCase__ ,buff=0.5 ,aligned_edge=lowerCamelCase__ ) gpu.align_to(lowerCamelCase__ ,lowerCamelCase__ ) gpu.set_x(gpu.get_x() - 1 ) self.add(lowerCamelCase__ ) _UpperCamelCase : int = [mem.copy() for i in range(6 )] _UpperCamelCase : Dict = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__ ,buff=0 ) _UpperCamelCase : Optional[Any] = Text('Model' ,font_size=24 ) _UpperCamelCase : int = Group(lowerCamelCase__ ,lowerCamelCase__ ).arrange(lowerCamelCase__ ,buff=0.5 ,aligned_edge=lowerCamelCase__ ) model.move_to([3, -1.0, 0] ) self.play( Create(lowerCamelCase__ ,run_time=1 ) ,Create(lowerCamelCase__ ,run_time=1 ) ,Create(lowerCamelCase__ ,run_time=1 ) ,) _UpperCamelCase : List[Any] = MarkupText( F'First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM.' ,font_size=24 ,) _UpperCamelCase : int = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) _UpperCamelCase : Tuple = MarkupText( F'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' ,font_size=18 ,) key_text.move_to([-5, 2.4, 0] ) step_a.move_to([2, 2, 0] ) self.play(Write(lowerCamelCase__ ,run_time=2.5 ) ,Write(lowerCamelCase__ ) ,Write(lowerCamelCase__ ) ) self.add(lowerCamelCase__ ) _UpperCamelCase : int = [] _UpperCamelCase : Optional[Any] = [] _UpperCamelCase : str = [] for i, rect in enumerate(lowerCamelCase__ ): _UpperCamelCase : Any = Rectangle(height=0.4_6 ,width=0.4_6 ).set_stroke(width=0.0 ).set_fill(lowerCamelCase__ ,opacity=0.7 ) cpu_target.move_to(lowerCamelCase__ ) cpu_target.generate_target() _UpperCamelCase : List[Any] = 0.4_6 / 4 _UpperCamelCase : Optional[Any] = 0.4_6 / 3 if i == 0: cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) ,buff=0.0_2 ,direction=lowerCamelCase__ ) cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 ) elif i == 3: cpu_target.target.next_to(cpu_targs[0].target ,direction=lowerCamelCase__ ,buff=0.0 ) else: cpu_target.target.next_to(cpu_targs[i - 1].target ,direction=lowerCamelCase__ ,buff=0.0 ) cpu_targs.append(lowerCamelCase__ ) first_animations.append(rect.animate(run_time=0.5 ).set_stroke(lowerCamelCase__ ) ) second_animations.append(MoveToTarget(lowerCamelCase__ ,run_time=1.5 ) ) self.play(*lowerCamelCase__ ) self.play(*lowerCamelCase__ ) self.wait()
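# Rendering note: with the Manim Community CLI a scene like the one above renders via
#   manim -pql <this_file>.py <SceneClassName>
# where -p opens a preview and -ql selects low render quality for fast iteration
# (the scene class name is mangled in this dump, hence the placeholder).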
'''simple docstring''' import argparse import re import torch from CLAP import create_model from transformers import AutoFeatureExtractor, ClapConfig, ClapModel lowercase : Union[str, Any] = { 'text_branch': 'text_model', 'audio_branch': 'audio_model.audio_encoder', 'attn': 'attention.self', 'self.proj': 'output.dense', 'attention.self_mask': 'attn_mask', 'mlp.fc1': 'intermediate.dense', 'mlp.fc2': 'output.dense', 'norm1': 'layernorm_before', 'norm2': 'layernorm_after', 'bn0': 'batch_norm', } lowercase : Tuple = AutoFeatureExtractor.from_pretrained('laion/clap-htsat-unfused', truncation='rand_trunc') def lowerCAmelCase_ ( snake_case__ , snake_case__=False ): '''simple docstring''' A, A : Tuple = create_model( '''HTSAT-tiny''' , '''roberta''' , snake_case__ , precision='''fp32''' , device='''cuda:0''' if torch.cuda.is_available() else '''cpu''' , enable_fusion=snake_case__ , fusion_type='''aff_2d''' if enable_fusion else None , ) return model, model_cfg def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' A : Dict = {} A : str = R'''.*sequential.(\d+).*''' A : Union[str, Any] = R'''.*_projection.(\d+).*''' for key, value in state_dict.items(): # check if any key needs to be modified for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items(): if key_to_modify in key: A : Any = key.replace(snake_case__ , snake_case__ ) if re.match(snake_case__ , snake_case__ ): # replace sequential layers with list A : Any = re.match(snake_case__ , snake_case__ ).group(1 ) A : List[str] = key.replace(F'sequential.{sequential_layer}.' , F'layers.{int(snake_case__ )//3}.linear.' ) elif re.match(snake_case__ , snake_case__ ): A : Union[str, Any] = int(re.match(snake_case__ , snake_case__ ).group(1 ) ) # Because in CLAP they use `nn.Sequential`... A : str = 1 if projecton_layer == 0 else 2 A : Optional[Any] = key.replace(F'_projection.{projecton_layer}.' , F'_projection.linear{transformers_projection_layer}.' ) if "audio" and "qkv" in key: # split qkv into query key and value A : int = value A : List[Any] = mixed_qkv.size(0 ) // 3 A : Union[str, Any] = mixed_qkv[:qkv_dim] A : Optional[int] = mixed_qkv[qkv_dim : qkv_dim * 2] A : Optional[int] = mixed_qkv[qkv_dim * 2 :] A : Tuple = query_layer A : Union[str, Any] = key_layer A : Optional[int] = value_layer else: A : Dict = value return model_state_dict def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__=False ): '''simple docstring''' A, A : int = init_clap(snake_case__ , enable_fusion=snake_case__ ) clap_model.eval() A : str = clap_model.state_dict() A : Union[str, Any] = rename_state_dict(snake_case__ ) A : Tuple = ClapConfig() A : str = enable_fusion A : str = ClapModel(snake_case__ ) # ignore the spectrogram embedding layer model.load_state_dict(snake_case__ , strict=snake_case__ ) model.save_pretrained(snake_case__ ) transformers_config.save_pretrained(snake_case__ ) if __name__ == "__main__": lowercase : List[str] = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') parser.add_argument('--enable_fusion', action='store_true', help='Whether to enable fusion or not') lowercase : Tuple = parser.parse_args() convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
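# Reload sketch for a converted checkpoint; "clap-dump" is a placeholder for the
# --pytorch_dump_folder_path passed to the converter above:
from transformers import ClapModel

clap = ClapModel.from_pretrained("clap-dump")  # hypothetical local folder
print(type(clap).__name__)                      # ClapModel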
"""simple docstring""" def _snake_case ( lowercase__ : str , lowercase__ : str ) -> int: '''simple docstring''' if len(lowercase__ ) != len(lowercase__ ): raise ValueError("""String lengths must match!""" ) lowerCAmelCase_ :Optional[int] = 0 for chara, chara in zip(lowercase__ , lowercase__ ): if chara != chara: count += 1 return count if __name__ == "__main__": import doctest doctest.testmod()
'''simple docstring''' import os from pickle import UnpicklingError from typing import Dict, Tuple import jax import jax.numpy as jnp import numpy as np from flax.serialization import from_bytes from flax.traverse_util import flatten_dict, unflatten_dict import transformers from .utils import logging lowercase : Dict = logging.get_logger(__name__) def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__=False ): '''simple docstring''' try: import torch # noqa: F401 except ImportError: logger.error( '''Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see''' ''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation''' ''' instructions.''' ) raise if not is_sharded: A : Union[str, Any] = os.path.abspath(snake_case__ ) logger.info(F'Loading PyTorch weights from {pt_path}' ) A : Any = torch.load(snake_case__ , map_location='''cpu''' ) logger.info(F'PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.' ) A : List[str] = convert_pytorch_state_dict_to_flax(snake_case__ , snake_case__ ) else: # model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files A : Any = convert_pytorch_sharded_state_dict_to_flax(snake_case__ , snake_case__ ) return flax_state_dict def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , ): '''simple docstring''' def is_key_or_prefix_key_in_dict(snake_case__ ) -> bool: return len(set(snake_case__ ) & {key, (model_prefix,) + key} ) > 0 # layer norm A : Union[str, Any] = pt_tuple_key[:-1] + ('''scale''',) if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(snake_case__ ): return renamed_pt_tuple_key, pt_tensor # batch norm layer mean A : Tuple = pt_tuple_key[:-1] + ('''mean''',) if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(snake_case__ ): return renamed_pt_tuple_key, pt_tensor # batch norm layer var A : Dict = pt_tuple_key[:-1] + ('''var''',) if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(snake_case__ ): return renamed_pt_tuple_key, pt_tensor # embedding A : Any = pt_tuple_key[:-1] + ('''embedding''',) if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(snake_case__ ): return renamed_pt_tuple_key, pt_tensor # conv layer A : Optional[Any] = pt_tuple_key[:-1] + ('''kernel''',) if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(snake_case__ ): A : List[Any] = pt_tensor.transpose(2 , 3 , 1 , 0 ) return renamed_pt_tuple_key, pt_tensor # linear layer A : Optional[int] = pt_tuple_key[:-1] + ('''kernel''',) if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(snake_case__ ): A : str = pt_tensor.T return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm weight A : Dict = pt_tuple_key[:-1] + ('''weight''',) if pt_tuple_key[-1] == "gamma": return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm bias A : List[Any] = pt_tuple_key[:-1] + ('''bias''',) if pt_tuple_key[-1] == "beta": return renamed_pt_tuple_key, pt_tensor # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030 A : Dict = None if pt_tuple_key[-3::2] == ("parametrizations", "original0"): A : List[Any] = pt_tuple_key[-2] + '''_g''' elif pt_tuple_key[-3::2] == ("parametrizations", "original1"): A : List[str] = pt_tuple_key[-2] + '''_v''' if name is not None: A : int = pt_tuple_key[:-3] + (name,) return renamed_pt_tuple_key, pt_tensor return 
pt_tuple_key, pt_tensor def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' A : Dict = {k: v.numpy() for k, v in pt_state_dict.items()} A : int = flax_model.base_model_prefix # use params dict if the model contains batch norm layers if "params" in flax_model.params: A : List[str] = flax_model.params['''params'''] else: A : Dict = flax_model.params A : List[Any] = flatten_dict(snake_case__ ) # add batch_stats keys,values to dict if "batch_stats" in flax_model.params: A : List[str] = flatten_dict(flax_model.params['''batch_stats'''] ) random_flax_state_dict.update(snake_case__ ) A : int = {} A : List[str] = (model_prefix not in flax_model_params) and ( model_prefix in {k.split('''.''' )[0] for k in pt_state_dict.keys()} ) A : int = (model_prefix in flax_model_params) and ( model_prefix not in {k.split('''.''' )[0] for k in pt_state_dict.keys()} ) # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): A : str = tuple(pt_key.split('''.''' ) ) # remove base model prefix if necessary A : Union[str, Any] = pt_tuple_key[0] == model_prefix if load_model_with_head_into_base_model and has_base_model_prefix: A : Any = pt_tuple_key[1:] # Correctly rename weight parameters A, A : Dict = rename_key_and_reshape_tensor( snake_case__ , snake_case__ , snake_case__ , snake_case__ ) # add model prefix if necessary A : Any = (model_prefix,) + flax_key in random_flax_state_dict if load_base_model_into_model_with_head and require_base_model_prefix: A : int = (model_prefix,) + flax_key if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( F'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape ' F'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' 
) # add batch stats if the model contains batchnorm layers if "batch_stats" in flax_model.params: if "mean" in flax_key[-1] or "var" in flax_key[-1]: A : Tuple = jnp.asarray(snake_case__ ) continue # remove num_batches_tracked key if "num_batches_tracked" in flax_key[-1]: flax_state_dict.pop(snake_case__ , snake_case__ ) continue # also add unexpected weight so that warning is thrown A : List[str] = jnp.asarray(snake_case__ ) else: # also add unexpected weight so that warning is thrown A : Optional[Any] = jnp.asarray(snake_case__ ) return unflatten_dict(snake_case__ ) def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' import torch # Load the index A : Union[str, Any] = {} for shard_file in shard_filenames: # load using msgpack utils A : List[str] = torch.load(snake_case__ ) A : int = {k: v.numpy() for k, v in pt_state_dict.items()} A : Tuple = flax_model.base_model_prefix # use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict if "batch_stats" in flax_model.params: A : Optional[int] = flax_model.params['''params'''] A : List[Any] = flatten_dict(snake_case__ ) random_flax_state_dict.update(flatten_dict(flax_model.params['''batch_stats'''] ) ) else: A : Dict = flax_model.params A : Tuple = flatten_dict(snake_case__ ) A : List[str] = (model_prefix not in flax_model_params) and ( model_prefix in {k.split('''.''' )[0] for k in pt_state_dict.keys()} ) A : List[str] = (model_prefix in flax_model_params) and ( model_prefix not in {k.split('''.''' )[0] for k in pt_state_dict.keys()} ) # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): A : int = tuple(pt_key.split('''.''' ) ) # remove base model prefix if necessary A : List[str] = pt_tuple_key[0] == model_prefix if load_model_with_head_into_base_model and has_base_model_prefix: A : Union[str, Any] = pt_tuple_key[1:] # Correctly rename weight parameters A, A : Any = rename_key_and_reshape_tensor( snake_case__ , snake_case__ , snake_case__ , snake_case__ ) # add model prefix if necessary A : int = (model_prefix,) + flax_key in random_flax_state_dict if load_base_model_into_model_with_head and require_base_model_prefix: A : int = (model_prefix,) + flax_key if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( F'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape ' F'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' 
) # add batch stats if the model contains batchnorm layers if "batch_stats" in flax_model.params: if "mean" in flax_key[-1]: A : Optional[int] = jnp.asarray(snake_case__ ) continue if "var" in flax_key[-1]: A : Optional[int] = jnp.asarray(snake_case__ ) continue # remove num_batches_tracked key if "num_batches_tracked" in flax_key[-1]: flax_state_dict.pop(snake_case__ , snake_case__ ) continue # also add unexpected weight so that warning is thrown A : Optional[Any] = jnp.asarray(snake_case__ ) else: # also add unexpected weight so that warning is thrown A : Optional[Any] = jnp.asarray(snake_case__ ) return unflatten_dict(snake_case__ ) def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' A : Dict = os.path.abspath(snake_case__ ) logger.info(F'Loading Flax weights from {flax_checkpoint_path}' ) # import correct flax class A : List[str] = getattr(snake_case__ , '''Flax''' + model.__class__.__name__ ) # load flax weight dict with open(snake_case__ , '''rb''' ) as state_f: try: A : int = from_bytes(snake_case__ , state_f.read() ) except UnpicklingError: raise EnvironmentError(F'Unable to convert {flax_checkpoint_path} to Flax deserializable object. ' ) return load_flax_weights_in_pytorch_model(snake_case__ , snake_case__ ) def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' try: import torch # noqa: F401 except ImportError: logger.error( '''Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see''' ''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation''' ''' instructions.''' ) raise # check if we have bf16 weights A : List[str] = flatten_dict(jax.tree_util.tree_map(lambda snake_case__ : x.dtype == jnp.bfloataa , snake_case__ ) ).values() if any(snake_case__ ): # convert all weights to fp32 if the are bf16 since torch.from_numpy can-not handle bf16 # and bf16 is not fully supported in PT yet. logger.warning( '''Found ``bfloat16`` weights in Flax model. 
Casting all ``bfloat16`` weights to ``float32`` ''' '''before loading those in PyTorch model.''' ) A : Optional[Any] = jax.tree_util.tree_map( lambda snake_case__ : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , snake_case__ ) A : Union[str, Any] = flatten_dict(snake_case__ ) A : List[Any] = pt_model.state_dict() A : Union[str, Any] = (pt_model.base_model_prefix in flax_state) and ( pt_model.base_model_prefix not in {k.split('''.''' )[0] for k in pt_model_dict.keys()} ) A : Tuple = (pt_model.base_model_prefix not in flax_state) and ( pt_model.base_model_prefix in {k.split('''.''' )[0] for k in pt_model_dict.keys()} ) # keep track of unexpected & missing keys A : int = [] A : Any = set(pt_model_dict.keys() ) for flax_key_tuple, flax_tensor in flax_state_dict.items(): A : Union[str, Any] = flax_key_tuple[0] == pt_model.base_model_prefix A : int = '''.'''.join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict # adapt flax_key to prepare for loading from/to base model only if load_model_with_head_into_base_model and has_base_model_prefix: A : List[str] = flax_key_tuple[1:] elif load_base_model_into_model_with_head and require_base_model_prefix: A : Optional[Any] = (pt_model.base_model_prefix,) + flax_key_tuple # rename flax weights to PyTorch format if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(snake_case__ ) not in pt_model_dict: # conv layer A : Optional[int] = flax_key_tuple[:-1] + ('''weight''',) A : Optional[int] = jnp.transpose(snake_case__ , (3, 2, 0, 1) ) elif flax_key_tuple[-1] == "kernel" and ".".join(snake_case__ ) not in pt_model_dict: # linear layer A : Tuple = flax_key_tuple[:-1] + ('''weight''',) A : Tuple = flax_tensor.T elif flax_key_tuple[-1] in ["scale", "embedding"]: A : Optional[int] = flax_key_tuple[:-1] + ('''weight''',) # adding batch stats from flax batch norm to pt elif "mean" in flax_key_tuple[-1]: A : Tuple = flax_key_tuple[:-1] + ('''running_mean''',) elif "var" in flax_key_tuple[-1]: A : Tuple = flax_key_tuple[:-1] + ('''running_var''',) if "batch_stats" in flax_state: A : List[Any] = '''.'''.join(flax_key_tuple[1:] ) # Remove the params/batch_stats header else: A : Union[str, Any] = '''.'''.join(snake_case__ ) # We also need to look at `pt_model_dict` and see if there are keys requiring further transformation. A : int = {} # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030 for key in pt_model_dict: A : Optional[int] = key.split('''.''' ) A : Dict = None if key_components[-3::2] == ["parametrizations", "original0"]: A : List[str] = key_components[-2] + '''_g''' elif key_components[-3::2] == ["parametrizations", "original1"]: A : List[Any] = key_components[-2] + '''_v''' if name is not None: A : str = key_components[:-3] + [name] A : Optional[Any] = '''.'''.join(snake_case__ ) A : Optional[Any] = key if flax_key in special_pt_names: A : Optional[Any] = special_pt_names[flax_key] if flax_key in pt_model_dict: if flax_tensor.shape != pt_model_dict[flax_key].shape: raise ValueError( F'Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected ' F'to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.' 
) else: # add weight to pytorch dict A : Dict = np.asarray(snake_case__ ) if not isinstance(snake_case__ , np.ndarray ) else flax_tensor A : Dict = torch.from_numpy(snake_case__ ) # remove from missing keys missing_keys.remove(snake_case__ ) else: # weight is not expected by PyTorch model unexpected_keys.append(snake_case__ ) pt_model.load_state_dict(snake_case__ ) # re-transform missing_keys to list A : List[Any] = list(snake_case__ ) if len(snake_case__ ) > 0: logger.warning( '''Some weights of the Flax model were not used when initializing the PyTorch model''' F' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing' F' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture' ''' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This''' F' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect' ''' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a''' ''' FlaxBertForSequenceClassification model).''' ) else: logger.warning(F'All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n' ) if len(snake_case__ ) > 0: logger.warning( F'Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly' F' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to' ''' use it for predictions and inference.''' ) else: logger.warning( F'All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n' '''If your task is similar to the task the model of the checkpoint was trained on, ''' F'you can already use {pt_model.__class__.__name__} for predictions without further training.' ) return pt_model
3
0
'''Lorentz transformation of an event four-vector (boost along the x-axis).'''
from __future__ import annotations

from math import sqrt

import numpy as np
from sympy import symbols

# Coefficient
# Speed of light (m/s)
c = 299792458

# Symbols
ct, x, y, z = symbols("ct x y z")


def beta(velocity: float) -> float:
    '''Return v/c for a velocity in [1, c].'''
    if velocity > c:
        raise ValueError("Speed must not exceed light speed 299,792,458 [m/s]!")
    elif velocity < 1:
        # Usually the speed should be much higher than 1 (c order of magnitude)
        raise ValueError("Speed must be greater than or equal to 1!")
    return velocity / c


def gamma(velocity: float) -> float:
    '''Lorentz factor 1 / sqrt(1 - beta**2).'''
    return 1 / sqrt(1 - beta(velocity) ** 2)


def transformation_matrix(velocity: float) -> np.ndarray:
    '''4x4 Lorentz boost matrix along the x-axis.'''
    return np.array(
        [
            [gamma(velocity), -gamma(velocity) * beta(velocity), 0, 0],
            [-gamma(velocity) * beta(velocity), gamma(velocity), 0, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 1],
        ]
    )


def transform(velocity: float, event: np.ndarray | None = None) -> np.ndarray:
    '''Apply the boost to an event four-vector (symbolic by default).'''
    if event is None:
        event = np.array([ct, x, y, z])  # Symbolic four vector
    else:
        event[0] *= c  # x0 is ct (speed of light * time)
    return transformation_matrix(velocity) @ event


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Example of symbolic vector:
    four_vector = transform(29979245)
    print("Example of four vector: ")
    print(f"ct' = {four_vector[0]}")
    print(f"x' = {four_vector[1]}")
    print(f"y' = {four_vector[2]}")
    print(f"z' = {four_vector[3]}")

    # Substitute symbols with numerical values
    sub_dict = {ct: c, x: 1, y: 1, z: 1}
    numerical_vector = [four_vector[i].subs(sub_dict) for i in range(4)]
    print(f"\n{numerical_vector}")
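A quick numerical cross-check of the sample above (an addition, not part of the original file; it only uses the names defined in the sample): at v = c/2 the Lorentz factor is 1/sqrt(0.75) ≈ 1.1547, and a pure boost matrix is symmetric.

# Sanity check for the boost above (assumes beta/gamma/transformation_matrix
# and c from the sample are in scope).
import numpy as np

v = c / 2
assert abs(gamma(v) - 1 / (1 - 0.25) ** 0.5) < 1e-12  # gamma(c/2) ~= 1.1547
m = transformation_matrix(v)
assert np.allclose(m, m.T)  # a pure x-boost is a symmetric matrix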
85
'''simple docstring''' import argparse import gc import json import os import re import torch from huggingface_hub import hf_hub_download from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint lowercase : Optional[int] = { '169M': 12, '430M': 24, '1B5': 24, '3B': 32, '7B': 32, '14B': 40, } lowercase : Optional[Any] = { '169M': 7_68, '430M': 10_24, '1B5': 20_48, '3B': 25_60, '7B': 40_96, '14B': 51_20, } def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' A : Optional[Any] = list(state_dict.keys() ) for name in state_dict_keys: A : str = state_dict.pop(snake_case__ ) # emb -> embedding if name.startswith('''emb.''' ): A : Optional[Any] = name.replace('''emb.''' , '''embeddings.''' ) # ln_0 -> pre_ln (only present at block 0) if name.startswith('''blocks.0.ln0''' ): A : Union[str, Any] = name.replace('''blocks.0.ln0''' , '''blocks.0.pre_ln''' ) # att -> attention A : int = re.sub(R'''blocks\.(\d+)\.att''' , R'''blocks.\1.attention''' , snake_case__ ) # ffn -> feed_forward A : List[Any] = re.sub(R'''blocks\.(\d+)\.ffn''' , R'''blocks.\1.feed_forward''' , snake_case__ ) # time_mix_k -> time_mix_key and reshape if name.endswith('''.time_mix_k''' ): A : List[str] = name.replace('''.time_mix_k''' , '''.time_mix_key''' ) # time_mix_v -> time_mix_value and reshape if name.endswith('''.time_mix_v''' ): A : Union[str, Any] = name.replace('''.time_mix_v''' , '''.time_mix_value''' ) # time_mix_r -> time_mix_key and reshape if name.endswith('''.time_mix_r''' ): A : Union[str, Any] = name.replace('''.time_mix_r''' , '''.time_mix_receptance''' ) if name != "head.weight": A : List[Any] = '''rwkv.''' + name A : Dict = weight return state_dict def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__=None , snake_case__=None , snake_case__=False , snake_case__=None ): '''simple docstring''' if tokenizer_file is None: print('''No `--tokenizer_file` provided, we will use the default tokenizer.''' ) A : int = 5_0277 A : Optional[int] = AutoTokenizer.from_pretrained('''EleutherAI/gpt-neox-20b''' ) else: A : str = PreTrainedTokenizerFast(tokenizer_file=snake_case__ ) A : Any = len(snake_case__ ) tokenizer.save_pretrained(snake_case__ ) # 2. Build the config A : List[str] = list(NUM_HIDDEN_LAYERS_MAPPING.keys() ) if size is None: # Try to infer size from the checkpoint name for candidate in possible_sizes: if candidate in checkpoint_file: A : List[str] = candidate break if size is None: raise ValueError('''Could not infer the size, please provide it with the `--size` argument.''' ) if size not in possible_sizes: raise ValueError(F'`size` should be one of {possible_sizes}, got {size}.' ) A : Any = RwkvConfig( vocab_size=snake_case__ , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDEN_SIZE_MAPPING[size] , ) config.save_pretrained(snake_case__ ) # 3. Download model file then convert state_dict A : Union[str, Any] = hf_hub_download(snake_case__ , snake_case__ ) A : Tuple = torch.load(snake_case__ , map_location='''cpu''' ) A : List[Any] = convert_state_dict(snake_case__ ) # 4. 
Split in shards and save A, A : List[str] = shard_checkpoint(snake_case__ ) for shard_file, shard in shards.items(): torch.save(snake_case__ , os.path.join(snake_case__ , snake_case__ ) ) if index is not None: A : Dict = os.path.join(snake_case__ , snake_case__ ) # Save the index as well with open(snake_case__ , '''w''' , encoding='''utf-8''' ) as f: A : List[Any] = json.dumps(snake_case__ , indent=2 , sort_keys=snake_case__ ) + '''\n''' f.write(snake_case__ ) # 5. Clean up shards (for some reason the file PyTorch saves take the same space as the whole state_dict print( '''Cleaning up shards. This may error with an OOM error, it this is the case don\'t worry you still have converted the model.''' ) A : List[Any] = list(shards.keys() ) del state_dict del shards gc.collect() for shard_file in shard_files: A : Union[str, Any] = torch.load(os.path.join(snake_case__ , snake_case__ ) ) torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(snake_case__ , snake_case__ ) ) del state_dict gc.collect() if push_to_hub: if model_name is None: raise ValueError('''Please provide a `model_name` to push the model to the Hub.''' ) A : int = AutoModelForCausalLM.from_pretrained(snake_case__ ) model.push_to_hub(snake_case__ , max_shard_size='''2GB''' ) tokenizer.push_to_hub(snake_case__ ) if __name__ == "__main__": lowercase : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--repo_id', default=None, type=str, required=True, help='Repo ID from which to pull the checkpoint.' ) parser.add_argument( '--checkpoint_file', default=None, type=str, required=True, help='Name of the checkpoint file in the repo.' ) parser.add_argument( '--output_dir', default=None, type=str, required=True, help='Where to save the converted model.' ) parser.add_argument( '--tokenizer_file', default=None, type=str, help='Path to the tokenizer file to use (if not provided, only the model is converted).', ) parser.add_argument( '--size', default=None, type=str, help='Size of the model. Will be inferred from the `checkpoint_file` if not passed.', ) parser.add_argument( '--push_to_hub', action='store_true', help='Push to the Hub the converted model.', ) parser.add_argument( '--model_name', default=None, type=str, help='Name of the pushed model on the Hub, including the username / organization.', ) lowercase : Union[str, Any] = parser.parse_args() convert_rmkv_checkpoint_to_hf_format( args.repo_id, args.checkpoint_file, args.output_dir, size=args.size, tokenizer_file=args.tokenizer_file, push_to_hub=args.push_to_hub, model_name=args.model_name, )
3
0
"""simple docstring""" import os from pathlib import Path def __lowerCAmelCase (): from torch.utils.cpp_extension import load __lowerCAmelCase : Dict = Path(_UpperCamelCase ).resolve().parent.parent.parent / 'kernels' / 'deformable_detr' __lowerCAmelCase : int = [ root / filename for filename in [ 'vision.cpp', os.path.join('cpu' , 'ms_deform_attn_cpu.cpp' ), os.path.join('cuda' , 'ms_deform_attn_cuda.cu' ), ] ] load( 'MultiScaleDeformableAttention' , _UpperCamelCase , with_cuda=_UpperCamelCase , extra_include_paths=[str(_UpperCamelCase )] , extra_cflags=['-DWITH_CUDA=1'] , extra_cuda_cflags=[ '-DCUDA_HAS_FP16=1', '-D__CUDA_NO_HALF_OPERATORS__', '-D__CUDA_NO_HALF_CONVERSIONS__', '-D__CUDA_NO_HALF2_OPERATORS__', ] , ) import MultiScaleDeformableAttention as MSDA return MSDA
86
'''simple docstring''' from typing import Union from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING lowercase : str = logging.get_logger(__name__) @add_end_docstrings(__snake_case ) class A ( __snake_case ): def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> List[Any]: """simple docstring""" super().__init__(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) self.check_model_type(SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , **SCREAMING_SNAKE_CASE ) -> Tuple: """simple docstring""" A, A : Dict = {}, {} if padding is not None: A : List[str] = padding if truncation is not None: A : Dict = truncation if top_k is not None: A : Optional[Any] = top_k return preprocess_params, {}, postprocess_params def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" if isinstance(SCREAMING_SNAKE_CASE , (Image.Image, str) ) and isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): A : int = {'''image''': image, '''question''': question} else: A : Any = image A : Any = super().__call__(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) return results def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=False ) -> Any: """simple docstring""" A : Union[str, Any] = load_image(inputs['''image'''] ) A : Optional[Any] = self.tokenizer( inputs['''question'''] , return_tensors=self.framework , padding=SCREAMING_SNAKE_CASE , truncation=SCREAMING_SNAKE_CASE ) A : Dict = self.image_processor(images=SCREAMING_SNAKE_CASE , return_tensors=self.framework ) model_inputs.update(SCREAMING_SNAKE_CASE ) return model_inputs def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> List[str]: """simple docstring""" A : List[Any] = self.model(**SCREAMING_SNAKE_CASE ) return model_outputs def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=5 ) -> int: """simple docstring""" if top_k > self.model.config.num_labels: A : Dict = self.model.config.num_labels if self.framework == "pt": A : Optional[int] = model_outputs.logits.sigmoid()[0] A, A : int = probs.topk(SCREAMING_SNAKE_CASE ) else: raise ValueError(F'Unsupported framework: {self.framework}' ) A : int = scores.tolist() A : List[str] = ids.tolist() return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )]
3
0
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available UpperCamelCase = { '''configuration_groupvit''': [ '''GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GroupViTConfig''', '''GroupViTOnnxConfig''', '''GroupViTTextConfig''', '''GroupViTVisionConfig''', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = [ '''GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''GroupViTModel''', '''GroupViTPreTrainedModel''', '''GroupViTTextModel''', '''GroupViTVisionModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = [ '''TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFGroupViTModel''', '''TFGroupViTPreTrainedModel''', '''TFGroupViTTextModel''', '''TFGroupViTVisionModel''', ] if TYPE_CHECKING: from .configuration_groupvit import ( GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GroupViTConfig, GroupViTOnnxConfig, GroupViTTextConfig, GroupViTVisionConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_groupvit import ( GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST, GroupViTModel, GroupViTPreTrainedModel, GroupViTTextModel, GroupViTVisionModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_groupvit import ( TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFGroupViTModel, TFGroupViTPreTrainedModel, TFGroupViTTextModel, TFGroupViTVisionModel, ) else: import sys UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
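The `_LazyModule` indirection above keeps the package import cheap: nothing listed in the import-structure table is imported until first attribute access. A minimal sketch of the effect (assuming an installed `transformers`; class names as listed above):

# Sketch of the lazy-import behaviour (assumes transformers is installed).
from transformers.models import groupvit  # cheap: torch/TF backends untouched so far

config = groupvit.GroupViTConfig()  # first attribute access triggers the real import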
87
'''simple docstring''' from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowercase : Union[str, Any] = logging.get_logger(__name__) lowercase : str = { 'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/config.json', 'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/config.json', 'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/config.json', 'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/config.json', 'bert-base-multilingual-uncased': 'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json', 'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json', 'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/config.json', 'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/config.json', 'bert-large-uncased-whole-word-masking': ( 'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json' ), 'bert-large-cased-whole-word-masking': ( 'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json' ), 'bert-large-uncased-whole-word-masking-finetuned-squad': ( 'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json' ), 'bert-large-cased-whole-word-masking-finetuned-squad': ( 'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json' ), 'bert-base-cased-finetuned-mrpc': 'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json', 'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json', 'bert-base-german-dbmdz-uncased': 'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json', 'cl-tohoku/bert-base-japanese': 'https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json', 'cl-tohoku/bert-base-japanese-whole-word-masking': ( 'https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json' ), 'cl-tohoku/bert-base-japanese-char': ( 'https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json' ), 'cl-tohoku/bert-base-japanese-char-whole-word-masking': ( 'https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json' ), 'TurkuNLP/bert-base-finnish-cased-v1': ( 'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json' ), 'TurkuNLP/bert-base-finnish-uncased-v1': ( 'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json' ), 'wietsedv/bert-base-dutch-cased': 'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json', # See all BERT models at https://huggingface.co/models?filter=bert } class A ( __snake_case ): __magic_name__ = '''bert''' def __init__( self , SCREAMING_SNAKE_CASE=30522 , SCREAMING_SNAKE_CASE=768 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=3072 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=512 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=1e-12 , SCREAMING_SNAKE_CASE=0 , SCREAMING_SNAKE_CASE="absolute" , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=None , **SCREAMING_SNAKE_CASE , ) -> 
Optional[int]: """simple docstring""" super().__init__(pad_token_id=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) A : Optional[int] = vocab_size A : Optional[Any] = hidden_size A : List[Any] = num_hidden_layers A : List[str] = num_attention_heads A : Dict = hidden_act A : Optional[Any] = intermediate_size A : List[Any] = hidden_dropout_prob A : List[Any] = attention_probs_dropout_prob A : Optional[Any] = max_position_embeddings A : List[str] = type_vocab_size A : Dict = initializer_range A : str = layer_norm_eps A : int = position_embedding_type A : Dict = use_cache A : str = classifier_dropout class A ( __snake_case ): @property def __lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" if self.task == "multiple-choice": A : Optional[Any] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: A : Optional[int] = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ('''token_type_ids''', dynamic_axis), ] )
3
0
import argparse import csv import logging import os import random import numpy as np import torch from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset from tqdm import tqdm, trange from transformers import ( CONFIG_NAME, WEIGHTS_NAME, AdamW, OpenAIGPTDoubleHeadsModel, OpenAIGPTTokenizer, get_linear_schedule_with_warmup, ) logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO ) __lowerCAmelCase : Optional[Any] = logging.getLogger(__name__) def a__ ( A_, A_ ): '''simple docstring''' __magic_name__ = np.argmax(A_, axis=1 ) return np.sum(outputs == labels ) def a__ ( A_ ): '''simple docstring''' with open(A_, encoding="""utf_8""" ) as f: __magic_name__ = csv.reader(A_ ) __magic_name__ = [] next(A_ ) # skip the first line for line in tqdm(A_ ): output.append((""" """.join(line[1:5] ), line[5], line[6], int(line[-1] ) - 1) ) return output def a__ ( A_, A_, A_, A_, A_, A_ ): '''simple docstring''' __magic_name__ = [] for dataset in encoded_datasets: __magic_name__ = len(A_ ) __magic_name__ = np.zeros((n_batch, 2, input_len), dtype=np.intaa ) __magic_name__ = np.zeros((n_batch, 2), dtype=np.intaa ) __magic_name__ = np.full((n_batch, 2, input_len), fill_value=-100, dtype=np.intaa ) __magic_name__ = np.zeros((n_batch,), dtype=np.intaa ) for ( i, (story, conta, conta, mc_label), ) in enumerate(A_ ): __magic_name__ = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token] __magic_name__ = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token] __magic_name__ = with_conta __magic_name__ = with_conta __magic_name__ = len(A_ ) - 1 __magic_name__ = len(A_ ) - 1 __magic_name__ = with_conta __magic_name__ = with_conta __magic_name__ = mc_label __magic_name__ = (input_ids, mc_token_ids, lm_labels, mc_labels) tensor_datasets.append(tuple(torch.tensor(A_ ) for t in all_inputs ) ) return tensor_datasets def a__ ( ): '''simple docstring''' __magic_name__ = argparse.ArgumentParser() parser.add_argument("""--model_name""", type=A_, default="""openai-gpt""", help="""pretrained model name""" ) parser.add_argument("""--do_train""", action="""store_true""", help="""Whether to run training.""" ) parser.add_argument("""--do_eval""", action="""store_true""", help="""Whether to run eval on the dev set.""" ) parser.add_argument( """--output_dir""", default=A_, type=A_, required=A_, help="""The output directory where the model predictions and checkpoints will be written.""", ) parser.add_argument("""--train_dataset""", type=A_, default="""""" ) parser.add_argument("""--eval_dataset""", type=A_, default="""""" ) parser.add_argument("""--seed""", type=A_, default=42 ) parser.add_argument("""--num_train_epochs""", type=A_, default=3 ) parser.add_argument("""--train_batch_size""", type=A_, default=8 ) parser.add_argument("""--eval_batch_size""", type=A_, default=16 ) parser.add_argument("""--adam_epsilon""", default=1e-8, type=A_, help="""Epsilon for Adam optimizer.""" ) parser.add_argument("""--max_grad_norm""", type=A_, default=1 ) parser.add_argument( """--max_steps""", default=-1, type=A_, help=( """If > 0: set total number of training steps to perform. 
Override num_train_epochs.""" ), ) parser.add_argument( """--gradient_accumulation_steps""", type=A_, default=1, help="""Number of updates steps to accumulate before performing a backward/update pass.""", ) parser.add_argument("""--learning_rate""", type=A_, default=6.25e-5 ) parser.add_argument("""--warmup_steps""", default=0, type=A_, help="""Linear warmup over warmup_steps.""" ) parser.add_argument("""--lr_schedule""", type=A_, default="""warmup_linear""" ) parser.add_argument("""--weight_decay""", type=A_, default=0.01 ) parser.add_argument("""--lm_coef""", type=A_, default=0.9 ) parser.add_argument("""--n_valid""", type=A_, default=374 ) parser.add_argument("""--server_ip""", type=A_, default="""""", help="""Can be used for distant debugging.""" ) parser.add_argument("""--server_port""", type=A_, default="""""", help="""Can be used for distant debugging.""" ) __magic_name__ = parser.parse_args() print(A_ ) if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print("""Waiting for debugger attach""" ) ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=A_ ) ptvsd.wait_for_attach() random.seed(args.seed ) np.random.seed(args.seed ) torch.manual_seed(args.seed ) torch.cuda.manual_seed_all(args.seed ) __magic_name__ = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" ) __magic_name__ = torch.cuda.device_count() logger.info("""device: {}, n_gpu {}""".format(A_, A_ ) ) if not args.do_train and not args.do_eval: raise ValueError("""At least one of `do_train` or `do_eval` must be True.""" ) if not os.path.exists(args.output_dir ): os.makedirs(args.output_dir ) # Load tokenizer and model # This loading functions also add new tokens and embeddings called `special tokens` # These new embeddings will be fine-tuned on the RocStories dataset __magic_name__ = ["""_start_""", """_delimiter_""", """_classify_"""] __magic_name__ = OpenAIGPTTokenizer.from_pretrained(args.model_name ) tokenizer.add_tokens(A_ ) __magic_name__ = tokenizer.convert_tokens_to_ids(A_ ) __magic_name__ = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name ) model.resize_token_embeddings(len(A_ ) ) model.to(A_ ) # Load and encode the datasets def tokenize_and_encode(A_ ): if isinstance(A_, A_ ): return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(A_ ) ) elif isinstance(A_, A_ ): return obj return [tokenize_and_encode(A_ ) for o in obj] logger.info("""Encoding dataset...""" ) __magic_name__ = load_rocstories_dataset(args.train_dataset ) __magic_name__ = load_rocstories_dataset(args.eval_dataset ) __magic_name__ = (train_dataset, eval_dataset) __magic_name__ = tokenize_and_encode(A_ ) # Compute the max input length for the Transformer __magic_name__ = model.config.n_positions // 2 - 2 __magic_name__ = max( len(story[:max_length] ) + max(len(conta[:max_length] ), len(conta[:max_length] ) ) + 3 for dataset in encoded_datasets for story, conta, conta, _ in dataset ) __magic_name__ = min(A_, model.config.n_positions ) # Max size of input for the pre-trained model # Prepare inputs tensors and dataloaders __magic_name__ = pre_process_datasets(A_, A_, A_, *A_ ) __magic_name__ , __magic_name__ = tensor_datasets[0], tensor_datasets[1] __magic_name__ = TensorDataset(*A_ ) __magic_name__ = RandomSampler(A_ ) __magic_name__ = DataLoader(A_, sampler=A_, batch_size=args.train_batch_size ) __magic_name__ = TensorDataset(*A_ ) __magic_name__ = SequentialSampler(A_ ) __magic_name__ = 
DataLoader(A_, sampler=A_, batch_size=args.eval_batch_size ) # Prepare optimizer if args.do_train: if args.max_steps > 0: __magic_name__ = args.max_steps __magic_name__ = args.max_steps // (len(A_ ) // args.gradient_accumulation_steps) + 1 else: __magic_name__ = len(A_ ) // args.gradient_accumulation_steps * args.num_train_epochs __magic_name__ = list(model.named_parameters() ) __magic_name__ = ["""bias""", """LayerNorm.bias""", """LayerNorm.weight"""] __magic_name__ = [ { """params""": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )], """weight_decay""": args.weight_decay, }, {"""params""": [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], """weight_decay""": 0.0}, ] __magic_name__ = AdamW(A_, lr=args.learning_rate, eps=args.adam_epsilon ) __magic_name__ = get_linear_schedule_with_warmup( A_, num_warmup_steps=args.warmup_steps, num_training_steps=A_ ) if args.do_train: __magic_name__ , __magic_name__ , __magic_name__ = 0, 0, None model.train() for _ in trange(int(args.num_train_epochs ), desc="""Epoch""" ): __magic_name__ = 0 __magic_name__ = 0 __magic_name__ = tqdm(A_, desc="""Training""" ) for step, batch in enumerate(A_ ): __magic_name__ = tuple(t.to(A_ ) for t in batch ) __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = batch __magic_name__ = model(A_, mc_token_ids=A_, lm_labels=A_, mc_labels=A_ ) __magic_name__ = args.lm_coef * losses[0] + losses[1] loss.backward() optimizer.step() scheduler.step() optimizer.zero_grad() tr_loss += loss.item() __magic_name__ = ( loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item() ) nb_tr_steps += 1 __magic_name__ = """Training loss: {:.2e} lr: {:.2e}""".format(A_, scheduler.get_lr()[0] ) # Save a trained model if args.do_train: # Save a trained model, configuration and tokenizer __magic_name__ = model.module if hasattr(A_, """module""" ) else model # Only save the model itself # If we save using the predefined names, we can load using `from_pretrained` __magic_name__ = os.path.join(args.output_dir, A_ ) __magic_name__ = os.path.join(args.output_dir, A_ ) torch.save(model_to_save.state_dict(), A_ ) model_to_save.config.to_json_file(A_ ) tokenizer.save_vocabulary(args.output_dir ) # Load a trained model and vocabulary that you have fine-tuned __magic_name__ = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir ) __magic_name__ = OpenAIGPTTokenizer.from_pretrained(args.output_dir ) model.to(A_ ) if args.do_eval: model.eval() __magic_name__ , __magic_name__ = 0, 0 __magic_name__ , __magic_name__ = 0, 0 for batch in tqdm(A_, desc="""Evaluating""" ): __magic_name__ = tuple(t.to(A_ ) for t in batch ) __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = batch with torch.no_grad(): __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = model( A_, mc_token_ids=A_, lm_labels=A_, mc_labels=A_ ) __magic_name__ = mc_logits.detach().cpu().numpy() __magic_name__ = mc_labels.to("""cpu""" ).numpy() __magic_name__ = accuracy(A_, A_ ) eval_loss += mc_loss.mean().item() eval_accuracy += tmp_eval_accuracy nb_eval_examples += input_ids.size(0 ) nb_eval_steps += 1 __magic_name__ = eval_loss / nb_eval_steps __magic_name__ = eval_accuracy / nb_eval_examples __magic_name__ = tr_loss / nb_tr_steps if args.do_train else None __magic_name__ = {"""eval_loss""": eval_loss, """eval_accuracy""": eval_accuracy, """train_loss""": train_loss} __magic_name__ = os.path.join(args.output_dir, """eval_results.txt""" ) with open(A_, """w""" ) as writer: 
logger.info("""***** Eval results *****""" ) for key in sorted(result.keys() ): logger.info(""" %s = %s""", A_, str(result[key] ) ) writer.write("""%s = %s\n""" % (key, str(result[key] )) ) if __name__ == "__main__": main()
88
'''Fetch the citation count of a paper from Google Scholar.'''
import requests
from bs4 import BeautifulSoup


def get_citation(base_url: str, params: dict) -> str:
    '''Return the citation string for the first Google Scholar hit.'''
    soup = BeautifulSoup(requests.get(base_url, params=params).content, '''html.parser''')
    div = soup.find('''div''', attrs={'''class''': '''gs_ri'''})
    anchors = div.find('''div''', attrs={'''class''': '''gs_fl'''}).find_all('''a''')
    return anchors[2].get_text()


if __name__ == "__main__":
    params = {
        'title': (
            'Precisely geometry controlled microsupercapacitors for ultrahigh areal '
            'capacitance, volumetric capacitance, and energy density'
        ),
        'journal': 'Chem. Mater.',
        'volume': 30,
        'pages': '3979-3990',
        'year': 2018,
        'hl': 'en',
    }
    print(get_citation('https://scholar.google.com/scholar_lookup', params=params))
3
0
'''simple docstring''' import os import unicodedata from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import SPIECE_UNDERLINE, logging __lowerCAmelCase = logging.get_logger(__name__) __lowerCAmelCase = {'''vocab_file''': '''spiece.model'''} __lowerCAmelCase = { '''vocab_file''': { '''TsinghuaAI/CPM-Generate''': '''https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model''', } } class __magic_name__ ( _UpperCamelCase ): def __init__( self : Optional[int] ,_UpperCAmelCase : str ,_UpperCAmelCase : List[Any]=False ,_UpperCAmelCase : List[Any]=True ,_UpperCAmelCase : Optional[int]=False ,_UpperCAmelCase : Dict="<s>" ,_UpperCAmelCase : Tuple="</s>" ,_UpperCAmelCase : Dict="<unk>" ,_UpperCAmelCase : Dict="<sep>" ,_UpperCAmelCase : List[str]="<pad>" ,_UpperCAmelCase : List[Any]="<cls>" ,_UpperCAmelCase : Union[str, Any]="<mask>" ,_UpperCAmelCase : Optional[Any]=["<eop>", "<eod>"] ,_UpperCAmelCase : Optional[Dict[str, Any]] = None ,**_UpperCAmelCase : Union[str, Any] ,): _a : List[Any] = AddedToken(_UpperCAmelCase ,lstrip=_UpperCAmelCase ,rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase ,_UpperCAmelCase ) else mask_token _a : int = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=_UpperCAmelCase ,remove_space=_UpperCAmelCase ,keep_accents=_UpperCAmelCase ,bos_token=_UpperCAmelCase ,eos_token=_UpperCAmelCase ,unk_token=_UpperCAmelCase ,sep_token=_UpperCAmelCase ,pad_token=_UpperCAmelCase ,cls_token=_UpperCAmelCase ,mask_token=_UpperCAmelCase ,additional_special_tokens=_UpperCAmelCase ,sp_model_kwargs=self.sp_model_kwargs ,**_UpperCAmelCase ,) _a : List[Any] = 3 _a : Optional[int] = do_lower_case _a : Dict = remove_space _a : Union[str, Any] = keep_accents _a : Any = vocab_file _a : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(_UpperCAmelCase ) try: import jieba except ModuleNotFoundError as error: raise error.__class__( 'You need to install jieba to use CpmTokenizer or CpmTokenizerFast. ' 'See https://pypi.org/project/jieba/ for installation.' 
) _a : Optional[int] = jieba _a : Union[str, Any] = str.maketrans(' \n' ,'\u2582\u2583' ) @property # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size def __lowercase ( self : Dict ): return len(self.sp_model ) def __lowercase ( self : Dict ): _a : Optional[Any] = {self.convert_ids_to_tokens(_UpperCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : int ): _a : Tuple = self.__dict__.copy() _a : Any = None return state def __setstate__( self : Any ,_UpperCAmelCase : Optional[int] ): _a : Any = d # for backward compatibility if not hasattr(self ,'sp_model_kwargs' ): _a : Optional[int] = {} _a : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def __lowercase ( self : Optional[Any] ,_UpperCAmelCase : str ): if self.remove_space: _a : List[str] = ' '.join(inputs.strip().split() ) else: _a : int = inputs _a : str = outputs.replace('``' ,'"' ).replace('\'\'' ,'"' ) if not self.keep_accents: _a : Dict = unicodedata.normalize('NFKD' ,_UpperCAmelCase ) _a : List[str] = ''.join([c for c in outputs if not unicodedata.combining(_UpperCAmelCase )] ) if self.do_lower_case: _a : Dict = outputs.lower() return outputs def __lowercase ( self : List[str] ,_UpperCAmelCase : str ): _a : int = self.preprocess_text(_UpperCAmelCase ) _a : Dict = self.sp_model.encode(_UpperCAmelCase ,out_type=_UpperCAmelCase ) _a : Optional[Any] = [] for piece in pieces: if len(_UpperCAmelCase ) > 1 and piece[-1] == str(',' ) and piece[-2].isdigit(): _a : Dict = self.sp_model.EncodeAsPieces(piece[:-1].replace(_UpperCAmelCase ,'' ) ) if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: if len(cur_pieces[0] ) == 1: _a : Optional[int] = cur_pieces[1:] else: _a : Optional[int] = cur_pieces[0][1:] cur_pieces.append(piece[-1] ) new_pieces.extend(_UpperCAmelCase ) else: new_pieces.append(_UpperCAmelCase ) return new_pieces def __lowercase ( self : Optional[int] ,_UpperCAmelCase : Union[str, Any] ): return self.sp_model.PieceToId(_UpperCAmelCase ) def __lowercase ( self : Union[str, Any] ,_UpperCAmelCase : List[str] ): return self.sp_model.IdToPiece(_UpperCAmelCase ) def __lowercase ( self : Optional[Any] ,_UpperCAmelCase : Any ): _a : Dict = ''.join(_UpperCAmelCase ).replace(_UpperCAmelCase ,' ' ).strip() return out_string def __lowercase ( self : Optional[Any] ,_UpperCAmelCase : List[int] ,_UpperCAmelCase : Optional[List[int]] = None ): _a : List[str] = [self.sep_token_id] _a : int = [self.cls_token_id] if token_ids_a is None: return token_ids_a + sep + cls return token_ids_a + sep + token_ids_a + sep + cls def __lowercase ( self : Optional[Any] ,_UpperCAmelCase : List[int] ,_UpperCAmelCase : Optional[List[int]] = None ,_UpperCAmelCase : bool = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_UpperCAmelCase ,token_ids_a=_UpperCAmelCase ,already_has_special_tokens=_UpperCAmelCase ) if token_ids_a is not None: return ([0] * len(_UpperCAmelCase )) + [1] + ([0] * len(_UpperCAmelCase )) + [1, 1] return ([0] * len(_UpperCAmelCase )) + [1, 1] def __lowercase ( self : Optional[Any] ,_UpperCAmelCase : List[int] ,_UpperCAmelCase : Optional[List[int]] = None ): _a : Union[str, Any] = [self.sep_token_id] _a : List[str] = [2] if token_ids_a is None: return len(token_ids_a + sep ) * [0] + cls_segment_id return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id def __lowercase ( self : List[str] 
,_UpperCAmelCase : str ,_UpperCAmelCase : Optional[str] = None ): if not os.path.isdir(_UpperCAmelCase ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return _a : List[Any] = os.path.join( _UpperCAmelCase ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file ,_UpperCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(_UpperCAmelCase ,'wb' ) as fi: _a : Optional[Any] = self.sp_model.serialized_model_proto() fi.write(_UpperCAmelCase ) return (out_vocab_file,) def __lowercase ( self : int ,*_UpperCAmelCase : Dict ,**_UpperCAmelCase : List[Any] ): _a : List[str] = super()._decode(*_UpperCAmelCase ,**_UpperCAmelCase ) _a : List[str] = text.replace(' ' ,'' ).replace('\u2582' ,' ' ).replace('\u2583' ,'\n' ) return text
89
'''simple docstring''' class A : def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Tuple: """simple docstring""" A : Any = None A : Optional[Any] = None A : Tuple = graph self._normalize_graph(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) A : Dict = len(SCREAMING_SNAKE_CASE ) A : Optional[Any] = None def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[Any]: """simple docstring""" if sources is int: A : Dict = [sources] if sinks is int: A : str = [sinks] if len(SCREAMING_SNAKE_CASE ) == 0 or len(SCREAMING_SNAKE_CASE ) == 0: return A : Optional[int] = sources[0] A : Union[str, Any] = sinks[0] # make fake vertex if there are more # than one source or sink if len(SCREAMING_SNAKE_CASE ) > 1 or len(SCREAMING_SNAKE_CASE ) > 1: A : Optional[int] = 0 for i in sources: max_input_flow += sum(self.graph[i] ) A : Dict = len(self.graph ) + 1 for room in self.graph: room.insert(0 , 0 ) self.graph.insert(0 , [0] * size ) for i in sources: A : Dict = max_input_flow A : Tuple = 0 A : Tuple = len(self.graph ) + 1 for room in self.graph: room.append(0 ) self.graph.append([0] * size ) for i in sinks: A : Optional[Any] = max_input_flow A : Optional[Any] = size - 1 def __lowerCAmelCase ( self ) -> Any: """simple docstring""" if self.maximum_flow_algorithm is None: raise Exception('''You need to set maximum flow algorithm before.''' ) if self.source_index is None or self.sink_index is None: return 0 self.maximum_flow_algorithm.execute() return self.maximum_flow_algorithm.getMaximumFlow() def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Union[str, Any]: """simple docstring""" A : List[Any] = algorithm(self ) class A : def __init__( self , SCREAMING_SNAKE_CASE ) -> Union[str, Any]: """simple docstring""" A : Union[str, Any] = flow_network A : Optional[Any] = flow_network.verticesCount A : Tuple = flow_network.sourceIndex A : Dict = flow_network.sinkIndex # it's just a reference, so you shouldn't change # it in your algorithms, use deep copy before doing that A : str = flow_network.graph A : Optional[Any] = False def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" if not self.executed: self._algorithm() A : Optional[int] = True def __lowerCAmelCase ( self ) -> Any: """simple docstring""" pass class A ( __snake_case ): def __init__( self , SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" super().__init__(SCREAMING_SNAKE_CASE ) # use this to save your result A : List[str] = -1 def __lowerCAmelCase ( self ) -> str: """simple docstring""" if not self.executed: raise Exception('''You should execute algorithm before using its result!''' ) return self.maximum_flow class A ( __snake_case ): def __init__( self , SCREAMING_SNAKE_CASE ) -> List[Any]: """simple docstring""" super().__init__(SCREAMING_SNAKE_CASE ) A : Optional[Any] = [[0] * self.verticies_count for i in range(self.verticies_count )] A : Union[str, Any] = [0] * self.verticies_count A : List[Any] = [0] * self.verticies_count def __lowerCAmelCase ( self ) -> Dict: """simple docstring""" A : Tuple = self.verticies_count # push some substance to graph for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ): self.preflow[self.source_index][nextvertex_index] += bandwidth self.preflow[nextvertex_index][self.source_index] -= bandwidth self.excesses[nextvertex_index] += bandwidth # Relabel-to-front selection rule A : Optional[Any] = [ i for i in range(self.verticies_count ) if i != self.source_index and i != self.sink_index ] # move through list 
A : Union[str, Any] = 0 while i < len(SCREAMING_SNAKE_CASE ): A : str = vertices_list[i] A : List[str] = self.heights[vertex_index] self.process_vertex(SCREAMING_SNAKE_CASE ) if self.heights[vertex_index] > previous_height: # if it was relabeled, swap elements # and start from 0 index vertices_list.insert(0 , vertices_list.pop(SCREAMING_SNAKE_CASE ) ) A : int = 0 else: i += 1 A : Optional[Any] = sum(self.preflow[self.source_index] ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Tuple: """simple docstring""" while self.excesses[vertex_index] > 0: for neighbour_index in range(self.verticies_count ): # if it's neighbour and current vertex is higher if ( self.graph[vertex_index][neighbour_index] - self.preflow[vertex_index][neighbour_index] > 0 and self.heights[vertex_index] > self.heights[neighbour_index] ): self.push(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) self.relabel(SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Union[str, Any]: """simple docstring""" A : Dict = min( self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , ) self.preflow[from_index][to_index] += preflow_delta self.preflow[to_index][from_index] -= preflow_delta self.excesses[from_index] -= preflow_delta self.excesses[to_index] += preflow_delta def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> int: """simple docstring""" A : Dict = None for to_index in range(self.verticies_count ): if ( self.graph[vertex_index][to_index] - self.preflow[vertex_index][to_index] > 0 ) and (min_height is None or self.heights[to_index] < min_height): A : Dict = self.heights[to_index] if min_height is not None: A : Dict = min_height + 1 if __name__ == "__main__": lowercase : Optional[int] = [0] lowercase : List[Any] = [3] # graph = [ # [0, 0, 4, 6, 0, 0], # [0, 0, 5, 2, 0, 0], # [0, 0, 0, 0, 4, 4], # [0, 0, 0, 0, 6, 6], # [0, 0, 0, 0, 0, 0], # [0, 0, 0, 0, 0, 0], # ] lowercase : int = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]] # prepare our network lowercase : List[str] = FlowNetwork(graph, entrances, exits) # set algorithm flow_network.set_maximum_flow_algorithm(PushRelabelExecutor) # and calculate lowercase : List[str] = flow_network.find_maximum_flow() print(f'''maximum flow is {maximum_flow}''')
3
0
import argparse from transformers import ( TapasConfig, TapasForMaskedLM, TapasForQuestionAnswering, TapasForSequenceClassification, TapasModel, TapasTokenizer, load_tf_weights_in_tapas, ) from transformers.utils import logging logging.set_verbosity_info() def lowerCamelCase_ ( UpperCamelCase__ : int , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Tuple , UpperCamelCase__ : Union[str, Any] ) -> Dict: """simple docstring""" __lowerCamelCase = TapasConfig.from_json_file(UpperCamelCase__ ) # set absolute/relative position embeddings parameter __lowerCamelCase = reset_position_index_per_cell # set remaining parameters of TapasConfig as well as the model based on the task if task == "SQA": __lowerCamelCase = TapasForQuestionAnswering(config=UpperCamelCase__ ) elif task == "WTQ": # run_task_main.py hparams __lowerCamelCase = 4 __lowerCamelCase = True # hparam_utils.py hparams __lowerCamelCase = 0.66_46_94 __lowerCamelCase = 0.20_79_51 __lowerCamelCase = 0.12_11_94 __lowerCamelCase = True __lowerCamelCase = True __lowerCamelCase = False __lowerCamelCase = 0.0_35_25_13 __lowerCamelCase = TapasForQuestionAnswering(config=UpperCamelCase__ ) elif task == "WIKISQL_SUPERVISED": # run_task_main.py hparams __lowerCamelCase = 4 __lowerCamelCase = False # hparam_utils.py hparams __lowerCamelCase = 36.45_19 __lowerCamelCase = 0.90_34_21 __lowerCamelCase = 2_22.0_88 __lowerCamelCase = True __lowerCamelCase = True __lowerCamelCase = True __lowerCamelCase = 0.76_31_41 __lowerCamelCase = TapasForQuestionAnswering(config=UpperCamelCase__ ) elif task == "TABFACT": __lowerCamelCase = TapasForSequenceClassification(config=UpperCamelCase__ ) elif task == "MLM": __lowerCamelCase = TapasForMaskedLM(config=UpperCamelCase__ ) elif task == "INTERMEDIATE_PRETRAINING": __lowerCamelCase = TapasModel(config=UpperCamelCase__ ) else: raise ValueError(F"""Task {task} not supported.""" ) print(F"""Building PyTorch model from configuration: {config}""" ) # Load weights from tf checkpoint load_tf_weights_in_tapas(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # Save pytorch-model (weights and configuration) print(F"""Save PyTorch model to {pytorch_dump_path}""" ) model.save_pretrained(UpperCamelCase__ ) # Save tokenizer files print(F"""Save tokenizer files to {pytorch_dump_path}""" ) __lowerCamelCase = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + 'vocab.txt' , model_max_length=512 ) tokenizer.save_pretrained(UpperCamelCase__ ) print('Used relative position embeddings:' , model.config.reset_position_index_per_cell ) if __name__ == "__main__": __A = argparse.ArgumentParser() # Required parameters parser.add_argument( "--task", default="SQA", type=str, help="Model task for which to convert a checkpoint. Defaults to SQA." ) parser.add_argument( "--reset_position_index_per_cell", default=False, action="store_true", help="Whether to use relative position embeddings or not. Defaults to True.", ) parser.add_argument( "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path." ) parser.add_argument( "--tapas_config_file", default=None, type=str, required=True, help=( "The config json file corresponding to the pre-trained TAPAS model. \n" "This specifies the model architecture." ), ) parser.add_argument( "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model." 
) __A = parser.parse_args() convert_tf_checkpoint_to_pytorch( args.task, args.reset_position_index_per_cell, args.tf_checkpoint_path, args.tapas_config_file, args.pytorch_dump_path, )
90
'''Project Euler 97: last n digits of the non-Mersenne prime 28433 * 2**7830457 + 1.'''


def solution(n: int = 10) -> str:
    '''Return the last ``n`` digits of 28433 * 2**7830457 + 1.'''
    if not isinstance(n, int) or n < 0:
        raise ValueError('''Invalid input''')
    modulus = 10**n
    number = 28433 * (pow(2, 7830457, modulus)) + 1
    return str(number % modulus)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print(f'''{solution(10) = }''')
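The sample above relies on Python's three-argument `pow`, which performs fast modular exponentiation, so the roughly 2.4-million-digit value of 2**7830457 is never built in memory. A tiny self-contained check of that identity on a small exponent:

# Three-argument pow is (base ** exp) % mod computed by fast modular
# exponentiation; verified here on an exponent small enough to do naively.
assert pow(2, 100, 10**10) == (2**100) % 10**10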
3
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available UpperCAmelCase_ : List[str] = { """configuration_ctrl""": ["""CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CTRLConfig"""], """tokenization_ctrl""": ["""CTRLTokenizer"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ : Union[str, Any] = [ """CTRL_PRETRAINED_MODEL_ARCHIVE_LIST""", """CTRLForSequenceClassification""", """CTRLLMHeadModel""", """CTRLModel""", """CTRLPreTrainedModel""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ : List[Any] = [ """TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFCTRLForSequenceClassification""", """TFCTRLLMHeadModel""", """TFCTRLModel""", """TFCTRLPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig from .tokenization_ctrl import CTRLTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_ctrl import ( CTRL_PRETRAINED_MODEL_ARCHIVE_LIST, CTRLForSequenceClassification, CTRLLMHeadModel, CTRLModel, CTRLPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_ctrl import ( TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST, TFCTRLForSequenceClassification, TFCTRLLMHeadModel, TFCTRLModel, TFCTRLPreTrainedModel, ) else: import sys UpperCAmelCase_ : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
91
'''simple docstring''' from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast from ...utils import logging lowercase : List[str] = logging.get_logger(__name__) lowercase : str = { 'EleutherAI/gpt-neo-1.3B': 'https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json', # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo } class A ( __snake_case ): __magic_name__ = '''gpt_neo''' __magic_name__ = ['''past_key_values'''] __magic_name__ = {'''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''} def __init__( self , SCREAMING_SNAKE_CASE=50257 , SCREAMING_SNAKE_CASE=2048 , SCREAMING_SNAKE_CASE=2048 , SCREAMING_SNAKE_CASE=24 , SCREAMING_SNAKE_CASE=[[["global", "local"], 12]] , SCREAMING_SNAKE_CASE=16 , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=256 , SCREAMING_SNAKE_CASE="gelu_new" , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=1e-5 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=50256 , SCREAMING_SNAKE_CASE=50256 , **SCREAMING_SNAKE_CASE , ) -> int: """simple docstring""" A : Union[str, Any] = vocab_size A : Optional[Any] = max_position_embeddings A : Dict = hidden_size A : Optional[Any] = num_layers A : Tuple = num_heads A : int = intermediate_size A : Optional[Any] = window_size A : List[Any] = activation_function A : Union[str, Any] = resid_dropout A : Any = embed_dropout A : List[Any] = attention_dropout A : str = classifier_dropout A : List[Any] = layer_norm_epsilon A : str = initializer_range A : List[str] = use_cache A : Optional[int] = bos_token_id A : List[Any] = eos_token_id A : int = attention_types A : int = self.expand_attention_types_params(SCREAMING_SNAKE_CASE ) if len(self.attention_layers ) != self.num_layers: raise ValueError( '''Configuration for convolutional module is incorrect. ''' '''It is required that `len(config.attention_layers)` == `config.num_layers` ''' F'but is `len(config.attention_layers) = {len(self.attention_layers )}`, ' F'`config.num_layers = {self.num_layers}`. ' '''`config.attention_layers` is prepared using `config.attention_types`. 
''' '''Please verify the value of `config.attention_types` argument.''' ) super().__init__(bos_token_id=SCREAMING_SNAKE_CASE , eos_token_id=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) @staticmethod def __lowerCAmelCase ( SCREAMING_SNAKE_CASE ) -> int: """simple docstring""" A : List[str] = [] for item in attention_types: for _ in range(item[1] ): attentions.extend(item[0] ) return attentions def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' import torch A : Tuple = input.size() A : Union[str, Any] = len(snake_case__ ) A : List[str] = shape[dimension] A : Union[str, Any] = torch.arange(0 , snake_case__ , snake_case__ ) A : List[str] = torch.div(sizedim - size , snake_case__ , rounding_mode='''floor''' ) + 1 A : Optional[int] = torch.arange(snake_case__ ) + low_indices[:min_length][:, None] A : str = [slice(snake_case__ )] * rank A : List[Any] = indices A : Union[str, Any] = input[s] A : List[str] = list(range(0 , rank + 1 ) ) perm.append(perm.pop(dimension + 1 ) ) return sliced.permute(snake_case__ ) def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' import torch A : List[str] = torch.arange(1 , snake_case__ ) A : Optional[int] = torch.remainder(snake_case__ , snake_case__ ) A : Optional[int] = remainders == 0 A : Optional[Any] = candidates[divisor_indices] A : Optional[int] = torch.max(snake_case__ ) return largest_divisor, torch.div(snake_case__ , snake_case__ , rounding_mode='''floor''' ) class A ( __snake_case ): @property def __lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" A : Tuple = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} ) if self.use_past: self.fill_with_past_key_values_(SCREAMING_SNAKE_CASE , direction='''inputs''' ) A : Optional[Any] = {0: '''batch''', 1: '''past_sequence + sequence'''} else: A : Dict = {0: '''batch''', 1: '''sequence'''} return common_inputs @property def __lowerCAmelCase ( self ) -> int: """simple docstring""" return self._config.num_heads def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = -1 , SCREAMING_SNAKE_CASE = -1 , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = None , ) -> Mapping[str, Any]: """simple docstring""" A : List[str] = super(SCREAMING_SNAKE_CASE , self ).generate_dummy_inputs( SCREAMING_SNAKE_CASE , batch_size=SCREAMING_SNAKE_CASE , seq_length=SCREAMING_SNAKE_CASE , is_pair=SCREAMING_SNAKE_CASE , framework=SCREAMING_SNAKE_CASE ) # We need to order the input in the way they appears in the forward() A : Any = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch A, A : Dict = common_inputs['''input_ids'''].shape # Not using the same length for past_key_values A : str = seqlen + 2 A : List[Any] = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) A : Any = [ (torch.zeros(SCREAMING_SNAKE_CASE ), torch.zeros(SCREAMING_SNAKE_CASE )) for _ in range(self.num_layers ) ] A : str = common_inputs['''attention_mask'''] if self.use_past: A : Optional[int] = ordered_inputs['''attention_mask'''].dtype A : List[str] = torch.cat( [ordered_inputs['''attention_mask'''], torch.ones(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , dtype=SCREAMING_SNAKE_CASE )] , dim=1 ) return ordered_inputs @property def __lowerCAmelCase ( self ) -> 
int: """simple docstring""" return 13
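The layer-type expansion performed by the static method above can be sketched standalone (the helper name below is illustrative, not part of the file): the default attention_types=[[["global", "local"], 12]] expands to 24 alternating per-layer entries.

def expand_attention_types(attention_types):
    # [[pattern, repeats], ...] -> flat per-layer list, e.g. 12 x ["global", "local"]
    attentions = []
    for pattern, repeats in attention_types:
        for _ in range(repeats):
            attentions.extend(pattern)
    return attentions

assert expand_attention_types([[["global", "local"], 12]]) == ["global", "local"] * 12
assert len(expand_attention_types([[["global", "local"], 12]])) == 24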
import unittest import numpy as np from transformers import BertConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_flax_available(): from transformers.models.bert.modeling_flax_bert import ( FlaxBertForMaskedLM, FlaxBertForMultipleChoice, FlaxBertForNextSentencePrediction, FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification, FlaxBertForTokenClassification, FlaxBertModel, ) class a__ ( unittest.TestCase ): def __init__( self , _A , _A=1_3 , _A=7 , _A=True , _A=True , _A=True , _A=True , _A=9_9 , _A=3_2 , _A=5 , _A=4 , _A=3_7 , _A="gelu" , _A=0.1 , _A=0.1 , _A=5_1_2 , _A=1_6 , _A=2 , _A=0.02 , _A=4 , ): """simple docstring""" __lowerCAmelCase = parent __lowerCAmelCase = batch_size __lowerCAmelCase = seq_length __lowerCAmelCase = is_training __lowerCAmelCase = use_attention_mask __lowerCAmelCase = use_token_type_ids __lowerCAmelCase = use_labels __lowerCAmelCase = vocab_size __lowerCAmelCase = hidden_size __lowerCAmelCase = num_hidden_layers __lowerCAmelCase = num_attention_heads __lowerCAmelCase = intermediate_size __lowerCAmelCase = hidden_act __lowerCAmelCase = hidden_dropout_prob __lowerCAmelCase = attention_probs_dropout_prob __lowerCAmelCase = max_position_embeddings __lowerCAmelCase = type_vocab_size __lowerCAmelCase = type_sequence_label_size __lowerCAmelCase = initializer_range __lowerCAmelCase = num_choices def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowerCAmelCase = None if self.use_attention_mask: __lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] ) __lowerCAmelCase = None if self.use_token_type_ids: __lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __lowerCAmelCase = BertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_A , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = self.prepare_config_and_inputs() __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = config_and_inputs __lowerCAmelCase = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask} return config, inputs_dict def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = self.prepare_config_and_inputs() __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = config_and_inputs __lowerCAmelCase = True __lowerCAmelCase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) __lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, attention_mask, encoder_hidden_states, encoder_attention_mask, ) @require_flax class a__ ( snake_case__ , unittest.TestCase ): _a : List[str] = True _a : List[str] = ( ( FlaxBertModel, FlaxBertForPreTraining, FlaxBertForMaskedLM, FlaxBertForMultipleChoice, 
FlaxBertForQuestionAnswering, FlaxBertForNextSentencePrediction, FlaxBertForSequenceClassification, FlaxBertForTokenClassification, FlaxBertForQuestionAnswering, ) if is_flax_available() else () ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = FlaxBertModelTester(self ) @slow def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = FlaxBertModel.from_pretrained("bert-base-cased" ) __lowerCAmelCase = model(np.ones((1, 1) ) ) self.assertIsNotNone(_A )
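A minimal runnable sketch of the slow test above, assuming jax/flax and network access are available; bert-base-cased has hidden size 768.

import numpy as np
from transformers import FlaxBertModel

model = FlaxBertModel.from_pretrained("bert-base-cased")
outputs = model(np.ones((1, 1), dtype="i4"))
print(outputs.last_hidden_state.shape)  # (1, 1, 768)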
'''simple docstring''' import flax.linen as nn import jax.numpy as jnp from .attention_flax import FlaxTransformeraDModel from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD class A ( nn.Module ): __magic_name__ = 42 __magic_name__ = 42 __magic_name__ = 0.0 __magic_name__ = 1 __magic_name__ = 1 __magic_name__ = True __magic_name__ = False __magic_name__ = False __magic_name__ = False __magic_name__ = jnp.floataa def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" A : Union[str, Any] = [] A : Union[str, Any] = [] for i in range(self.num_layers ): A : Any = self.in_channels if i == 0 else self.out_channels A : Optional[Any] = FlaxResnetBlockaD( in_channels=SCREAMING_SNAKE_CASE , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(SCREAMING_SNAKE_CASE ) A : Optional[int] = FlaxTransformeraDModel( in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(SCREAMING_SNAKE_CASE ) A : Union[str, Any] = resnets A : Union[str, Any] = attentions if self.add_downsample: A : int = FlaxDownsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=True ) -> Union[str, Any]: """simple docstring""" A : Optional[Any] = () for resnet, attn in zip(self.resnets , self.attentions ): A : int = resnet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE ) A : Dict = attn(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE ) output_states += (hidden_states,) if self.add_downsample: A : Optional[Any] = self.downsamplers_a(SCREAMING_SNAKE_CASE ) output_states += (hidden_states,) return hidden_states, output_states class A ( nn.Module ): __magic_name__ = 42 __magic_name__ = 42 __magic_name__ = 0.0 __magic_name__ = 1 __magic_name__ = True __magic_name__ = jnp.floataa def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" A : Optional[Any] = [] for i in range(self.num_layers ): A : Optional[Any] = self.in_channels if i == 0 else self.out_channels A : List[str] = FlaxResnetBlockaD( in_channels=SCREAMING_SNAKE_CASE , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(SCREAMING_SNAKE_CASE ) A : Dict = resnets if self.add_downsample: A : Dict = FlaxDownsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=True ) -> Optional[Any]: """simple docstring""" A : str = () for resnet in self.resnets: A : Optional[int] = resnet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE ) output_states += (hidden_states,) if self.add_downsample: A : Optional[int] = self.downsamplers_a(SCREAMING_SNAKE_CASE ) output_states += (hidden_states,) return hidden_states, output_states class A ( nn.Module ): __magic_name__ = 42 __magic_name__ = 42 __magic_name__ = 42 __magic_name__ = 0.0 __magic_name__ = 1 __magic_name__ = 1 __magic_name__ = True __magic_name__ = False __magic_name__ = False __magic_name__ = False __magic_name__ = jnp.floataa def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" A : Optional[Any] = [] A : Optional[int] = [] for i in 
range(self.num_layers ): A : str = self.in_channels if (i == self.num_layers - 1) else self.out_channels A : Dict = self.prev_output_channel if i == 0 else self.out_channels A : List[str] = FlaxResnetBlockaD( in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(SCREAMING_SNAKE_CASE ) A : int = FlaxTransformeraDModel( in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(SCREAMING_SNAKE_CASE ) A : Dict = resnets A : Optional[Any] = attentions if self.add_upsample: A : Optional[int] = FlaxUpsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=True ) -> Optional[int]: """simple docstring""" for resnet, attn in zip(self.resnets , self.attentions ): # pop res hidden states A : List[str] = res_hidden_states_tuple[-1] A : int = res_hidden_states_tuple[:-1] A : List[str] = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 ) A : Union[str, Any] = resnet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE ) A : Tuple = attn(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE ) if self.add_upsample: A : Dict = self.upsamplers_a(SCREAMING_SNAKE_CASE ) return hidden_states class A ( nn.Module ): __magic_name__ = 42 __magic_name__ = 42 __magic_name__ = 42 __magic_name__ = 0.0 __magic_name__ = 1 __magic_name__ = True __magic_name__ = jnp.floataa def __lowerCAmelCase ( self ) -> Dict: """simple docstring""" A : int = [] for i in range(self.num_layers ): A : List[Any] = self.in_channels if (i == self.num_layers - 1) else self.out_channels A : List[str] = self.prev_output_channel if i == 0 else self.out_channels A : str = FlaxResnetBlockaD( in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(SCREAMING_SNAKE_CASE ) A : List[Any] = resnets if self.add_upsample: A : Optional[Any] = FlaxUpsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=True ) -> Tuple: """simple docstring""" for resnet in self.resnets: # pop res hidden states A : Optional[int] = res_hidden_states_tuple[-1] A : Optional[Any] = res_hidden_states_tuple[:-1] A : List[Any] = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 ) A : Optional[Any] = resnet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE ) if self.add_upsample: A : List[str] = self.upsamplers_a(SCREAMING_SNAKE_CASE ) return hidden_states class A ( nn.Module ): __magic_name__ = 42 __magic_name__ = 0.0 __magic_name__ = 1 __magic_name__ = 1 __magic_name__ = False __magic_name__ = False __magic_name__ = jnp.floataa def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" A : str = [ FlaxResnetBlockaD( in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , ) ] A : List[Any] = [] for _ in range(self.num_layers ): A : int = FlaxTransformeraDModel( in_channels=self.in_channels , 
n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(SCREAMING_SNAKE_CASE ) A : Union[str, Any] = FlaxResnetBlockaD( in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(SCREAMING_SNAKE_CASE ) A : List[str] = resnets A : List[str] = attentions def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=True ) -> Dict: """simple docstring""" A : Optional[Any] = self.resnets[0](SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for attn, resnet in zip(self.attentions , self.resnets[1:] ): A : Optional[int] = attn(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE ) A : Union[str, Any] = resnet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE ) return hidden_states
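A shape-only sketch of the skip-connection wiring the up blocks rely on: the most recent residual is popped from the tuple and concatenated on the channel axis (these Flax blocks are NHWC, hence axis=-1). The slice at the end stands in for the resnet that maps channels back down; all sizes are illustrative.

import jax.numpy as jnp

hidden_states = jnp.zeros((1, 8, 8, 320))
res_hidden_states_tuple = tuple(jnp.zeros((1, 8, 8, 320)) for _ in range(3))

for _ in range(3):
    res_hidden_states = res_hidden_states_tuple[-1]
    res_hidden_states_tuple = res_hidden_states_tuple[:-1]
    hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)
    hidden_states = hidden_states[..., :320]  # stand-in for FlaxResnetBlock2D(640 -> 320)

assert hidden_states.shape == (1, 8, 8, 320)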
import random


class Onepad:
    @staticmethod
    def encrypt(text):
        """Encrypt `text` with pseudo-random one-time keys; returns (cipher, key)."""
        plain = [ord(char) for char in text]
        cipher = []
        key = []
        for p in plain:
            k = random.randint(1, 300)
            cipher.append((p + k) * k)
            key.append(k)
        return cipher, key

    @staticmethod
    def decrypt(cipher, key):
        """Invert `encrypt`: recover each byte as (c - k**2) / k."""
        plain = []
        for i in range(len(key)):
            plain.append(chr(int((cipher[i] - key[i] ** 2) / key[i])))
        return "".join(plain)


if __name__ == "__main__":
    c, k = Onepad.encrypt("Hello")
    print(c, k)
    print(Onepad.decrypt(c, k))
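Why decryption inverts encryption, worked through for one byte (the values are arbitrary):

p, k = 72, 137            # plaintext byte and key
cipher = (p + k) * k      # = p*k + k**2 = 28633
assert (cipher - k**2) // k == p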
import os


def solution():
    """Return the first ten digits of the sum of the numbers in num.txt."""
    file_path = os.path.join(os.path.dirname(__file__), "num.txt")
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]


if __name__ == "__main__":
    print(solution())
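The same idea works on inline data, since Python integers have arbitrary precision: sum exactly, then slice the decimal string (the two numbers below are sample values, not the contents of num.txt).

numbers = [
    37107287533902102798797998220837590246510135740250,
    46376937677490009712648124896970078050417018260538,
]
print(str(sum(numbers))[:10])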
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run

import doctest
import sys
import warnings
from os.path import abspath, dirname, join

import _pytest

from transformers.testing_utils import HfDoctestModule, HfDocTestParser


# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)


def pytest_configure(config):
    config.addinivalue_line(
        "markers", "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested"
    )
    config.addinivalue_line(
        "markers", "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested"
    )
    config.addinivalue_line("markers", "is_pipeline_test: mark test to run only when pipelines are tested")
    config.addinivalue_line("markers", "is_staging_test: mark test to run only in the staging environment")
    config.addinivalue_line("markers", "accelerate_tests: mark test that require accelerate")
    config.addinivalue_line("markers", "tool_tests: mark the tool tests that are run on their specific schedule")


def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)


def pytest_sessionfinish(session, exitstatus):
    # If no tests are collected, pytest exits with code 5; reset it so CI does not fail.
    if exitstatus == 5:
        session.exitstatus = 0


# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT")

OutputChecker = doctest.OutputChecker


class CustomOutputChecker(OutputChecker):
    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self, want, got, optionflags)


doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
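The doctest hook at the end is the subtle part; in isolation, registering and honoring a custom option flag looks like this (the checker class name is illustrative):

import doctest

IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT")

class IgnoringOutputChecker(doctest.OutputChecker):
    def check_output(self, want, got, optionflags):
        # treat any output as matching when the flag is set on the example
        if IGNORE_RESULT & optionflags:
            return True
        return super().check_output(want, got, optionflags)

doctest.OutputChecker = IgnoringOutputChecker  # monkey-patch, as the conftest does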
import pytest

import datasets.config
from datasets.utils.info_utils import is_small_dataset


@pytest.mark.parametrize("dataset_size", [None, 400 * 2**20, 600 * 2**20])
@pytest.mark.parametrize("input_in_memory_max_size", ["default", 0, 100 * 2**20, 900 * 2**20])
def test_is_small_dataset(dataset_size, input_in_memory_max_size, monkeypatch):
    if input_in_memory_max_size != "default":
        monkeypatch.setattr(datasets.config, "IN_MEMORY_MAX_SIZE", input_in_memory_max_size)
    in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE
    if input_in_memory_max_size == "default":
        assert in_memory_max_size == 0
    else:
        assert in_memory_max_size == input_in_memory_max_size
    if dataset_size and in_memory_max_size:
        expected = dataset_size < in_memory_max_size
    else:
        expected = False
    result = is_small_dataset(dataset_size)
    assert result == expected
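The predicate under test, restated standalone (the function name here is illustrative): a dataset counts as small only when a non-zero in-memory cap is configured and the size is known and below it.

def is_small(dataset_size, in_memory_max_size):
    if dataset_size and in_memory_max_size:
        return dataset_size < in_memory_max_size
    return False

assert is_small(400 * 2**20, 900 * 2**20) is True
assert is_small(600 * 2**20, 0) is False
assert is_small(None, 900 * 2**20) is False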
from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional, Union from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast from ...utils import logging if TYPE_CHECKING: from ...feature_extraction_utils import FeatureExtractionMixin from ...tokenization_utils_base import PreTrainedTokenizerBase from ...utils import TensorType UpperCAmelCase : Any = logging.get_logger(__name__) UpperCAmelCase : List[str] = { """openai/whisper-base""": """https://huggingface.co/openai/whisper-base/resolve/main/config.json""", } # fmt: off UpperCAmelCase : Optional[Any] = [ 1, 2, 7, 8, 9, 10, 14, 25, 26, 27, 28, 29, 31, 58, 59, 60, 61, 62, 63, 90, 91, 92, 93, 357, 366, 438, 532, 685, 705, 796, 930, 1058, 1220, 1267, 1279, 1303, 1343, 1377, 1391, 1635, 1782, 1875, 2162, 2361, 2488, 3467, 4008, 4211, 4600, 4808, 5299, 5855, 6329, 7203, 9609, 9959, 10563, 10786, 11420, 11709, 11907, 13163, 13697, 13700, 14808, 15306, 16410, 16791, 17992, 19203, 19510, 20724, 22305, 22935, 27007, 30109, 30420, 33409, 34949, 40283, 40493, 40549, 47282, 49146, 50257, 50359, 50360, 50361 ] UpperCAmelCase : Tuple = [ 1, 2, 7, 8, 9, 10, 14, 25, 26, 27, 28, 29, 31, 58, 59, 60, 61, 62, 63, 90, 91, 92, 93, 359, 503, 522, 542, 873, 893, 902, 918, 922, 931, 1350, 1853, 1982, 2460, 2627, 3246, 3253, 3268, 3536, 3846, 3961, 4183, 4667, 6585, 6647, 7273, 9061, 9383, 10428, 10929, 11938, 12033, 12331, 12562, 13793, 14157, 14635, 15265, 15618, 16553, 16604, 18362, 18956, 20075, 21675, 22520, 26130, 26161, 26435, 28279, 29464, 31650, 32302, 32470, 36865, 42863, 47425, 49870, 50254, 50258, 50360, 50361, 50362 ] class __lowerCAmelCase ( UpperCamelCase__): _lowercase : List[str] = """whisper""" _lowercase : List[Any] = ["""past_key_values"""] _lowercase : str = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""} def __init__( self , lowerCAmelCase__=5_1_8_6_5 , lowerCAmelCase__=8_0 , lowerCAmelCase__=6 , lowerCAmelCase__=4 , lowerCAmelCase__=6 , lowerCAmelCase__=4 , lowerCAmelCase__=1_5_3_6 , lowerCAmelCase__=1_5_3_6 , lowerCAmelCase__=0.0 , lowerCAmelCase__=0.0 , lowerCAmelCase__=5_0_2_5_7 , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__="gelu" , lowerCAmelCase__=2_5_6 , lowerCAmelCase__=0.0 , lowerCAmelCase__=0.0 , lowerCAmelCase__=0.0 , lowerCAmelCase__=0.02 , lowerCAmelCase__=False , lowerCAmelCase__=1_5_0_0 , lowerCAmelCase__=4_4_8 , lowerCAmelCase__=5_0_2_5_6 , lowerCAmelCase__=5_0_2_5_6 , lowerCAmelCase__=5_0_2_5_6 , lowerCAmelCase__=None , lowerCAmelCase__=[2_2_0, 5_0_2_5_6] , lowerCAmelCase__=False , lowerCAmelCase__=2_5_6 , lowerCAmelCase__=False , lowerCAmelCase__=0.05 , lowerCAmelCase__=1_0 , lowerCAmelCase__=2 , lowerCAmelCase__=0.0 , lowerCAmelCase__=1_0 , lowerCAmelCase__=0 , lowerCAmelCase__=7 , **lowerCAmelCase__ , ) -> Any: '''simple docstring''' a__ : Optional[Any] =vocab_size a__ : List[str] =num_mel_bins a__ : List[str] =d_model a__ : Optional[Any] =encoder_layers a__ : Union[str, Any] =encoder_attention_heads a__ : List[Any] =decoder_layers a__ : Dict =decoder_attention_heads a__ : str =decoder_ffn_dim a__ : int =encoder_ffn_dim a__ : Tuple =dropout a__ : Optional[Any] =attention_dropout a__ : List[str] =activation_dropout a__ : str =activation_function a__ : Dict =init_std a__ : List[str] =encoder_layerdrop a__ : Optional[Any] =decoder_layerdrop a__ : Optional[Any] =use_cache a__ : Tuple =encoder_layers a__ : int =scale_embedding # scale factor will be sqrt(d_model) if True a__ : 
Union[str, Any] =max_source_positions a__ : Union[str, Any] =max_target_positions # Audio Classification-specific parameters. Feel free to ignore for other classes. a__ : List[Any] =classifier_proj_size a__ : int =use_weighted_layer_sum # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 a__ : str =apply_spec_augment a__ : Union[str, Any] =mask_time_prob a__ : Dict =mask_time_length a__ : int =mask_time_min_masks a__ : Optional[int] =mask_feature_prob a__ : List[Any] =mask_feature_length a__ : Tuple =mask_feature_min_masks a__ : List[str] =median_filter_width super().__init__( pad_token_id=lowerCAmelCase__ , bos_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , is_encoder_decoder=lowerCAmelCase__ , decoder_start_token_id=lowerCAmelCase__ , suppress_tokens=lowerCAmelCase__ , begin_suppress_tokens=lowerCAmelCase__ , **lowerCAmelCase__ , ) class __lowerCAmelCase ( UpperCamelCase__): @property def _lowercase ( self ) -> Mapping[str, Mapping[int, str]]: '''simple docstring''' a__ : List[str] =OrderedDict( [ ("input_features", {0: "batch", 1: "feature_size", 2: "encoder_sequence"}), ] ) if self.use_past: a__ : str ={0: "batch"} else: a__ : List[str] ={0: "batch", 1: "decoder_sequence"} if self.use_past: self.fill_with_past_key_values_(lowerCAmelCase__ , direction="inputs" ) return common_inputs def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__ = -1 , lowerCAmelCase__ = -1 , lowerCAmelCase__ = False , lowerCAmelCase__ = None , lowerCAmelCase__ = 2_2_0_5_0 , lowerCAmelCase__ = 5.0 , lowerCAmelCase__ = 2_2_0 , ) -> Mapping[str, Any]: '''simple docstring''' a__ : str =OrderedDict() a__ : Optional[int] =OnnxConfig.generate_dummy_inputs( self , preprocessor=preprocessor.feature_extractor , batch_size=lowerCAmelCase__ , framework=lowerCAmelCase__ , sampling_rate=lowerCAmelCase__ , time_duration=lowerCAmelCase__ , frequency=lowerCAmelCase__ , ) a__ : Optional[Any] =encoder_inputs["input_features"].shape[2] a__ : Dict =encoder_sequence_length // 2 if self.use_past else seq_length a__ : List[str] =super().generate_dummy_inputs( preprocessor.tokenizer , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) a__ : Optional[Any] =encoder_inputs.pop("input_features" ) a__ : Optional[Any] =decoder_inputs.pop("decoder_input_ids" ) if "past_key_values" in decoder_inputs: a__ : List[Any] =decoder_inputs.pop("past_key_values" ) return dummy_inputs @property def _lowercase ( self ) -> float: '''simple docstring''' return 1E-3
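A minimal usage sketch, assuming a standard transformers install; the keyword names follow the __init__ signature above and the override values are arbitrary.

from transformers import WhisperConfig

config = WhisperConfig(d_model=256, encoder_layers=2, decoder_layers=2)
print(config.num_mel_bins, config.max_source_positions)  # defaults: 80 1500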
'''simple docstring''' from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import PIL from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available from .timesteps import ( fastaa_timesteps, smartaa_timesteps, smartaa_timesteps, smartaaa_timesteps, smartaaa_timesteps, superaa_timesteps, superaa_timesteps, superaaa_timesteps, ) @dataclass class A ( __snake_case ): __magic_name__ = 42 __magic_name__ = 42 __magic_name__ = 42 try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipeline_if import IFPipeline from .pipeline_if_imgaimg import IFImgaImgPipeline from .pipeline_if_imgaimg_superresolution import IFImgaImgSuperResolutionPipeline from .pipeline_if_inpainting import IFInpaintingPipeline from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline from .pipeline_if_superresolution import IFSuperResolutionPipeline from .safety_checker import IFSafetyChecker from .watermark import IFWatermarker
"""simple docstring""" from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import PIL from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available from .timesteps import ( fastaa_timesteps, smartaa_timesteps, smartaa_timesteps, smartaaa_timesteps, smartaaa_timesteps, superaa_timesteps, superaa_timesteps, superaaa_timesteps, ) @dataclass class lowerCAmelCase__ ( lowercase ): '''simple docstring''' lowerCamelCase__ = 42 lowerCamelCase__ = 42 lowerCamelCase__ = 42 try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipeline_if import IFPipeline from .pipeline_if_imgaimg import IFImgaImgPipeline from .pipeline_if_imgaimg_superresolution import IFImgaImgSuperResolutionPipeline from .pipeline_if_inpainting import IFInpaintingPipeline from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline from .pipeline_if_superresolution import IFSuperResolutionPipeline from .safety_checker import IFSafetyChecker from .watermark import IFWatermarker
'''simple docstring''' from scipy.stats import pearsonr import datasets lowercase : Optional[int] = '\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n' lowercase : Optional[Any] = '\nArgs:\n predictions (`list` of `int`): Predicted class labels, as returned by a model.\n references (`list` of `int`): Ground truth labels.\n return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n Example 1-A simple example using only predictions and references.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n\n Example 2-The same as Example 1, but that also returns the `p-value`.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n >>> print(sorted(list(results.keys())))\n [\'p-value\', \'pearsonr\']\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n >>> print(round(results[\'p-value\'], 2))\n 0.15\n' lowercase : str = '\n@article{2020SciPy-NMeth,\nauthor = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Antonio H. 
and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\njournal = {Nature Methods},\nyear = {2020},\nvolume = {17},\npages = {261--272},\nadsurl = {https://rdcu.be/b08Wh},\ndoi = {10.1038/s41592-019-0686-2},\n}\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class A ( datasets.Metric ): def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''float''' ), '''references''': datasets.Value('''float''' ), } ) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'''] , ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False ) -> Union[str, Any]: """simple docstring""" if return_pvalue: A : Union[str, Any] = pearsonr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) return {"pearsonr": results[0], "p-value": results[1]} else: return {"pearsonr": float(pearsonr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )[0] )}
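The metric is a thin wrapper around SciPy, so the docstring example can be reproduced directly:

from scipy.stats import pearsonr

r, p_value = pearsonr([10, 9, 2.5, 6, 4], [1, 2, 3, 4, 5])
print(round(r, 2), round(p_value, 2))  # -0.74 0.15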
import argparse

import torch

from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = MobileBertForPreTraining(config)
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--mobilebert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained MobileBERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
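An illustrative invocation; the script filename and all paths are placeholders, not values from the file above.

# python convert_mobilebert_original_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path /path/to/mobilebert/model.ckpt \
#     --mobilebert_config_file /path/to/config.json \
#     --pytorch_dump_path /path/to/pytorch_model.bin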
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_speech_available, is_tf_available, is_torch_available, ) lowercase : Dict = { 'configuration_speech_to_text': ['SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Speech2TextConfig'], 'processing_speech_to_text': ['Speech2TextProcessor'], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase : List[Any] = ['Speech2TextTokenizer'] try: if not is_speech_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase : List[str] = ['Speech2TextFeatureExtractor'] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase : Dict = [ 'TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFSpeech2TextForConditionalGeneration', 'TFSpeech2TextModel', 'TFSpeech2TextPreTrainedModel', ] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase : Any = [ 'SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST', 'Speech2TextForConditionalGeneration', 'Speech2TextModel', 'Speech2TextPreTrainedModel', ] if TYPE_CHECKING: from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig from .processing_speech_to_text import SpeechaTextProcessor try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_speech_to_text import SpeechaTextTokenizer try: if not is_speech_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_speech_to_text import ( TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST, TFSpeechaTextForConditionalGeneration, TFSpeechaTextModel, TFSpeechaTextPreTrainedModel, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_speech_to_text import ( SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST, SpeechaTextForConditionalGeneration, SpeechaTextModel, SpeechaTextPreTrainedModel, ) else: import sys lowercase : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring""" import importlib.metadata import warnings from copy import deepcopy from packaging import version from ..utils import logging from .import_utils import is_accelerate_available, is_bitsandbytes_available if is_bitsandbytes_available(): import bitsandbytes as bnb import torch import torch.nn as nn from ..pytorch_utils import ConvaD if is_accelerate_available(): from accelerate import init_empty_weights from accelerate.utils import find_tied_parameters lowerCAmelCase__ : Union[str, Any] = logging.get_logger(__name__) def a_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase=None , lowerCamelCase=None ): # Recurse if needed if "." in tensor_name: UpperCAmelCase__ = tensor_name.split('.' ) for split in splits[:-1]: UpperCAmelCase__ = getattr(lowerCamelCase , lowerCamelCase ) if new_module is None: raise ValueError(f'''{module} has no attribute {split}.''' ) UpperCAmelCase__ = new_module UpperCAmelCase__ = splits[-1] if tensor_name not in module._parameters and tensor_name not in module._buffers: raise ValueError(f'''{module} does not have a parameter or a buffer named {tensor_name}.''' ) UpperCAmelCase__ = tensor_name in module._buffers UpperCAmelCase__ = getattr(lowerCamelCase , lowerCamelCase ) if old_value.device == torch.device('meta' ) and device not in ["meta", torch.device('meta' )] and value is None: raise ValueError(f'''{tensor_name} is on the meta device, we need a `value` to put in on {device}.''' ) UpperCAmelCase__ = False UpperCAmelCase__ = False if is_buffer or not is_bitsandbytes_available(): UpperCAmelCase__ = False UpperCAmelCase__ = False else: UpperCAmelCase__ = hasattr(bnb.nn , 'Params4bit' ) and isinstance(module._parameters[tensor_name] , bnb.nn.Paramsabit ) UpperCAmelCase__ = isinstance(module._parameters[tensor_name] , bnb.nn.IntaParams ) if is_abit or is_abit: UpperCAmelCase__ = module._parameters[tensor_name] if param.device.type != "cuda": if value is None: UpperCAmelCase__ = old_value.to(lowerCamelCase ) elif isinstance(lowerCamelCase , torch.Tensor ): UpperCAmelCase__ = value.to('cpu' ) if value.dtype == torch.inta: UpperCAmelCase__ = version.parse(importlib.metadata.version('bitsandbytes' ) ) > version.parse( '0.37.2' ) if not is_abit_serializable: raise ValueError( 'Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. ' 'Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.' ) else: UpperCAmelCase__ = torch.tensor(lowerCamelCase , device='cpu' ) # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization. # Since weights are saved in the correct "orientation", we skip transposing when loading. 
if issubclass(module.source_cls , lowerCamelCase ) and fpaa_statistics is None: UpperCAmelCase__ = new_value.T UpperCAmelCase__ = old_value.__dict__ if is_abit: UpperCAmelCase__ = bnb.nn.IntaParams(lowerCamelCase , requires_grad=lowerCamelCase , **lowerCamelCase ).to(lowerCamelCase ) elif is_abit: UpperCAmelCase__ = bnb.nn.Paramsabit(lowerCamelCase , requires_grad=lowerCamelCase , **lowerCamelCase ).to(lowerCamelCase ) UpperCAmelCase__ = new_value if fpaa_statistics is not None: setattr(module.weight , 'SCB' , fpaa_statistics.to(lowerCamelCase ) ) else: if value is None: UpperCAmelCase__ = old_value.to(lowerCamelCase ) elif isinstance(lowerCamelCase , torch.Tensor ): UpperCAmelCase__ = value.to(lowerCamelCase ) else: UpperCAmelCase__ = torch.tensor(lowerCamelCase , device=lowerCamelCase ) if is_buffer: UpperCAmelCase__ = new_value else: UpperCAmelCase__ = nn.Parameter(lowerCamelCase , requires_grad=old_value.requires_grad ) UpperCAmelCase__ = new_value def a_ ( lowerCamelCase , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=False ): for name, module in model.named_children(): if current_key_name is None: UpperCAmelCase__ = [] current_key_name.append(lowerCamelCase ) if (isinstance(lowerCamelCase , nn.Linear ) or isinstance(lowerCamelCase , lowerCamelCase )) and name not in modules_to_not_convert: # Check if the current key is not in the `modules_to_not_convert` if not any(key in '.'.join(lowerCamelCase ) for key in modules_to_not_convert ): with init_empty_weights(): if isinstance(lowerCamelCase , lowerCamelCase ): UpperCAmelCase__ , UpperCAmelCase__ = module.weight.shape else: UpperCAmelCase__ = module.in_features UpperCAmelCase__ = module.out_features if quantization_config.quantization_method() == "llm_int8": UpperCAmelCase__ = bnb.nn.LinearabitLt( lowerCamelCase , lowerCamelCase , module.bias is not None , has_fpaa_weights=quantization_config.llm_inta_has_fpaa_weight , threshold=quantization_config.llm_inta_threshold , ) UpperCAmelCase__ = True else: if ( quantization_config.llm_inta_skip_modules is not None and name in quantization_config.llm_inta_skip_modules ): pass else: UpperCAmelCase__ = bnb.nn.Linearabit( lowerCamelCase , lowerCamelCase , module.bias is not None , quantization_config.bnb_abit_compute_dtype , compress_statistics=quantization_config.bnb_abit_use_double_quant , quant_type=quantization_config.bnb_abit_quant_type , ) UpperCAmelCase__ = True # Store the module class in case we need to transpose the weight later UpperCAmelCase__ = type(lowerCamelCase ) # Force requires grad to False to avoid unexpected errors model._modules[name].requires_grad_(lowerCamelCase ) if len(list(module.children() ) ) > 0: UpperCAmelCase__ , UpperCAmelCase__ = _replace_with_bnb_linear( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , has_been_replaced=lowerCamelCase , ) # Remove the last key for recursion current_key_name.pop(-1 ) return model, has_been_replaced def a_ ( lowerCamelCase , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None ): UpperCAmelCase__ = ['lm_head'] if modules_to_not_convert is None else modules_to_not_convert UpperCAmelCase__ , UpperCAmelCase__ = _replace_with_bnb_linear( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) if not has_been_replaced: logger.warning( 'You are loading your model in 8bit or 4bit but no linear modules were found in your model.' ' Please double check your model architecture, or submit an issue on github if you think this is' ' a bug.' 
) return model def a_ ( *lowerCamelCase , **lowerCamelCase ): warnings.warn( '`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead' , lowerCamelCase , ) return replace_with_bnb_linear(*lowerCamelCase , **lowerCamelCase ) def a_ ( *lowerCamelCase , **lowerCamelCase ): warnings.warn( '`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead' , lowerCamelCase , ) return set_module_quantized_tensor_to_device(*lowerCamelCase , **lowerCamelCase ) def a_ ( lowerCamelCase ): UpperCAmelCase__ = deepcopy(lowerCamelCase ) # this has 0 cost since it is done inside `init_empty_weights` context manager` tied_model.tie_weights() UpperCAmelCase__ = find_tied_parameters(lowerCamelCase ) # For compatibility with Accelerate < 0.18 if isinstance(lowerCamelCase , lowerCamelCase ): UpperCAmelCase__ = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() ) else: UpperCAmelCase__ = sum(lowerCamelCase , [] ) UpperCAmelCase__ = len(lowerCamelCase ) > 0 # Check if it is a base model UpperCAmelCase__ = not hasattr(lowerCamelCase , model.base_model_prefix ) # Ignore this for base models (BertModel, GPT2Model, etc.) if (not has_tied_params) and is_base_model: return [] # otherwise they have an attached head UpperCAmelCase__ = list(model.named_children() ) UpperCAmelCase__ = [list_modules[-1][0]] # add last module together with tied weights UpperCAmelCase__ = set(lowerCamelCase ) - set(lowerCamelCase ) UpperCAmelCase__ = list(set(lowerCamelCase ) ) + list(lowerCamelCase ) # remove ".weight" from the keys UpperCAmelCase__ = ['.weight', '.bias'] UpperCAmelCase__ = [] for name in list_untouched: for name_to_remove in names_to_remove: if name_to_remove in name: UpperCAmelCase__ = name.replace(lowerCamelCase , '' ) filtered_module_names.append(lowerCamelCase ) return filtered_module_names
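A toy version of the module traversal above, stripped of the bitsandbytes specifics: walk named children recursively and swap nn.Linear modules outside an exclusion list for a replacement class (here nn.Linear itself, just to exercise the traversal; the function name is illustrative).

import torch.nn as nn

def replace_linears(model, replacement_cls, skip=("lm_head",)):
    for name, module in model.named_children():
        if isinstance(module, nn.Linear) and name not in skip:
            setattr(model, name, replacement_cls(
                module.in_features, module.out_features, bias=module.bias is not None))
        else:
            replace_linears(module, replacement_cls, skip)
    return model

model = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 4))
replace_linears(model, nn.Linear)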
'''simple docstring''' import os import sys import unittest lowercase : Dict = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, 'utils')) import get_test_info # noqa: E402 from get_test_info import ( # noqa: E402 get_model_to_test_mapping, get_model_to_tester_mapping, get_test_to_tester_mapping, ) lowercase : Any = os.path.join('tests', 'models', 'bert', 'test_modeling_bert.py') lowercase : Optional[int] = os.path.join('tests', 'models', 'blip', 'test_modeling_blip.py') class A ( unittest.TestCase ): def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" A : Tuple = get_test_to_tester_mapping(SCREAMING_SNAKE_CASE ) A : Any = get_test_to_tester_mapping(SCREAMING_SNAKE_CASE ) A : List[Any] = {'''BertModelTest''': '''BertModelTester'''} A : int = { '''BlipModelTest''': '''BlipModelTester''', '''BlipTextImageModelTest''': '''BlipTextImageModelsModelTester''', '''BlipTextModelTest''': '''BlipTextModelTester''', '''BlipTextRetrievalModelTest''': '''BlipTextRetrievalModelTester''', '''BlipVQAModelTest''': '''BlipVQAModelTester''', '''BlipVisionModelTest''': '''BlipVisionModelTester''', } self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Any: """simple docstring""" A : Tuple = get_model_to_test_mapping(SCREAMING_SNAKE_CASE ) A : Tuple = get_model_to_test_mapping(SCREAMING_SNAKE_CASE ) A : List[str] = { '''BertForMaskedLM''': ['''BertModelTest'''], '''BertForMultipleChoice''': ['''BertModelTest'''], '''BertForNextSentencePrediction''': ['''BertModelTest'''], '''BertForPreTraining''': ['''BertModelTest'''], '''BertForQuestionAnswering''': ['''BertModelTest'''], '''BertForSequenceClassification''': ['''BertModelTest'''], '''BertForTokenClassification''': ['''BertModelTest'''], '''BertLMHeadModel''': ['''BertModelTest'''], '''BertModel''': ['''BertModelTest'''], } A : Union[str, Any] = { '''BlipForConditionalGeneration''': ['''BlipTextImageModelTest'''], '''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTest'''], '''BlipForQuestionAnswering''': ['''BlipVQAModelTest'''], '''BlipModel''': ['''BlipModelTest'''], '''BlipTextModel''': ['''BlipTextModelTest'''], '''BlipVisionModel''': ['''BlipVisionModelTest'''], } self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" A : int = get_model_to_tester_mapping(SCREAMING_SNAKE_CASE ) A : Union[str, Any] = get_model_to_tester_mapping(SCREAMING_SNAKE_CASE ) A : Dict = { '''BertForMaskedLM''': ['''BertModelTester'''], '''BertForMultipleChoice''': ['''BertModelTester'''], '''BertForNextSentencePrediction''': ['''BertModelTester'''], '''BertForPreTraining''': ['''BertModelTester'''], '''BertForQuestionAnswering''': ['''BertModelTester'''], '''BertForSequenceClassification''': ['''BertModelTester'''], '''BertForTokenClassification''': ['''BertModelTester'''], '''BertLMHeadModel''': ['''BertModelTester'''], '''BertModel''': ['''BertModelTester'''], } A : str = { '''BlipForConditionalGeneration''': ['''BlipTextImageModelsModelTester'''], '''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTester'''], '''BlipForQuestionAnswering''': ['''BlipVQAModelTester'''], '''BlipModel''': ['''BlipModelTester'''], '''BlipTextModel''': 
['''BlipTextModelTester'''], '''BlipVisionModel''': ['''BlipVisionModelTester'''], } self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
# Lint as: python3 # pylint: enable=line-too-long # pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position lowercase : str = """2.13.1""" import platform import pyarrow from packaging import version if version.parse(platform.python_version()) < version.parse("""3.7"""): raise ImportWarning( """To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition.""" ) if version.parse(pyarrow.__version__).major < 8: raise ImportWarning( """To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n""" """If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.""" ) del platform del pyarrow del version from .arrow_dataset import Dataset from .arrow_reader import ReadInstruction from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder from .combine import concatenate_datasets, interleave_datasets from .dataset_dict import DatasetDict, IterableDatasetDict from .download import * from .features import * from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled from .info import DatasetInfo, MetricInfo from .inspect import ( get_dataset_config_info, get_dataset_config_names, get_dataset_infos, get_dataset_split_names, inspect_dataset, inspect_metric, list_datasets, list_metrics, ) from .iterable_dataset import IterableDataset from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric from .metric import Metric from .splits import ( NamedSplit, NamedSplitAll, Split, SplitBase, SplitDict, SplitGenerator, SplitInfo, SubSplitInfo, percent, ) from .tasks import * from .utils import * from .utils import logging # deprecated modules from datasets import arrow_dataset as _arrow_dataset # isort:skip from datasets import utils as _utils # isort:skip from datasets.utils import download_manager as _deprecated_download_manager # isort:skip lowercase : Optional[Any] = concatenate_datasets lowercase : List[Any] = DownloadConfig lowercase : List[str] = DownloadManager lowercase : int = DownloadMode lowercase : Optional[Any] = DownloadConfig lowercase : Union[str, Any] = DownloadMode lowercase : Dict = DownloadManager del _arrow_dataset, _utils, _deprecated_download_manager
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow

from ..bert.test_tokenization_bert import BertTokenizationTest


@require_tokenizers
class DistilBertTokenizationTest(BertTokenizationTest):
    tokenizer_class = DistilBertTokenizer
    rust_tokenizer_class = DistilBertTokenizerFast
    test_rust_tokenizer = True

    @slow
    def test_sequence_builders(self):
        tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
"""simple docstring""" from dataclasses import dataclass from typing import Optional import torch from torch import nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .attention import BasicTransformerBlock from .modeling_utils import ModelMixin @dataclass class SCREAMING_SNAKE_CASE_ ( __a ): """simple docstring""" __lowercase : torch.FloatTensor class SCREAMING_SNAKE_CASE_ ( __a , __a ): """simple docstring""" @register_to_config def __init__( self , lowerCAmelCase__ = 1_6 , lowerCAmelCase__ = 8_8 , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = 1 , lowerCAmelCase__ = 0.0 , lowerCAmelCase__ = 3_2 , lowerCAmelCase__ = None , lowerCAmelCase__ = False , lowerCAmelCase__ = None , lowerCAmelCase__ = "geglu" , lowerCAmelCase__ = True , lowerCAmelCase__ = True , ): super().__init__() __SCREAMING_SNAKE_CASE = num_attention_heads __SCREAMING_SNAKE_CASE = attention_head_dim __SCREAMING_SNAKE_CASE = num_attention_heads * attention_head_dim __SCREAMING_SNAKE_CASE = in_channels __SCREAMING_SNAKE_CASE = torch.nn.GroupNorm(num_groups=lowerCAmelCase__ , num_channels=lowerCAmelCase__ , eps=1E-6 , affine=lowerCAmelCase__) __SCREAMING_SNAKE_CASE = nn.Linear(lowerCAmelCase__ , lowerCAmelCase__) # 3. Define transformers blocks __SCREAMING_SNAKE_CASE = nn.ModuleList( [ BasicTransformerBlock( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , dropout=lowerCAmelCase__ , cross_attention_dim=lowerCAmelCase__ , activation_fn=lowerCAmelCase__ , attention_bias=lowerCAmelCase__ , double_self_attention=lowerCAmelCase__ , norm_elementwise_affine=lowerCAmelCase__ , ) for d in range(lowerCAmelCase__) ]) __SCREAMING_SNAKE_CASE = nn.Linear(lowerCAmelCase__ , lowerCAmelCase__) def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=1 , lowerCAmelCase__=None , lowerCAmelCase__ = True , ): __SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = hidden_states.shape __SCREAMING_SNAKE_CASE = batch_frames // num_frames __SCREAMING_SNAKE_CASE = hidden_states __SCREAMING_SNAKE_CASE = hidden_states[None, :].reshape(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__) __SCREAMING_SNAKE_CASE = hidden_states.permute(0 , 2 , 1 , 3 , 4) __SCREAMING_SNAKE_CASE = self.norm(lowerCAmelCase__) __SCREAMING_SNAKE_CASE = hidden_states.permute(0 , 3 , 4 , 2 , 1).reshape(batch_size * height * width , lowerCAmelCase__ , lowerCAmelCase__) __SCREAMING_SNAKE_CASE = self.proj_in(lowerCAmelCase__) # 2. Blocks for block in self.transformer_blocks: __SCREAMING_SNAKE_CASE = block( lowerCAmelCase__ , encoder_hidden_states=lowerCAmelCase__ , timestep=lowerCAmelCase__ , cross_attention_kwargs=lowerCAmelCase__ , class_labels=lowerCAmelCase__ , ) # 3. Output __SCREAMING_SNAKE_CASE = self.proj_out(lowerCAmelCase__) __SCREAMING_SNAKE_CASE = ( hidden_states[None, None, :] .reshape(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__) .permute(0 , 3 , 4 , 1 , 2) .contiguous() ) __SCREAMING_SNAKE_CASE = hidden_states.reshape(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__) __SCREAMING_SNAKE_CASE = hidden_states + residual if not return_dict: return (output,) return TransformerTemporalModelOutput(sample=lowerCAmelCase__)
'''simple docstring''' from typing import List, Optional, Union import numpy as np import torch import torchaudio.compliance.kaldi as ta_kaldi from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging lowercase : Optional[int] = logging.get_logger(__name__) class A ( __snake_case ): __magic_name__ = ['''input_features''', '''attention_mask'''] def __init__( self , SCREAMING_SNAKE_CASE=80 , SCREAMING_SNAKE_CASE=16000 , SCREAMING_SNAKE_CASE=80 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , **SCREAMING_SNAKE_CASE , ) -> int: """simple docstring""" super().__init__(feature_size=SCREAMING_SNAKE_CASE , sampling_rate=SCREAMING_SNAKE_CASE , padding_value=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) A : Optional[int] = num_mel_bins A : Tuple = do_ceptral_normalize A : Dict = normalize_means A : List[Any] = normalize_vars A : List[str] = True def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , ) -> np.ndarray: """simple docstring""" A : List[Any] = waveform * (2**15) # Kaldi compliance: 16-bit signed integers A : Any = torch.from_numpy(SCREAMING_SNAKE_CASE ).unsqueeze(0 ) A : Any = ta_kaldi.fbank(SCREAMING_SNAKE_CASE , num_mel_bins=self.num_mel_bins , sample_frequency=self.sampling_rate ) return features.numpy() @staticmethod def __lowerCAmelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = 0.0 , ) -> np.ndarray: """simple docstring""" if normalize_means: A : Dict = x[:input_length].mean(axis=0 ) A : Optional[Any] = np.subtract(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) if normalize_vars: A : str = x[:input_length].std(axis=0 ) A : int = np.divide(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) if input_length < x.shape[0]: A : List[str] = padding_value # make sure array is in float32 A : Tuple = x.astype(np.floataa ) return x def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None ) -> List[np.ndarray]: """simple docstring""" A : List[Any] = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features] return [ self.utterance_cmvn(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , self.normalize_means , self.normalize_vars , self.padding_value ) for x, n in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ] def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ) -> BatchFeature: """simple docstring""" if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F'The model corresponding to this feature extractor: {self} was trained using a sampling rate of' F' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with' F' {self.sampling_rate} and not {sampling_rate}.' ) else: logger.warning( '''It is strongly recommended to pass the `sampling_rate` argument to this function. 
''' '''Failing to do so can result in silent errors that might be hard to debug.''' ) A : List[Any] = isinstance(SCREAMING_SNAKE_CASE , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(F'Only mono-channel audio is supported for input to {self}' ) A : Tuple = is_batched_numpy or ( isinstance(SCREAMING_SNAKE_CASE , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: A : Union[str, Any] = [np.asarray(SCREAMING_SNAKE_CASE , dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(SCREAMING_SNAKE_CASE , np.ndarray ): A : Union[str, Any] = np.asarray(SCREAMING_SNAKE_CASE , dtype=np.floataa ) elif isinstance(SCREAMING_SNAKE_CASE , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): A : Optional[int] = raw_speech.astype(np.floataa ) # always return batch if not is_batched: A : Any = [raw_speech] # extract fbank features A : List[str] = [self._extract_fbank_features(SCREAMING_SNAKE_CASE ) for waveform in raw_speech] # convert into correct format for padding A : str = BatchFeature({'''input_features''': features} ) A : Union[str, Any] = self.pad( SCREAMING_SNAKE_CASE , padding=SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , truncation=SCREAMING_SNAKE_CASE , pad_to_multiple_of=SCREAMING_SNAKE_CASE , return_attention_mask=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , ) # make sure list is in array format A : List[str] = padded_inputs.get('''input_features''' ) if isinstance(input_features[0] , SCREAMING_SNAKE_CASE ): A : str = [np.asarray(SCREAMING_SNAKE_CASE , dtype=np.floataa ) for feature in input_features] A : Union[str, Any] = padded_inputs.get('''attention_mask''' ) if attention_mask is not None: A : Union[str, Any] = [np.asarray(SCREAMING_SNAKE_CASE , dtype=np.intaa ) for array in attention_mask] # Utterance-level cepstral mean and variance normalization if self.do_ceptral_normalize: A : Dict = ( np.array(SCREAMING_SNAKE_CASE , dtype=np.intaa ) if self._get_padding_strategies(SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE ) is not PaddingStrategy.DO_NOT_PAD else None ) A : List[Any] = self.normalize( padded_inputs['''input_features'''] , attention_mask=SCREAMING_SNAKE_CASE ) if return_tensors is not None: A : int = padded_inputs.convert_to_tensors(SCREAMING_SNAKE_CASE ) return padded_inputs
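The utterance-level CMVN applied above, in isolation: subtract the per-feature mean and divide by the per-feature std over the valid (non-padded) frames; the array below is random stand-in data.

import numpy as np

input_length = 80  # number of valid frames
x = np.random.randn(100, 80).astype(np.float32)  # (frames, num_mel_bins)
mean = x[:input_length].mean(axis=0)
std = x[:input_length].std(axis=0)
x_normed = (x - mean) / std
assert x_normed.shape == x.shape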
import random import unittest import torch from diffusers import IFInpaintingPipeline from diffusers.utils import floats_tensor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import skip_mps, torch_device from ..pipeline_params import ( TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin from . import IFPipelineTesterMixin @skip_mps class lowercase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ): lowercase_ : List[Any] =IFInpaintingPipeline lowercase_ : Optional[int] =TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''width''', '''height'''} lowercase_ : Any =TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS lowercase_ : str =PipelineTesterMixin.required_optional_params - {'''latents'''} def A__ ( self): return self._get_dummy_components() def A__ ( self ,A__ ,A__=0): if str(A__).startswith('''mps'''): lowercase = torch.manual_seed(A__) else: lowercase = torch.Generator(device=A__).manual_seed(A__) lowercase = floats_tensor((1, 3, 3_2, 3_2) ,rng=random.Random(A__)).to(A__) lowercase = floats_tensor((1, 3, 3_2, 3_2) ,rng=random.Random(A__)).to(A__) lowercase = { '''prompt''': '''A painting of a squirrel eating a burger''', '''image''': image, '''mask_image''': mask_image, '''generator''': generator, '''num_inference_steps''': 2, '''output_type''': '''numpy''', } return inputs @unittest.skipIf( torch_device != '''cuda''' or not is_xformers_available() ,reason='''XFormers attention is only available with CUDA and `xformers` installed''' ,) def A__ ( self): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3) def A__ ( self): self._test_save_load_optional_components() @unittest.skipIf(torch_device != '''cuda''' ,reason='''float16 requires CUDA''') def A__ ( self): # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder super().test_save_load_floataa(expected_max_diff=1E-1) def A__ ( self): self._test_attention_slicing_forward_pass(expected_max_diff=1E-2) def A__ ( self): self._test_save_load_local() def A__ ( self): self._test_inference_batch_single_identical( expected_max_diff=1E-2 ,)
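# Usage sketch for the pipeline exercised above, hedged: the DeepFloyd IF
# checkpoint name below is an assumption (the weights are gated on the Hub and
# require accepting a license), and real inputs would be PIL images rather than
# the random tensors used here to mirror the test's dummy inputs.
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor

pipe = IFInpaintingPipeline.from_pretrained("DeepFloyd/IF-I-M-v1.0", torch_dtype=torch.float16)
pipe.enable_model_cpu_offload()

image = floats_tensor((1, 3, 64, 64))       # placeholder image tensor
mask_image = floats_tensor((1, 3, 64, 64))  # placeholder mask tensor

result = pipe(
    prompt="A painting of a squirrel eating a burger",
    image=image,
    mask_image=mask_image,
    num_inference_steps=2,  # far too few for quality; keeps the sketch cheap
    output_type="numpy",
)
print(result.images[0].shape)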
'''simple docstring''' import json import os import sys import tempfile import unittest from pathlib import Path from shutil import copyfile from huggingface_hub import HfFolder, Repository, create_repo, delete_repo from requests.exceptions import HTTPError import transformers from transformers import ( CONFIG_MAPPING, FEATURE_EXTRACTOR_MAPPING, PROCESSOR_MAPPING, TOKENIZER_MAPPING, AutoConfig, AutoFeatureExtractor, AutoProcessor, AutoTokenizer, BertTokenizer, ProcessorMixin, WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaProcessor, ) from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils')) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402 from test_module.custom_processing import CustomProcessor # noqa E402 from test_module.custom_tokenization import CustomTokenizer # noqa E402 lowercase : str = get_tests_dir('fixtures/dummy_feature_extractor_config.json') lowercase : str = get_tests_dir('fixtures/vocab.json') lowercase : int = get_tests_dir('fixtures') class A ( unittest.TestCase ): __magic_name__ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou'''] def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" A : Tuple = 0 def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" A : List[Any] = AutoProcessor.from_pretrained('''facebook/wav2vec2-base-960h''' ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: A : Union[str, Any] = WavaVecaConfig() A : List[str] = AutoProcessor.from_pretrained('''facebook/wav2vec2-base-960h''' ) # save in new folder model_config.save_pretrained(SCREAMING_SNAKE_CASE ) processor.save_pretrained(SCREAMING_SNAKE_CASE ) A : List[str] = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> int: """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: # copy relevant files copyfile(SCREAMING_SNAKE_CASE , os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ) copyfile(SCREAMING_SNAKE_CASE , os.path.join(SCREAMING_SNAKE_CASE , '''vocab.json''' ) ) A : Optional[Any] = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: A : Dict = WavaVecaFeatureExtractor() A : List[str] = AutoTokenizer.from_pretrained('''facebook/wav2vec2-base-960h''' ) A : str = WavaVecaProcessor(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # save in new folder processor.save_pretrained(SCREAMING_SNAKE_CASE ) # drop `processor_class` in tokenizer with open(os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , '''r''' ) as f: A : Dict = json.load(SCREAMING_SNAKE_CASE ) config_dict.pop('''processor_class''' ) with open(os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , '''w''' ) as f: f.write(json.dumps(SCREAMING_SNAKE_CASE ) ) A : Optional[Any] = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE ) 
self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> int: """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: A : List[Any] = WavaVecaFeatureExtractor() A : List[Any] = AutoTokenizer.from_pretrained('''facebook/wav2vec2-base-960h''' ) A : str = WavaVecaProcessor(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # save in new folder processor.save_pretrained(SCREAMING_SNAKE_CASE ) # drop `processor_class` in feature extractor with open(os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , '''r''' ) as f: A : str = json.load(SCREAMING_SNAKE_CASE ) config_dict.pop('''processor_class''' ) with open(os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , '''w''' ) as f: f.write(json.dumps(SCREAMING_SNAKE_CASE ) ) A : str = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: A : str = WavaVecaConfig(processor_class='''Wav2Vec2Processor''' ) model_config.save_pretrained(SCREAMING_SNAKE_CASE ) # copy relevant files copyfile(SCREAMING_SNAKE_CASE , os.path.join(SCREAMING_SNAKE_CASE , '''vocab.json''' ) ) # create emtpy sample processor with open(os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , '''w''' ) as f: f.write('''{}''' ) A : List[str] = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Any: """simple docstring""" with self.assertRaises(SCREAMING_SNAKE_CASE ): A : Optional[Any] = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' ) # If remote code is disabled, we can't load this config. 
with self.assertRaises(SCREAMING_SNAKE_CASE ): A : Union[str, Any] = AutoProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=SCREAMING_SNAKE_CASE ) A : Union[str, Any] = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=SCREAMING_SNAKE_CASE ) self.assertTrue(processor.special_attribute_present ) self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' ) A : List[str] = processor.feature_extractor self.assertTrue(feature_extractor.special_attribute_present ) self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' ) A : Tuple = processor.tokenizer self.assertTrue(tokenizer.special_attribute_present ) if is_tokenizers_available(): self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' ) # Test we can also load the slow version A : List[str] = AutoProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=SCREAMING_SNAKE_CASE , use_fast=SCREAMING_SNAKE_CASE ) A : int = new_processor.tokenizer self.assertTrue(new_tokenizer.special_attribute_present ) self.assertEqual(new_tokenizer.__class__.__name__ , '''NewTokenizer''' ) else: self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' ) def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" try: AutoConfig.register('''custom''' , SCREAMING_SNAKE_CASE ) AutoFeatureExtractor.register(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) AutoTokenizer.register(SCREAMING_SNAKE_CASE , slow_tokenizer_class=SCREAMING_SNAKE_CASE ) AutoProcessor.register(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(SCREAMING_SNAKE_CASE ): AutoProcessor.register(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # Now that the config is registered, it can be used as any other config with the auto-API A : List[Any] = CustomFeatureExtractor.from_pretrained(SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmp_dir: A : Tuple = os.path.join(SCREAMING_SNAKE_CASE , '''vocab.txt''' ) with open(SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) ) A : Optional[int] = CustomTokenizer(SCREAMING_SNAKE_CASE ) A : Any = CustomProcessor(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmp_dir: processor.save_pretrained(SCREAMING_SNAKE_CASE ) A : List[str] = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] if CustomConfig in PROCESSOR_MAPPING._extra_content: del PROCESSOR_MAPPING._extra_content[CustomConfig] def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" class A ( __snake_case ): __magic_name__ = False class A ( __snake_case ): __magic_name__ = False class A ( __snake_case ): __magic_name__ = '''AutoFeatureExtractor''' __magic_name__ = '''AutoTokenizer''' __magic_name__ = False try: AutoConfig.register('''custom''' , SCREAMING_SNAKE_CASE ) AutoFeatureExtractor.register(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) 
AutoTokenizer.register(SCREAMING_SNAKE_CASE , slow_tokenizer_class=SCREAMING_SNAKE_CASE ) AutoProcessor.register(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # If remote code is not set, the default is to use local classes. A : Union[str, Any] = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' ) self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' ) self.assertFalse(processor.special_attribute_present ) self.assertFalse(processor.feature_extractor.special_attribute_present ) self.assertFalse(processor.tokenizer.special_attribute_present ) # If remote code is disabled, we load the local ones. A : Optional[int] = AutoProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=SCREAMING_SNAKE_CASE ) self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' ) self.assertFalse(processor.special_attribute_present ) self.assertFalse(processor.feature_extractor.special_attribute_present ) self.assertFalse(processor.tokenizer.special_attribute_present ) # If remote is enabled, we load from the Hub. A : Tuple = AutoProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=SCREAMING_SNAKE_CASE ) self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' ) self.assertTrue(processor.special_attribute_present ) self.assertTrue(processor.feature_extractor.special_attribute_present ) self.assertTrue(processor.tokenizer.special_attribute_present ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] if CustomConfig in PROCESSOR_MAPPING._extra_content: del PROCESSOR_MAPPING._extra_content[CustomConfig] def __lowerCAmelCase ( self ) -> str: """simple docstring""" A : int = AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-bert''' ) self.assertEqual(processor.__class__.__name__ , '''BertTokenizerFast''' ) def __lowerCAmelCase ( self ) -> str: """simple docstring""" A : Optional[int] = AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-convnext''' ) self.assertEqual(processor.__class__.__name__ , '''ConvNextImageProcessor''' ) @is_staging_test class A ( unittest.TestCase ): __magic_name__ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou'''] @classmethod def __lowerCAmelCase ( cls ) -> Dict: """simple docstring""" A : Optional[int] = TOKEN HfFolder.save_token(SCREAMING_SNAKE_CASE ) @classmethod def __lowerCAmelCase ( cls ) -> Any: """simple docstring""" try: delete_repo(token=cls._token , repo_id='''test-processor''' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''valid_org/test-processor-org''' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''test-dynamic-processor''' ) except HTTPError: pass def __lowerCAmelCase ( self ) -> Dict: """simple docstring""" A : Union[str, Any] = WavaVecaProcessor.from_pretrained(SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmp_dir: processor.save_pretrained( os.path.join(SCREAMING_SNAKE_CASE , '''test-processor''' ) , push_to_hub=SCREAMING_SNAKE_CASE , use_auth_token=self._token ) A : int = WavaVecaProcessor.from_pretrained(F'{USER}/test-processor' ) for k, v in processor.feature_extractor.__dict__.items(): self.assertEqual(SCREAMING_SNAKE_CASE , 
getattr(new_processor.feature_extractor , SCREAMING_SNAKE_CASE ) ) self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() ) def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" A : Tuple = WavaVecaProcessor.from_pretrained(SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmp_dir: processor.save_pretrained( os.path.join(SCREAMING_SNAKE_CASE , '''test-processor-org''' ) , push_to_hub=SCREAMING_SNAKE_CASE , use_auth_token=self._token , organization='''valid_org''' , ) A : int = WavaVecaProcessor.from_pretrained('''valid_org/test-processor-org''' ) for k, v in processor.feature_extractor.__dict__.items(): self.assertEqual(SCREAMING_SNAKE_CASE , getattr(new_processor.feature_extractor , SCREAMING_SNAKE_CASE ) ) self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() ) def __lowerCAmelCase ( self ) -> str: """simple docstring""" CustomFeatureExtractor.register_for_auto_class() CustomTokenizer.register_for_auto_class() CustomProcessor.register_for_auto_class() A : Any = CustomFeatureExtractor.from_pretrained(SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmp_dir: A : Union[str, Any] = os.path.join(SCREAMING_SNAKE_CASE , '''vocab.txt''' ) with open(SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) ) A : str = CustomTokenizer(SCREAMING_SNAKE_CASE ) A : Union[str, Any] = CustomProcessor(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmp_dir: create_repo(F'{USER}/test-dynamic-processor' , token=self._token ) A : List[str] = Repository(SCREAMING_SNAKE_CASE , clone_from=F'{USER}/test-dynamic-processor' , token=self._token ) processor.save_pretrained(SCREAMING_SNAKE_CASE ) # This has added the proper auto_map field to the feature extractor config self.assertDictEqual( processor.feature_extractor.auto_map , { '''AutoFeatureExtractor''': '''custom_feature_extraction.CustomFeatureExtractor''', '''AutoProcessor''': '''custom_processing.CustomProcessor''', } , ) # This has added the proper auto_map field to the tokenizer config with open(os.path.join(SCREAMING_SNAKE_CASE , '''tokenizer_config.json''' ) ) as f: A : Dict = json.load(SCREAMING_SNAKE_CASE ) self.assertDictEqual( tokenizer_config['''auto_map'''] , { '''AutoTokenizer''': ['''custom_tokenization.CustomTokenizer''', None], '''AutoProcessor''': '''custom_processing.CustomProcessor''', } , ) # The code has been copied from fixtures self.assertTrue(os.path.isfile(os.path.join(SCREAMING_SNAKE_CASE , '''custom_feature_extraction.py''' ) ) ) self.assertTrue(os.path.isfile(os.path.join(SCREAMING_SNAKE_CASE , '''custom_tokenization.py''' ) ) ) self.assertTrue(os.path.isfile(os.path.join(SCREAMING_SNAKE_CASE , '''custom_processing.py''' ) ) ) repo.push_to_hub() A : Optional[int] = AutoProcessor.from_pretrained(F'{USER}/test-dynamic-processor' , trust_remote_code=SCREAMING_SNAKE_CASE ) # Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module self.assertEqual(new_processor.__class__.__name__ , '''CustomProcessor''' )
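# Usage sketch of the registration flow the tests above exercise. The
# `CustomConfig`/`CustomFeatureExtractor`/`CustomTokenizer`/`CustomProcessor`
# classes are hypothetical user-defined subclasses (the tests import them from
# fixture modules), and the save directory path is a placeholder.
from transformers import AutoConfig, AutoFeatureExtractor, AutoProcessor, AutoTokenizer

from my_package import CustomConfig, CustomFeatureExtractor, CustomProcessor, CustomTokenizer  # hypothetical

AutoConfig.register("custom", CustomConfig)
AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
AutoProcessor.register(CustomConfig, CustomProcessor)

# Once registered, a directory produced by CustomProcessor.save_pretrained()
# round-trips through the Auto API:
processor = AutoProcessor.from_pretrained("path/to/saved/custom-processor")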
"""simple docstring""" import unittest from transformers import ( MODEL_FOR_OBJECT_DETECTION_MAPPING, AutoFeatureExtractor, AutoModelForObjectDetection, ObjectDetectionPipeline, is_vision_available, pipeline, ) from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_pytesseract, require_tf, require_timm, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class _UpperCAmelCase : '''simple docstring''' @staticmethod def SCREAMING_SNAKE_CASE (*a_ , **a_ ): '''simple docstring''' pass @is_pipeline_test @require_vision @require_timm @require_torch class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' lowerCamelCase__ =MODEL_FOR_OBJECT_DETECTION_MAPPING def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ ): '''simple docstring''' __snake_case : Any = ObjectDetectionPipeline(model=a_ , image_processor=a_ ) return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"] def SCREAMING_SNAKE_CASE (self , a_ , a_ ): '''simple docstring''' __snake_case : Tuple = object_detector('''./tests/fixtures/tests_samples/COCO/000000039769.png''' , threshold=0.0 ) self.assertGreater(len(a_ ) , 0 ) for detected_object in outputs: self.assertEqual( a_ , { '''score''': ANY(a_ ), '''label''': ANY(a_ ), '''box''': {'''xmin''': ANY(a_ ), '''ymin''': ANY(a_ ), '''xmax''': ANY(a_ ), '''ymax''': ANY(a_ )}, } , ) import datasets __snake_case : Dict = datasets.load_dataset('''hf-internal-testing/fixtures_image_utils''' , '''image''' , split='''test''' ) __snake_case : Tuple = [ Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ), '''http://images.cocodataset.org/val2017/000000039769.jpg''', # RGBA dataset[0]['''file'''], # LA dataset[1]['''file'''], # L dataset[2]['''file'''], ] __snake_case : List[Any] = object_detector(a_ , threshold=0.0 ) self.assertEqual(len(a_ ) , len(a_ ) ) for outputs in batch_outputs: self.assertGreater(len(a_ ) , 0 ) for detected_object in outputs: self.assertEqual( a_ , { '''score''': ANY(a_ ), '''label''': ANY(a_ ), '''box''': {'''xmin''': ANY(a_ ), '''ymin''': ANY(a_ ), '''xmax''': ANY(a_ ), '''ymax''': ANY(a_ )}, } , ) @require_tf @unittest.skip('''Object detection not implemented in TF''' ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' pass @require_torch def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Any = '''hf-internal-testing/tiny-detr-mobilenetsv3''' __snake_case : Union[str, Any] = AutoModelForObjectDetection.from_pretrained(a_ ) __snake_case : Dict = AutoFeatureExtractor.from_pretrained(a_ ) __snake_case : Optional[Any] = ObjectDetectionPipeline(model=a_ , feature_extractor=a_ ) __snake_case : Dict = object_detector('''http://images.cocodataset.org/val2017/000000039769.jpg''' , threshold=0.0 ) self.assertEqual( nested_simplify(a_ , decimals=4 ) , [ {'''score''': 0.3376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 1_59, '''ymin''': 1_20, '''xmax''': 4_80, '''ymax''': 3_59}}, {'''score''': 0.3376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 1_59, '''ymin''': 1_20, '''xmax''': 4_80, '''ymax''': 3_59}}, ] , ) __snake_case : Dict = object_detector( [ '''http://images.cocodataset.org/val2017/000000039769.jpg''', '''http://images.cocodataset.org/val2017/000000039769.jpg''', ] , threshold=0.0 , ) self.assertEqual( nested_simplify(a_ , decimals=4 ) , [ [ {'''score''': 0.3376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 1_59, '''ymin''': 1_20, '''xmax''': 4_80, '''ymax''': 3_59}}, 
{'''score''': 0.3376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 1_59, '''ymin''': 1_20, '''xmax''': 4_80, '''ymax''': 3_59}}, ], [ {'''score''': 0.3376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 1_59, '''ymin''': 1_20, '''xmax''': 4_80, '''ymax''': 3_59}}, {'''score''': 0.3376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 1_59, '''ymin''': 1_20, '''xmax''': 4_80, '''ymax''': 3_59}}, ], ] , ) @require_torch @slow def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Dict = '''facebook/detr-resnet-50''' __snake_case : int = AutoModelForObjectDetection.from_pretrained(a_ ) __snake_case : int = AutoFeatureExtractor.from_pretrained(a_ ) __snake_case : List[Any] = ObjectDetectionPipeline(model=a_ , feature_extractor=a_ ) __snake_case : Union[str, Any] = object_detector('''http://images.cocodataset.org/val2017/000000039769.jpg''' ) self.assertEqual( nested_simplify(a_ , decimals=4 ) , [ {'''score''': 0.9982, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 1_75, '''ymax''': 1_17}}, {'''score''': 0.9960, '''label''': '''remote''', '''box''': {'''xmin''': 3_33, '''ymin''': 72, '''xmax''': 3_68, '''ymax''': 1_87}}, {'''score''': 0.9955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 6_39, '''ymax''': 4_73}}, {'''score''': 0.9988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 3_14, '''ymax''': 4_70}}, {'''score''': 0.9987, '''label''': '''cat''', '''box''': {'''xmin''': 3_45, '''ymin''': 23, '''xmax''': 6_40, '''ymax''': 3_68}}, ] , ) __snake_case : Optional[Any] = object_detector( [ '''http://images.cocodataset.org/val2017/000000039769.jpg''', '''http://images.cocodataset.org/val2017/000000039769.jpg''', ] ) self.assertEqual( nested_simplify(a_ , decimals=4 ) , [ [ {'''score''': 0.9982, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 1_75, '''ymax''': 1_17}}, {'''score''': 0.9960, '''label''': '''remote''', '''box''': {'''xmin''': 3_33, '''ymin''': 72, '''xmax''': 3_68, '''ymax''': 1_87}}, {'''score''': 0.9955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 6_39, '''ymax''': 4_73}}, {'''score''': 0.9988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 3_14, '''ymax''': 4_70}}, {'''score''': 0.9987, '''label''': '''cat''', '''box''': {'''xmin''': 3_45, '''ymin''': 23, '''xmax''': 6_40, '''ymax''': 3_68}}, ], [ {'''score''': 0.9982, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 1_75, '''ymax''': 1_17}}, {'''score''': 0.9960, '''label''': '''remote''', '''box''': {'''xmin''': 3_33, '''ymin''': 72, '''xmax''': 3_68, '''ymax''': 1_87}}, {'''score''': 0.9955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 6_39, '''ymax''': 4_73}}, {'''score''': 0.9988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 3_14, '''ymax''': 4_70}}, {'''score''': 0.9987, '''label''': '''cat''', '''box''': {'''xmin''': 3_45, '''ymin''': 23, '''xmax''': 6_40, '''ymax''': 3_68}}, ], ] , ) @require_torch @slow def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[Any] = '''facebook/detr-resnet-50''' __snake_case : Any = pipeline('''object-detection''' , model=a_ ) __snake_case : Dict = object_detector('''http://images.cocodataset.org/val2017/000000039769.jpg''' ) self.assertEqual( nested_simplify(a_ , decimals=4 ) , [ {'''score''': 0.9982, '''label''': '''remote''', 
'''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 1_75, '''ymax''': 1_17}}, {'''score''': 0.9960, '''label''': '''remote''', '''box''': {'''xmin''': 3_33, '''ymin''': 72, '''xmax''': 3_68, '''ymax''': 1_87}}, {'''score''': 0.9955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 6_39, '''ymax''': 4_73}}, {'''score''': 0.9988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 3_14, '''ymax''': 4_70}}, {'''score''': 0.9987, '''label''': '''cat''', '''box''': {'''xmin''': 3_45, '''ymin''': 23, '''xmax''': 6_40, '''ymax''': 3_68}}, ] , ) __snake_case : Optional[Any] = object_detector( [ '''http://images.cocodataset.org/val2017/000000039769.jpg''', '''http://images.cocodataset.org/val2017/000000039769.jpg''', ] ) self.assertEqual( nested_simplify(a_ , decimals=4 ) , [ [ {'''score''': 0.9982, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 1_75, '''ymax''': 1_17}}, {'''score''': 0.9960, '''label''': '''remote''', '''box''': {'''xmin''': 3_33, '''ymin''': 72, '''xmax''': 3_68, '''ymax''': 1_87}}, {'''score''': 0.9955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 6_39, '''ymax''': 4_73}}, {'''score''': 0.9988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 3_14, '''ymax''': 4_70}}, {'''score''': 0.9987, '''label''': '''cat''', '''box''': {'''xmin''': 3_45, '''ymin''': 23, '''xmax''': 6_40, '''ymax''': 3_68}}, ], [ {'''score''': 0.9982, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 1_75, '''ymax''': 1_17}}, {'''score''': 0.9960, '''label''': '''remote''', '''box''': {'''xmin''': 3_33, '''ymin''': 72, '''xmax''': 3_68, '''ymax''': 1_87}}, {'''score''': 0.9955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 6_39, '''ymax''': 4_73}}, {'''score''': 0.9988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 3_14, '''ymax''': 4_70}}, {'''score''': 0.9987, '''label''': '''cat''', '''box''': {'''xmin''': 3_45, '''ymin''': 23, '''xmax''': 6_40, '''ymax''': 3_68}}, ], ] , ) @require_torch @slow def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Any = 0.9985 __snake_case : Optional[int] = '''facebook/detr-resnet-50''' __snake_case : Any = pipeline('''object-detection''' , model=a_ ) __snake_case : Optional[Any] = object_detector('''http://images.cocodataset.org/val2017/000000039769.jpg''' , threshold=a_ ) self.assertEqual( nested_simplify(a_ , decimals=4 ) , [ {'''score''': 0.9988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 3_14, '''ymax''': 4_70}}, {'''score''': 0.9987, '''label''': '''cat''', '''box''': {'''xmin''': 3_45, '''ymin''': 23, '''xmax''': 6_40, '''ymax''': 3_68}}, ] , ) @require_torch @require_pytesseract @slow def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Tuple = '''Narsil/layoutlmv3-finetuned-funsd''' __snake_case : Optional[Any] = 0.9993 __snake_case : int = pipeline('''object-detection''' , model=a_ , threshold=a_ ) __snake_case : Dict = object_detector( '''https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png''' ) self.assertEqual( nested_simplify(a_ , decimals=4 ) , [ {'''score''': 0.9993, '''label''': '''I-ANSWER''', '''box''': {'''xmin''': 2_94, '''ymin''': 2_54, '''xmax''': 3_43, '''ymax''': 2_64}}, {'''score''': 0.9993, '''label''': '''I-ANSWER''', '''box''': {'''xmin''': 2_94, 
'''ymin''': 2_54, '''xmax''': 3_43, '''ymax''': 2_64}}, ] , )
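# Usage sketch: outside the tests, the same checks reduce to a few lines of
# application code (model name and threshold taken from the tests above;
# network access is assumed for the model weights and the image URL).
from transformers import pipeline

detector = pipeline("object-detection", model="facebook/detr-resnet-50")
outputs = detector(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    threshold=0.9985,  # keep only very confident detections
)
for det in outputs:
    box = det["box"]
    print(f"{det['label']}: {det['score']:.4f} at ({box['xmin']}, {box['ymin']}, {box['xmax']}, {box['ymax']})")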
'''simple docstring''' import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_rembert import RemBertTokenizer else: lowercase : Optional[Any] = None lowercase : Tuple = logging.get_logger(__name__) lowercase : Dict = {'vocab_file': 'sentencepiece.model', 'tokenizer_file': 'tokenizer.json'} lowercase : Tuple = { 'vocab_file': { 'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/sentencepiece.model', }, 'tokenizer_file': { 'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/tokenizer.json', }, } lowercase : List[str] = { 'google/rembert': 2_56, } lowercase : Dict = '▁' class A ( __snake_case ): __magic_name__ = VOCAB_FILES_NAMES __magic_name__ = PRETRAINED_VOCAB_FILES_MAP __magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __magic_name__ = RemBertTokenizer def __init__( self , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE="[CLS]" , SCREAMING_SNAKE_CASE="[SEP]" , SCREAMING_SNAKE_CASE="<unk>" , SCREAMING_SNAKE_CASE="[SEP]" , SCREAMING_SNAKE_CASE="<pad>" , SCREAMING_SNAKE_CASE="[CLS]" , SCREAMING_SNAKE_CASE="[MASK]" , **SCREAMING_SNAKE_CASE , ) -> List[str]: """simple docstring""" A : Optional[Any] = AddedToken(SCREAMING_SNAKE_CASE , lstrip=SCREAMING_SNAKE_CASE , rstrip=SCREAMING_SNAKE_CASE ) if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else mask_token super().__init__( SCREAMING_SNAKE_CASE , tokenizer_file=SCREAMING_SNAKE_CASE , do_lower_case=SCREAMING_SNAKE_CASE , remove_space=SCREAMING_SNAKE_CASE , keep_accents=SCREAMING_SNAKE_CASE , bos_token=SCREAMING_SNAKE_CASE , eos_token=SCREAMING_SNAKE_CASE , unk_token=SCREAMING_SNAKE_CASE , sep_token=SCREAMING_SNAKE_CASE , pad_token=SCREAMING_SNAKE_CASE , cls_token=SCREAMING_SNAKE_CASE , mask_token=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , ) A : List[Any] = do_lower_case A : str = remove_space A : int = keep_accents A : Union[str, Any] = vocab_file A : List[Any] = False if not self.vocab_file else True def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None ) -> List[int]: """simple docstring""" A : List[Any] = [self.sep_token_id] A : Tuple = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = False ) -> List[int]: """simple docstring""" if already_has_special_tokens: if token_ids_a is not None: raise ValueError( '''You should not supply a second sequence if the provided sequence of ''' '''ids is already formatted with special tokens for the model.''' ) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is not None: return [1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1] return [1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1] def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None ) -> List[int]: """simple docstring""" A : Tuple = [self.sep_token_id] A : Dict = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def 
__lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None ) -> Tuple[str]: """simple docstring""" if not os.path.isdir(SCREAMING_SNAKE_CASE ): logger.error('''Vocabulary path ({}) should be a directory'''.format(SCREAMING_SNAKE_CASE ) ) return A : Any = os.path.join( SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE ): copyfile(self.vocab_file , SCREAMING_SNAKE_CASE ) return (out_vocab_file,)
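# Usage sketch: the class above corresponds to transformers'
# RemBertTokenizerFast. Assuming that name and Hub access, the special-token
# helpers behave as follows.
from transformers import RemBertTokenizerFast

tokenizer = RemBertTokenizerFast.from_pretrained("google/rembert")

ids_a = tokenizer.convert_tokens_to_ids(tokenizer.tokenize("hello world"))
ids_b = tokenizer.convert_tokens_to_ids(tokenizer.tokenize("goodbye"))

print(tokenizer.build_inputs_with_special_tokens(ids_a))         # [CLS] A [SEP]
print(tokenizer.build_inputs_with_special_tokens(ids_a, ids_b))  # [CLS] A [SEP] B [SEP]

# 0 for the first segment (including [CLS] and its [SEP]), 1 for the second:
print(tokenizer.create_token_type_ids_from_sequences(ids_a, ids_b))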
from __future__ import annotations


def encode(plain: str) -> list[int]:
    """Map each lowercase letter to its 1-based position in the alphabet (a=1 ... z=26)."""
    return [ord(elem) - 96 for elem in plain]


def decode(encoded: list[int]) -> str:
    """Map each 1-based alphabet position back to its lowercase letter."""
    return "".join(chr(elem + 96) for elem in encoded)


def main() -> None:
    encoded = encode(input("-> ").strip().lower())
    print("Encoded: ", encoded)
    print("Decoded:", decode(encoded))


if __name__ == "__main__":
    main()
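# Quick round-trip check of the A1Z26 cipher above (pure Python, no dependencies):
assert encode("hello") == [8, 5, 12, 12, 15]
assert decode([8, 5, 12, 12, 15]) == "hello"
assert decode(encode("abcxyz")) == "abcxyz"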
'''simple docstring''' from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL lowercase : Optional[Any] = logging.get_logger(__name__) class A ( __snake_case ): __magic_name__ = ['''pixel_values'''] def __init__( self , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = PILImageResampling.BILINEAR , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = 1 / 255 , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ) -> None: """simple docstring""" super().__init__(**SCREAMING_SNAKE_CASE ) A : str = size if size is not None else {'''shortest_edge''': 384} A : Tuple = get_size_dict(SCREAMING_SNAKE_CASE , default_to_square=SCREAMING_SNAKE_CASE ) A : str = do_resize A : List[Any] = size # Default value set here for backwards compatibility where the value in config is None A : List[Any] = crop_pct if crop_pct is not None else 224 / 256 A : Optional[int] = resample A : Union[str, Any] = do_rescale A : List[str] = rescale_factor A : Union[str, Any] = do_normalize A : Any = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN A : Optional[int] = image_std if image_std is not None else IMAGENET_STANDARD_STD def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = PILImageResampling.BICUBIC , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ) -> np.ndarray: """simple docstring""" A : str = get_size_dict(SCREAMING_SNAKE_CASE , default_to_square=SCREAMING_SNAKE_CASE ) if "shortest_edge" not in size: raise ValueError(F'Size dictionary must contain \'shortest_edge\' key. 
Got {size.keys()}' ) A : Any = size['''shortest_edge'''] if shortest_edge < 384: # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct A : Dict = int(shortest_edge / crop_pct ) A : str = get_resize_output_image_size(SCREAMING_SNAKE_CASE , size=SCREAMING_SNAKE_CASE , default_to_square=SCREAMING_SNAKE_CASE ) A : int = resize(image=SCREAMING_SNAKE_CASE , size=SCREAMING_SNAKE_CASE , resample=SCREAMING_SNAKE_CASE , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) # then crop to (shortest_edge, shortest_edge) return center_crop(image=SCREAMING_SNAKE_CASE , size=(shortest_edge, shortest_edge) , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) else: # warping (no cropping) when evaluated at 384 or larger return resize( SCREAMING_SNAKE_CASE , size=(shortest_edge, shortest_edge) , resample=SCREAMING_SNAKE_CASE , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ) -> List[str]: """simple docstring""" return rescale(SCREAMING_SNAKE_CASE , scale=SCREAMING_SNAKE_CASE , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ) -> np.ndarray: """simple docstring""" return normalize(SCREAMING_SNAKE_CASE , mean=SCREAMING_SNAKE_CASE , std=SCREAMING_SNAKE_CASE , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE , ) -> PIL.Image.Image: """simple docstring""" A : int = do_resize if do_resize is not None else self.do_resize A : Tuple = crop_pct if crop_pct is not None else self.crop_pct A : Optional[Any] = resample if resample is not None else self.resample A : List[Any] = do_rescale if do_rescale is not None else self.do_rescale A : List[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor A : Union[str, Any] = do_normalize if do_normalize is not None else self.do_normalize A : Union[str, Any] = image_mean if image_mean is not None else self.image_mean A : List[str] = image_std if image_std is not None else self.image_std A : Union[str, Any] = size if size is not None else self.size A : List[Any] = get_size_dict(SCREAMING_SNAKE_CASE , default_to_square=SCREAMING_SNAKE_CASE ) A : Any = make_list_of_images(SCREAMING_SNAKE_CASE ) if not valid_images(SCREAMING_SNAKE_CASE ): raise ValueError( '''Invalid image type. 
Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None or resample is None: raise ValueError('''Size and resample must be specified if do_resize is True.''' ) if do_resize and size["shortest_edge"] < 384 and crop_pct is None: raise ValueError('''crop_pct must be specified if size < 384.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # All transformations expect numpy arrays. A : Optional[int] = [to_numpy_array(SCREAMING_SNAKE_CASE ) for image in images] if do_resize: A : Any = [self.resize(image=SCREAMING_SNAKE_CASE , size=SCREAMING_SNAKE_CASE , crop_pct=SCREAMING_SNAKE_CASE , resample=SCREAMING_SNAKE_CASE ) for image in images] if do_rescale: A : str = [self.rescale(image=SCREAMING_SNAKE_CASE , scale=SCREAMING_SNAKE_CASE ) for image in images] if do_normalize: A : Dict = [self.normalize(image=SCREAMING_SNAKE_CASE , mean=SCREAMING_SNAKE_CASE , std=SCREAMING_SNAKE_CASE ) for image in images] A : Any = [to_channel_dimension_format(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for image in images] A : Optional[int] = {'''pixel_values''': images} return BatchFeature(data=SCREAMING_SNAKE_CASE , tensor_type=SCREAMING_SNAKE_CASE )
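# Arithmetic sketch of the resize branch above for the default
# crop_pct = 224 / 256 (numbers illustrative): below 384, the shortest edge is
# first resized to size / crop_pct and the result is center-cropped back to
# (size, size); at 384 and above the image is warped directly to (size, size).
crop_pct = 224 / 256
size = 224
resize_target = int(size / crop_pct)
print(resize_target)  # 256 -> resize shortest edge to 256, then center-crop to 224 x 224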
def print_pascal_triangle(num_rows: int) -> None:
    """Print Pascal's triangle for the given number of rows."""
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1):
            print(end=" ")
        # Print row values
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=" ")
            else:
                print(triangle[row_idx][col_idx], end="")
        print()


def generate_pascal_triangle(num_rows: int) -> list[list[int]]:
    """Build Pascal's triangle row by row, each element from the two above it."""
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError("The input value of 'num_rows' should be greater than or equal to 0")
    triangle: list[list[int]] = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle


def populate_current_row(triangle: list[list[int]], current_row_idx: int) -> list[int]:
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(triangle, current_row, current_row_idx, current_col_idx)
    return current_row


def calculate_current_element(
    triangle: list[list[int]],
    current_row: list[int],
    current_row_idx: int,
    current_col_idx: int,
) -> None:
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt


def generate_pascal_triangle_optimized(num_rows: int) -> list[list[int]]:
    """Build Pascal's triangle using the symmetry of each row (compute only half)."""
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError("The input value of 'num_rows' should be greater than or equal to 0")
    result: list[list[int]] = [[1]]
    for row_index in range(1, num_rows):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row)
    return result


def benchmark() -> None:
    """Benchmark both implementations for a range of row counts."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        print(f"{call:38} -- {timing:.4f} seconds")

    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
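# Usage of the helpers above:
print_pascal_triangle(5)
#     1
#    1 1
#   1 2 1
#  1 3 3 1
# 1 4 6 4 1

assert (
    generate_pascal_triangle(4)
    == generate_pascal_triangle_optimized(4)
    == [[1], [1, 1], [1, 2, 1], [1, 3, 3, 1]]
)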
'''simple docstring''' import unittest from transformers import AutoConfig, AutoTokenizer, BertConfig, TensorType, is_flax_available from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow if is_flax_available(): import jax from transformers.models.auto.modeling_flax_auto import FlaxAutoModel from transformers.models.bert.modeling_flax_bert import FlaxBertModel from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel @require_flax class A ( unittest.TestCase ): @slow def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" for model_name in ["bert-base-cased", "bert-large-uncased"]: with self.subTest(SCREAMING_SNAKE_CASE ): A : int = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsNotNone(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) A : List[str] = FlaxAutoModel.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsNotNone(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) @slow def __lowerCAmelCase ( self ) -> int: """simple docstring""" for model_name in ["roberta-base", "roberta-large"]: with self.subTest(SCREAMING_SNAKE_CASE ): A : Any = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsNotNone(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) A : Any = FlaxAutoModel.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsNotNone(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) @slow def __lowerCAmelCase ( self ) -> Any: """simple docstring""" for model_name in ["bert-base-cased", "bert-large-uncased"]: A : Optional[int] = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE ) A : List[str] = FlaxBertModel.from_pretrained(SCREAMING_SNAKE_CASE ) A : Optional[Any] = tokenizer('''Do you support jax jitted function?''' , return_tensors=TensorType.JAX ) @jax.jit def eval(**SCREAMING_SNAKE_CASE ): return model(**SCREAMING_SNAKE_CASE ) eval(**SCREAMING_SNAKE_CASE ).block_until_ready() @slow def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" for model_name in ["roberta-base", "roberta-large"]: A : List[str] = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE ) A : Union[str, Any] = FlaxRobertaModel.from_pretrained(SCREAMING_SNAKE_CASE ) A : int = tokenizer('''Do you support jax jitted function?''' , return_tensors=TensorType.JAX ) @jax.jit def eval(**SCREAMING_SNAKE_CASE ): return model(**SCREAMING_SNAKE_CASE ) eval(**SCREAMING_SNAKE_CASE ).block_until_ready() def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" with self.assertRaisesRegex( SCREAMING_SNAKE_CASE , '''bert-base is not a local folder and is not a valid model identifier''' ): A : List[Any] = FlaxAutoModel.from_pretrained('''bert-base''' ) def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" with self.assertRaisesRegex( SCREAMING_SNAKE_CASE , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ): A : Optional[int] = FlaxAutoModel.from_pretrained(SCREAMING_SNAKE_CASE , revision='''aaaaaa''' ) def __lowerCAmelCase ( self ) -> str: """simple docstring""" with self.assertRaisesRegex( SCREAMING_SNAKE_CASE , '''hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack''' , ): A : List[str] = FlaxAutoModel.from_pretrained('''hf-internal-testing/config-no-model''' ) def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" with self.assertRaisesRegex(SCREAMING_SNAKE_CASE , 
'''Use `from_pt=True` to load this model''' ): A : Any = FlaxAutoModel.from_pretrained('''hf-internal-testing/tiny-bert-pt-only''' )
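# Usage sketch of the jitted-eval pattern the tests above exercise (model name
# taken from the tests; the weights are downloaded on first use).
import jax
from transformers import AutoTokenizer, FlaxAutoModel, TensorType

tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
model = FlaxAutoModel.from_pretrained("bert-base-cased")

inputs = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX)

@jax.jit
def eval_fn(**kwargs):
    return model(**kwargs)

outputs = eval_fn(**inputs)
outputs.last_hidden_state.block_until_ready()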
"""simple docstring""" import unittest import numpy as np def _SCREAMING_SNAKE_CASE ( _lowercase : np.ndarray , _lowercase : np.ndarray , _lowercase : np.ndarray , _lowercase : np.ndarray | None = None , ) ->np.ndarray: '''simple docstring''' a : Tuple = np.shape(_lowercase ) a : Optional[int] = np.shape(_lowercase ) a : Any = np.shape(_lowercase ) if shape_a[0] != shape_b[0]: a : Dict = ( "Expected the same number of rows for A and B. " F"""Instead found A of size {shape_a} and B of size {shape_b}""" ) raise ValueError(_lowercase ) if shape_b[1] != shape_c[1]: a : int = ( "Expected the same number of columns for B and C. " F"""Instead found B of size {shape_b} and C of size {shape_c}""" ) raise ValueError(_lowercase ) a : List[str] = pseudo_inv if a_inv is None: try: a : Union[str, Any] = np.linalg.inv(_lowercase ) except np.linalg.LinAlgError: raise ValueError( "Input matrix A is not invertible. Cannot compute Schur complement." ) return mat_c - mat_b.T @ a_inv @ mat_b class __UpperCamelCase ( unittest.TestCase ): def __a ( self ) -> None: a : List[str] = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) a : List[Any] = np.array([[0, 3], [3, 0], [2, 3]] ) a : int = np.array([[2, 1], [6, 3]] ) a : str = schur_complement(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) a : Tuple = np.block([[a, b], [b.T, c]] ) a : Tuple = np.linalg.det(lowerCAmelCase__ ) a : int = np.linalg.det(lowerCAmelCase__ ) a : List[Any] = np.linalg.det(lowerCAmelCase__ ) self.assertAlmostEqual(lowerCAmelCase__ , det_a * det_s ) def __a ( self ) -> None: a : int = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) a : Optional[Any] = np.array([[0, 3], [3, 0], [2, 3]] ) a : Optional[Any] = np.array([[2, 1], [6, 3]] ) with self.assertRaises(lowerCAmelCase__ ): schur_complement(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) def __a ( self ) -> None: a : Dict = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) a : Tuple = np.array([[0, 3], [3, 0], [2, 3]] ) a : Tuple = np.array([[2, 1, 3], [6, 3, 5]] ) with self.assertRaises(lowerCAmelCase__ ): schur_complement(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) if __name__ == "__main__": import doctest doctest.testmod() unittest.main()
'''simple docstring''' import argparse import re import torch from CLAP import create_model from transformers import AutoFeatureExtractor, ClapConfig, ClapModel lowercase : Union[str, Any] = { 'text_branch': 'text_model', 'audio_branch': 'audio_model.audio_encoder', 'attn': 'attention.self', 'self.proj': 'output.dense', 'attention.self_mask': 'attn_mask', 'mlp.fc1': 'intermediate.dense', 'mlp.fc2': 'output.dense', 'norm1': 'layernorm_before', 'norm2': 'layernorm_after', 'bn0': 'batch_norm', } lowercase : Tuple = AutoFeatureExtractor.from_pretrained('laion/clap-htsat-unfused', truncation='rand_trunc') def lowerCAmelCase_ ( snake_case__ , snake_case__=False ): '''simple docstring''' A, A : Tuple = create_model( '''HTSAT-tiny''' , '''roberta''' , snake_case__ , precision='''fp32''' , device='''cuda:0''' if torch.cuda.is_available() else '''cpu''' , enable_fusion=snake_case__ , fusion_type='''aff_2d''' if enable_fusion else None , ) return model, model_cfg def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' A : Dict = {} A : str = R'''.*sequential.(\d+).*''' A : Union[str, Any] = R'''.*_projection.(\d+).*''' for key, value in state_dict.items(): # check if any key needs to be modified for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items(): if key_to_modify in key: A : Any = key.replace(snake_case__ , snake_case__ ) if re.match(snake_case__ , snake_case__ ): # replace sequential layers with list A : Any = re.match(snake_case__ , snake_case__ ).group(1 ) A : List[str] = key.replace(F'sequential.{sequential_layer}.' , F'layers.{int(snake_case__ )//3}.linear.' ) elif re.match(snake_case__ , snake_case__ ): A : Union[str, Any] = int(re.match(snake_case__ , snake_case__ ).group(1 ) ) # Because in CLAP they use `nn.Sequential`... A : str = 1 if projecton_layer == 0 else 2 A : Optional[Any] = key.replace(F'_projection.{projecton_layer}.' , F'_projection.linear{transformers_projection_layer}.' ) if "audio" and "qkv" in key: # split qkv into query key and value A : int = value A : List[Any] = mixed_qkv.size(0 ) // 3 A : Union[str, Any] = mixed_qkv[:qkv_dim] A : Optional[int] = mixed_qkv[qkv_dim : qkv_dim * 2] A : Optional[int] = mixed_qkv[qkv_dim * 2 :] A : Tuple = query_layer A : Union[str, Any] = key_layer A : Optional[int] = value_layer else: A : Dict = value return model_state_dict def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__=False ): '''simple docstring''' A, A : int = init_clap(snake_case__ , enable_fusion=snake_case__ ) clap_model.eval() A : str = clap_model.state_dict() A : Union[str, Any] = rename_state_dict(snake_case__ ) A : Tuple = ClapConfig() A : str = enable_fusion A : str = ClapModel(snake_case__ ) # ignore the spectrogram embedding layer model.load_state_dict(snake_case__ , strict=snake_case__ ) model.save_pretrained(snake_case__ ) transformers_config.save_pretrained(snake_case__ ) if __name__ == "__main__": lowercase : List[str] = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') parser.add_argument('--enable_fusion', action='store_true', help='Whether to enable fusion or not') lowercase : Tuple = parser.parse_args() convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
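# Usage sketch: invoked programmatically, the conversion mirrors the argparse
# entry point above, which calls convert_clap_checkpoint(checkpoint_path,
# pytorch_dump_folder_path, config_path, enable_fusion). All paths below are
# placeholders, and the laion CLAP package plus a real checkpoint are required.
convert_clap_checkpoint(
    "/path/to/clap_checkpoint.pt",  # checkpoint_path (placeholder)
    "/path/to/output_dir",          # pytorch_dump_folder_path (placeholder)
    None,                           # config_path (not consumed in the body above)
    False,                          # enable_fusion
)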
"""simple docstring""" import argparse import json import os import fairseq import torch from torch import nn from transformers import ( SpeechaTextaConfig, SpeechaTextaForCausalLM, SpeechaTextaTokenizer, SpeechEncoderDecoderConfig, SpeechEncoderDecoderModel, WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaModel, logging, ) logging.set_verbosity_info() __UpperCamelCase : str = logging.get_logger(__name__) __UpperCamelCase : Union[str, Any] = { '''post_extract_proj''': '''feature_projection.projection''', '''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''', '''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''', '''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''', '''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''', '''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''', '''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''', '''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''', '''fc2''': '''encoder.layers.*.feed_forward.output_dense''', '''final_layer_norm''': '''encoder.layers.*.final_layer_norm''', '''encoder.layer_norm''': '''encoder.layer_norm''', '''w2v_model.layer_norm''': '''feature_projection.layer_norm''', '''quantizer.weight_proj''': '''quantizer.weight_proj''', '''quantizer.vars''': '''quantizer.codevectors''', '''project_q''': '''project_q''', '''final_proj''': '''project_hid''', '''w2v_encoder.proj''': '''lm_head''', '''mask_emb''': '''masked_spec_embed''', } __UpperCamelCase : List[Any] = [ '''lm_head''', '''quantizer.weight_proj''', '''quantizer.codevectors''', '''project_q''', '''project_hid''', ] def __SCREAMING_SNAKE_CASE ( A_ , A_ , A_ , A_ , A_ ): for attribute in key.split('''.''' ): lowerCAmelCase__ : Any = getattr(A_ , A_ ) if weight_type is not None: lowerCAmelCase__ : Tuple = getattr(A_ , A_ ).shape else: lowerCAmelCase__ : int = hf_pointer.shape assert hf_shape == value.shape, ( f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be' f' {value.shape} for {full_name}' ) if weight_type == "weight": lowerCAmelCase__ : Any = value elif weight_type == "weight_g": lowerCAmelCase__ : List[str] = value elif weight_type == "weight_v": lowerCAmelCase__ : Union[str, Any] = value elif weight_type == "bias": lowerCAmelCase__ : Dict = value else: lowerCAmelCase__ : Union[str, Any] = value logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' 
) def __SCREAMING_SNAKE_CASE ( A_ , A_ ): lowerCAmelCase__ : Tuple = [] lowerCAmelCase__ : Union[str, Any] = fairseq_model.state_dict() lowerCAmelCase__ : List[str] = hf_model.feature_extractor # if encoder has different dim to decoder -> use proj_weight lowerCAmelCase__ : Any = None for name, value in fairseq_dict.items(): lowerCAmelCase__ : Dict = False if "conv_layers" in name: load_conv_layer( A_ , A_ , A_ , A_ , hf_model.config.feat_extract_norm == '''group''' , ) lowerCAmelCase__ : List[Any] = True elif name.split('''.''' )[0] == "proj": lowerCAmelCase__ : List[Any] = fairseq_model.proj lowerCAmelCase__ : str = True else: for key, mapped_key in MAPPING.items(): if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]: lowerCAmelCase__ : Optional[Any] = True if "*" in mapped_key: lowerCAmelCase__ : List[Any] = name.split(A_ )[0].split('''.''' )[-2] lowerCAmelCase__ : Optional[Any] = mapped_key.replace('''*''' , A_ ) if "weight_g" in name: lowerCAmelCase__ : Dict = '''weight_g''' elif "weight_v" in name: lowerCAmelCase__ : Dict = '''weight_v''' elif "bias" in name: lowerCAmelCase__ : List[Any] = '''bias''' elif "weight" in name: lowerCAmelCase__ : Union[str, Any] = '''weight''' else: lowerCAmelCase__ : str = None set_recursively(A_ , A_ , A_ , A_ , A_ ) continue if not is_used: unused_weights.append(A_ ) logger.warning(f'Unused weights: {unused_weights}' ) return proj_weight def __SCREAMING_SNAKE_CASE ( A_ , A_ , A_ , A_ , A_ ): lowerCAmelCase__ : List[Any] = full_name.split('''conv_layers.''' )[-1] lowerCAmelCase__ : int = name.split('''.''' ) lowerCAmelCase__ : Tuple = int(items[0] ) lowerCAmelCase__ : Optional[Any] = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( f'{full_name} has size {value.shape}, but' f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' ) lowerCAmelCase__ : Dict = value logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( f'{full_name} has size {value.shape}, but' f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' ) lowerCAmelCase__ : Optional[int] = value logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( f'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was' " found." ) lowerCAmelCase__ : Dict = value logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( f'{full_name} has size {value.shape}, but' f' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.' ) lowerCAmelCase__ : Dict = value logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' 
) else: unused_weights.append(A_ ) def __SCREAMING_SNAKE_CASE ( A_ ): lowerCAmelCase__ ,lowerCAmelCase__ : List[Any] = emb.weight.shape lowerCAmelCase__ : List[str] = nn.Linear(A_ , A_ , bias=A_ ) lowerCAmelCase__ : Optional[Any] = emb.weight.data return lin_layer def __SCREAMING_SNAKE_CASE ( A_ ): with open(A_ , '''r''' , encoding='''utf-8''' ) as f: lowerCAmelCase__ : str = f.readlines() lowerCAmelCase__ : Any = [line.split(''' ''' )[0] for line in lines] lowerCAmelCase__ : Tuple = len(A_ ) lowerCAmelCase__ : Dict = { '''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3, } vocab_dict.update(dict(zip(A_ , range(4 , num_words + 4 ) ) ) ) return vocab_dict @torch.no_grad() def __SCREAMING_SNAKE_CASE ( A_ , A_ , A_ , A_ , A_ , A_ , A_ , ): lowerCAmelCase__ : Optional[int] = WavaVecaConfig.from_pretrained(A_ ) lowerCAmelCase__ : Dict = SpeechaTextaConfig.from_pretrained( A_ , vocab_size=A_ , decoder_layers=A_ , do_stable_layer_norm=A_ ) lowerCAmelCase__ : Optional[Any] = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=A_ , return_attention_mask=A_ , ) lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ : Optional[int] = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} ) lowerCAmelCase__ : Optional[int] = model[0].eval() # set weights for wav2vec2 encoder lowerCAmelCase__ : str = WavaVecaModel(A_ ) lowerCAmelCase__ : Dict = recursively_load_weights_wavaveca(model.encoder , A_ ) lowerCAmelCase__ : List[Any] = SpeechaTextaForCausalLM(A_ ) lowerCAmelCase__ ,lowerCAmelCase__ : List[Any] = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=A_ ) # set output linear layer unexpected_keys.remove('''embed_out''' ) lowerCAmelCase__ : Optional[Any] = nn.Parameter(model.decoder.embed_out.detach() ) # layer norm is init to identity matrix so leaving it is fine logger.warning(f'The following keys are missing when loading the decoder weights: {missing_keys}' ) logger.warning(f'The following keys are unexpected when loading the decoder weights: {unexpected_keys}' ) lowerCAmelCase__ : Union[str, Any] = SpeechEncoderDecoderModel(encoder=A_ , decoder=A_ ) lowerCAmelCase__ : Union[str, Any] = False # add projection layer lowerCAmelCase__ : Optional[int] = nn.Parameter(projection_layer.weight ) lowerCAmelCase__ : List[Any] = nn.Parameter(projection_layer.bias ) lowerCAmelCase__ : str = create_vocab_dict(A_ ) with open(os.path.join(A_ , '''vocab.json''' ) , '''w''' ) as fp: json.dump(A_ , A_ ) lowerCAmelCase__ : str = SpeechaTextaTokenizer(os.path.join(A_ , '''vocab.json''' ) ) tokenizer.save_pretrained(A_ ) lowerCAmelCase__ : Any = hf_wavavec.config.to_dict() lowerCAmelCase__ : Any = tokenizer.pad_token_id lowerCAmelCase__ : Optional[Any] = tokenizer.bos_token_id lowerCAmelCase__ : str = tokenizer.eos_token_id lowerCAmelCase__ : Optional[Any] = '''speech_to_text_2''' lowerCAmelCase__ : List[str] = '''wav2vec2''' lowerCAmelCase__ : Dict = SpeechEncoderDecoderConfig.from_dict(A_ ) hf_wavavec.save_pretrained(A_ ) feature_extractor.save_pretrained(A_ ) if __name__ == "__main__": __UpperCamelCase : Dict = argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to 
dict of fine-tuned model''') parser.add_argument( '''--encoder_config_path''', default='''facebook/wav2vec2-large-lv60''', type=str, help='''Path to hf encoder wav2vec2 checkpoint config''', ) parser.add_argument( '''--decoder_config_path''', default='''facebook/s2t-small-mustc-en-fr-st''', type=str, help='''Path to hf decoder s2t checkpoint config''', ) parser.add_argument('''--vocab_size''', default=1_0_2_2_4, type=int, help='''Vocab size of decoder''') parser.add_argument('''--num_decoder_layers''', default=7, type=int, help='''Number of decoder layers''') __UpperCamelCase : List[Any] = parser.parse_args() convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.dict_path, encoder_config_path=args.encoder_config_path, decoder_config_path=args.decoder_config_path, vocab_size=args.vocab_size, num_decoder_layers=args.num_decoder_layers, )
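A minimal inference sketch against the converted checkpoint. The folder name "./s2t2-wav2vec2" and the silent dummy waveform are assumptions for illustration; the script above saves the model, feature extractor, and tokenizer into the same dump folder, so all three load from one path.

import torch
from transformers import (
    SpeechEncoderDecoderModel,
    Speech2Text2Tokenizer,
    Wav2Vec2FeatureExtractor,
)

# Hypothetical folder passed as --pytorch_dump_folder_path to the script above.
dump_dir = "./s2t2-wav2vec2"
model = SpeechEncoderDecoderModel.from_pretrained(dump_dir)
feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(dump_dir)
tokenizer = Speech2Text2Tokenizer.from_pretrained(dump_dir)

# One second of silent 16 kHz audio stands in for real speech.
waveform = torch.zeros(16_000)
inputs = feature_extractor(waveform, sampling_rate=16_000, return_tensors="pt")
generated_ids = model.generate(inputs.input_values)
print(tokenizer.batch_decode(generated_ids, skip_special_tokens=True))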
106
'''simple docstring''' import os from pickle import UnpicklingError from typing import Dict, Tuple import jax import jax.numpy as jnp import numpy as np from flax.serialization import from_bytes from flax.traverse_util import flatten_dict, unflatten_dict import transformers from .utils import logging lowercase : Dict = logging.get_logger(__name__) def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__=False ): '''simple docstring''' try: import torch # noqa: F401 except ImportError: logger.error( '''Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see''' ''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation''' ''' instructions.''' ) raise if not is_sharded: A : Union[str, Any] = os.path.abspath(snake_case__ ) logger.info(F'Loading PyTorch weights from {pt_path}' ) A : Any = torch.load(snake_case__ , map_location='''cpu''' ) logger.info(F'PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.' ) A : List[str] = convert_pytorch_state_dict_to_flax(snake_case__ , snake_case__ ) else: # model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files A : Any = convert_pytorch_sharded_state_dict_to_flax(snake_case__ , snake_case__ ) return flax_state_dict def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , ): '''simple docstring''' def is_key_or_prefix_key_in_dict(snake_case__ ) -> bool: return len(set(snake_case__ ) & {key, (model_prefix,) + key} ) > 0 # layer norm A : Union[str, Any] = pt_tuple_key[:-1] + ('''scale''',) if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(snake_case__ ): return renamed_pt_tuple_key, pt_tensor # batch norm layer mean A : Tuple = pt_tuple_key[:-1] + ('''mean''',) if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(snake_case__ ): return renamed_pt_tuple_key, pt_tensor # batch norm layer var A : Dict = pt_tuple_key[:-1] + ('''var''',) if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(snake_case__ ): return renamed_pt_tuple_key, pt_tensor # embedding A : Any = pt_tuple_key[:-1] + ('''embedding''',) if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(snake_case__ ): return renamed_pt_tuple_key, pt_tensor # conv layer A : Optional[Any] = pt_tuple_key[:-1] + ('''kernel''',) if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(snake_case__ ): A : List[Any] = pt_tensor.transpose(2 , 3 , 1 , 0 ) return renamed_pt_tuple_key, pt_tensor # linear layer A : Optional[int] = pt_tuple_key[:-1] + ('''kernel''',) if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(snake_case__ ): A : str = pt_tensor.T return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm weight A : Dict = pt_tuple_key[:-1] + ('''weight''',) if pt_tuple_key[-1] == "gamma": return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm bias A : List[Any] = pt_tuple_key[:-1] + ('''bias''',) if pt_tuple_key[-1] == "beta": return renamed_pt_tuple_key, pt_tensor # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030 A : Dict = None if pt_tuple_key[-3::2] == ("parametrizations", "original0"): A : List[Any] = pt_tuple_key[-2] + '''_g''' elif pt_tuple_key[-3::2] == ("parametrizations", "original1"): A : List[str] = pt_tuple_key[-2] + '''_v''' if name is not None: A : int = pt_tuple_key[:-3] + (name,) return renamed_pt_tuple_key, pt_tensor return 
pt_tuple_key, pt_tensor def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' A : Dict = {k: v.numpy() for k, v in pt_state_dict.items()} A : int = flax_model.base_model_prefix # use params dict if the model contains batch norm layers if "params" in flax_model.params: A : List[str] = flax_model.params['''params'''] else: A : Dict = flax_model.params A : List[Any] = flatten_dict(snake_case__ ) # add batch_stats keys,values to dict if "batch_stats" in flax_model.params: A : List[str] = flatten_dict(flax_model.params['''batch_stats'''] ) random_flax_state_dict.update(snake_case__ ) A : int = {} A : List[str] = (model_prefix not in flax_model_params) and ( model_prefix in {k.split('''.''' )[0] for k in pt_state_dict.keys()} ) A : int = (model_prefix in flax_model_params) and ( model_prefix not in {k.split('''.''' )[0] for k in pt_state_dict.keys()} ) # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): A : str = tuple(pt_key.split('''.''' ) ) # remove base model prefix if necessary A : Union[str, Any] = pt_tuple_key[0] == model_prefix if load_model_with_head_into_base_model and has_base_model_prefix: A : Any = pt_tuple_key[1:] # Correctly rename weight parameters A, A : Dict = rename_key_and_reshape_tensor( snake_case__ , snake_case__ , snake_case__ , snake_case__ ) # add model prefix if necessary A : Any = (model_prefix,) + flax_key in random_flax_state_dict if load_base_model_into_model_with_head and require_base_model_prefix: A : int = (model_prefix,) + flax_key if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( F'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape ' F'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' 
) # add batch stats if the model contains batchnorm layers if "batch_stats" in flax_model.params: if "mean" in flax_key[-1] or "var" in flax_key[-1]: A : Tuple = jnp.asarray(snake_case__ ) continue # remove num_batches_tracked key if "num_batches_tracked" in flax_key[-1]: flax_state_dict.pop(snake_case__ , snake_case__ ) continue # also add unexpected weight so that warning is thrown A : List[str] = jnp.asarray(snake_case__ ) else: # also add unexpected weight so that warning is thrown A : Optional[Any] = jnp.asarray(snake_case__ ) return unflatten_dict(snake_case__ ) def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' import torch # Load the index A : Union[str, Any] = {} for shard_file in shard_filenames: # load using msgpack utils A : List[str] = torch.load(snake_case__ ) A : int = {k: v.numpy() for k, v in pt_state_dict.items()} A : Tuple = flax_model.base_model_prefix # use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict if "batch_stats" in flax_model.params: A : Optional[int] = flax_model.params['''params'''] A : List[Any] = flatten_dict(snake_case__ ) random_flax_state_dict.update(flatten_dict(flax_model.params['''batch_stats'''] ) ) else: A : Dict = flax_model.params A : Tuple = flatten_dict(snake_case__ ) A : List[str] = (model_prefix not in flax_model_params) and ( model_prefix in {k.split('''.''' )[0] for k in pt_state_dict.keys()} ) A : List[str] = (model_prefix in flax_model_params) and ( model_prefix not in {k.split('''.''' )[0] for k in pt_state_dict.keys()} ) # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): A : int = tuple(pt_key.split('''.''' ) ) # remove base model prefix if necessary A : List[str] = pt_tuple_key[0] == model_prefix if load_model_with_head_into_base_model and has_base_model_prefix: A : Union[str, Any] = pt_tuple_key[1:] # Correctly rename weight parameters A, A : Any = rename_key_and_reshape_tensor( snake_case__ , snake_case__ , snake_case__ , snake_case__ ) # add model prefix if necessary A : int = (model_prefix,) + flax_key in random_flax_state_dict if load_base_model_into_model_with_head and require_base_model_prefix: A : int = (model_prefix,) + flax_key if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( F'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape ' F'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' 
) # add batch stats if the model contains batchnorm layers if "batch_stats" in flax_model.params: if "mean" in flax_key[-1]: A : Optional[int] = jnp.asarray(snake_case__ ) continue if "var" in flax_key[-1]: A : Optional[int] = jnp.asarray(snake_case__ ) continue # remove num_batches_tracked key if "num_batches_tracked" in flax_key[-1]: flax_state_dict.pop(snake_case__ , snake_case__ ) continue # also add unexpected weight so that warning is thrown A : Optional[Any] = jnp.asarray(snake_case__ ) else: # also add unexpected weight so that warning is thrown A : Optional[Any] = jnp.asarray(snake_case__ ) return unflatten_dict(snake_case__ ) def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' A : Dict = os.path.abspath(snake_case__ ) logger.info(F'Loading Flax weights from {flax_checkpoint_path}' ) # import correct flax class A : List[str] = getattr(snake_case__ , '''Flax''' + model.__class__.__name__ ) # load flax weight dict with open(snake_case__ , '''rb''' ) as state_f: try: A : int = from_bytes(snake_case__ , state_f.read() ) except UnpicklingError: raise EnvironmentError(F'Unable to convert {flax_checkpoint_path} to Flax deserializable object. ' ) return load_flax_weights_in_pytorch_model(snake_case__ , snake_case__ ) def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' try: import torch # noqa: F401 except ImportError: logger.error( '''Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see''' ''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation''' ''' instructions.''' ) raise # check if we have bf16 weights A : List[str] = flatten_dict(jax.tree_util.tree_map(lambda snake_case__ : x.dtype == jnp.bfloataa , snake_case__ ) ).values() if any(snake_case__ ): # convert all weights to fp32 if the are bf16 since torch.from_numpy can-not handle bf16 # and bf16 is not fully supported in PT yet. logger.warning( '''Found ``bfloat16`` weights in Flax model. 
Casting all ``bfloat16`` weights to ``float32`` ''' '''before loading those in PyTorch model.''' ) A : Optional[Any] = jax.tree_util.tree_map( lambda snake_case__ : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , snake_case__ ) A : Union[str, Any] = flatten_dict(snake_case__ ) A : List[Any] = pt_model.state_dict() A : Union[str, Any] = (pt_model.base_model_prefix in flax_state) and ( pt_model.base_model_prefix not in {k.split('''.''' )[0] for k in pt_model_dict.keys()} ) A : Tuple = (pt_model.base_model_prefix not in flax_state) and ( pt_model.base_model_prefix in {k.split('''.''' )[0] for k in pt_model_dict.keys()} ) # keep track of unexpected & missing keys A : int = [] A : Any = set(pt_model_dict.keys() ) for flax_key_tuple, flax_tensor in flax_state_dict.items(): A : Union[str, Any] = flax_key_tuple[0] == pt_model.base_model_prefix A : int = '''.'''.join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict # adapt flax_key to prepare for loading from/to base model only if load_model_with_head_into_base_model and has_base_model_prefix: A : List[str] = flax_key_tuple[1:] elif load_base_model_into_model_with_head and require_base_model_prefix: A : Optional[Any] = (pt_model.base_model_prefix,) + flax_key_tuple # rename flax weights to PyTorch format if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(snake_case__ ) not in pt_model_dict: # conv layer A : Optional[int] = flax_key_tuple[:-1] + ('''weight''',) A : Optional[int] = jnp.transpose(snake_case__ , (3, 2, 0, 1) ) elif flax_key_tuple[-1] == "kernel" and ".".join(snake_case__ ) not in pt_model_dict: # linear layer A : Tuple = flax_key_tuple[:-1] + ('''weight''',) A : Tuple = flax_tensor.T elif flax_key_tuple[-1] in ["scale", "embedding"]: A : Optional[int] = flax_key_tuple[:-1] + ('''weight''',) # adding batch stats from flax batch norm to pt elif "mean" in flax_key_tuple[-1]: A : Tuple = flax_key_tuple[:-1] + ('''running_mean''',) elif "var" in flax_key_tuple[-1]: A : Tuple = flax_key_tuple[:-1] + ('''running_var''',) if "batch_stats" in flax_state: A : List[Any] = '''.'''.join(flax_key_tuple[1:] ) # Remove the params/batch_stats header else: A : Union[str, Any] = '''.'''.join(snake_case__ ) # We also need to look at `pt_model_dict` and see if there are keys requiring further transformation. A : int = {} # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030 for key in pt_model_dict: A : Optional[int] = key.split('''.''' ) A : Dict = None if key_components[-3::2] == ["parametrizations", "original0"]: A : List[str] = key_components[-2] + '''_g''' elif key_components[-3::2] == ["parametrizations", "original1"]: A : List[Any] = key_components[-2] + '''_v''' if name is not None: A : str = key_components[:-3] + [name] A : Optional[Any] = '''.'''.join(snake_case__ ) A : Optional[Any] = key if flax_key in special_pt_names: A : Optional[Any] = special_pt_names[flax_key] if flax_key in pt_model_dict: if flax_tensor.shape != pt_model_dict[flax_key].shape: raise ValueError( F'Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected ' F'to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.' 
) else: # add weight to pytorch dict A : Dict = np.asarray(snake_case__ ) if not isinstance(snake_case__ , np.ndarray ) else flax_tensor A : Dict = torch.from_numpy(snake_case__ ) # remove from missing keys missing_keys.remove(snake_case__ ) else: # weight is not expected by PyTorch model unexpected_keys.append(snake_case__ ) pt_model.load_state_dict(snake_case__ ) # re-transform missing_keys to list A : List[Any] = list(snake_case__ ) if len(snake_case__ ) > 0: logger.warning( '''Some weights of the Flax model were not used when initializing the PyTorch model''' F' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing' F' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture' ''' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This''' F' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect' ''' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a''' ''' FlaxBertForSequenceClassification model).''' ) else: logger.warning(F'All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n' ) if len(snake_case__ ) > 0: logger.warning( F'Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly' F' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to' ''' use it for predictions and inference.''' ) else: logger.warning( F'All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n' '''If your task is similar to the task the model of the checkpoint was trained on, ''' F'you can already use {pt_model.__class__.__name__} for predictions without further training.' ) return pt_model
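The helpers above are what `from_pretrained` dispatches to when crossing frameworks. A minimal round-trip sketch, assuming both torch and flax are installed and the small "bert-base-uncased" checkpoint is reachable:

from transformers import BertModel, FlaxBertModel

# PyTorch -> Flax (exercises convert_pytorch_state_dict_to_flax above).
pt_model = BertModel.from_pretrained("bert-base-uncased")
pt_model.save_pretrained("./bert-pt")
flax_model = FlaxBertModel.from_pretrained("./bert-pt", from_pt=True)

# Flax -> PyTorch (exercises load_flax_weights_in_pytorch_model above).
flax_model.save_pretrained("./bert-flax")
pt_roundtrip = BertModel.from_pretrained("./bert-flax", from_flax=True)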
3
0
import itertools import json import os import unittest from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class snake_case__ (_UpperCamelCase , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Tuple = RobertaTokenizer SCREAMING_SNAKE_CASE_ : List[str] = RobertaTokenizerFast SCREAMING_SNAKE_CASE_ : int = True SCREAMING_SNAKE_CASE_ : Optional[Any] = {"""cls_token""": """<s>"""} def __UpperCAmelCase ( self : str ) -> Tuple: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt a = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>", ] a = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) ) a = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""] a = {"unk_token": "<unk>"} a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(__lowerCamelCase ) + "\n" ) with open(self.merges_file , "w" , encoding="utf-8" ) as fp: fp.write("\n".join(__lowerCamelCase ) ) def __UpperCAmelCase ( self : Any , **__lowerCamelCase : str ) -> Any: kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCamelCase ) def __UpperCAmelCase ( self : Optional[int] , **__lowerCamelCase : List[Any] ) -> List[Any]: kwargs.update(self.special_tokens_map ) return RobertaTokenizerFast.from_pretrained(self.tmpdirname , **__lowerCamelCase ) def __UpperCAmelCase ( self : str , __lowerCamelCase : List[str] ) -> Any: a = "lower newer" a = "lower newer" return input_text, output_text def __UpperCAmelCase ( self : Optional[int] ) -> Optional[int]: a = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map ) a = "lower newer" a = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"] a = tokenizer.tokenize(__lowerCamelCase ) # , add_prefix_space=True) self.assertListEqual(__lowerCamelCase , __lowerCamelCase ) a = tokens + [tokenizer.unk_token] a = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCamelCase ) , __lowerCamelCase ) def __UpperCAmelCase ( self : List[str] ) -> Tuple: a = self.get_tokenizer() self.assertListEqual(tokenizer.encode("Hello world!" , add_special_tokens=__lowerCamelCase ) , [0, 3_14_14, 2_32, 3_28, 2] ) self.assertListEqual( tokenizer.encode("Hello world! 
cécé herlolip 418" , add_special_tokens=__lowerCamelCase ) , [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2] , ) @slow def __UpperCAmelCase ( self : Dict ) -> List[Any]: a = self.tokenizer_class.from_pretrained("roberta-base" ) a = tokenizer.encode("sequence builders" , add_special_tokens=__lowerCamelCase ) a = tokenizer.encode("multi-sequence build" , add_special_tokens=__lowerCamelCase ) a = tokenizer.encode( "sequence builders" , add_special_tokens=__lowerCamelCase , add_prefix_space=__lowerCamelCase ) a = tokenizer.encode( "sequence builders" , "multi-sequence build" , add_special_tokens=__lowerCamelCase , add_prefix_space=__lowerCamelCase ) a = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase ) a = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase , __lowerCamelCase ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode def __UpperCAmelCase ( self : Dict ) -> Union[str, Any]: a = self.get_tokenizer() a = "Encode this sequence." a = tokenizer.byte_encoder[" ".encode("utf-8" )[0]] # Testing encoder arguments a = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase , add_prefix_space=__lowerCamelCase ) a = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertNotEqual(__lowerCamelCase , __lowerCamelCase ) a = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase , add_prefix_space=__lowerCamelCase ) a = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertEqual(__lowerCamelCase , __lowerCamelCase ) tokenizer.add_special_tokens({"bos_token": "<s>"} ) a = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) a = tokenizer.convert_ids_to_tokens(encoded[1] )[0] self.assertNotEqual(__lowerCamelCase , __lowerCamelCase ) # Testing spaces after special tokens a = "<mask>" tokenizer.add_special_tokens( {"mask_token": AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase )} ) # mask token has a left space a = tokenizer.convert_tokens_to_ids(__lowerCamelCase ) a = "Encode <mask> sequence" a = "Encode <mask>sequence" a = tokenizer.encode(__lowerCamelCase ) a = encoded.index(__lowerCamelCase ) a = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertEqual(__lowerCamelCase , __lowerCamelCase ) a = tokenizer.encode(__lowerCamelCase ) a = encoded.index(__lowerCamelCase ) a = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertNotEqual(__lowerCamelCase , __lowerCamelCase ) def __UpperCAmelCase ( self : int ) -> int: pass def __UpperCAmelCase ( self : Optional[Any] ) -> Any: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): a = self.rust_tokenizer_class.from_pretrained(__lowerCamelCase , **__lowerCamelCase ) a = self.tokenizer_class.from_pretrained(__lowerCamelCase , **__lowerCamelCase ) a = "A, <mask> AllenNLP sentence." 
a = tokenizer_r.encode_plus(__lowerCamelCase , add_special_tokens=__lowerCamelCase , return_token_type_ids=__lowerCamelCase ) a = tokenizer_p.encode_plus(__lowerCamelCase , add_special_tokens=__lowerCamelCase , return_token_type_ids=__lowerCamelCase ) # token_type_ids should put 0 everywhere self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) ) # attention_mask should put 1 everywhere, so sum over length should be 1 self.assertEqual( sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , ) a = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] ) a = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] ) # Rust correctly handles the space before the mask while python doesnt self.assertSequenceEqual(tokens_p["input_ids"] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] ) self.assertSequenceEqual(tokens_r["input_ids"] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] ) self.assertSequenceEqual( __lowerCamelCase , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] ) self.assertSequenceEqual( __lowerCamelCase , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] ) def __UpperCAmelCase ( self : Optional[Any] ) -> Optional[int]: for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ): a = self.rust_tokenizer_class.from_pretrained( self.tmpdirname , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase ) a = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() ) a = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() ) self.assertEqual(pre_tokenizer_state["add_prefix_space"] , __lowerCamelCase ) self.assertEqual(post_processor_state["add_prefix_space"] , __lowerCamelCase ) self.assertEqual(post_processor_state["trim_offsets"] , __lowerCamelCase ) def __UpperCAmelCase ( self : List[str] ) -> Optional[int]: # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and # `trim_offsets` for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): a = "hello" # `hello` is a token in the vocabulary of `pretrained_name` a = f"""{text_of_1_token} {text_of_1_token}""" a = self.rust_tokenizer_class.from_pretrained( __lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase ) a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase ) self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCamelCase )) ) self.assertEqual( encoding.offset_mapping[1] , (len(__lowerCamelCase ) + 1, len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , ) a = self.rust_tokenizer_class.from_pretrained( __lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase ) a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase ) self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCamelCase )) ) self.assertEqual( encoding.offset_mapping[1] , (len(__lowerCamelCase ) + 1, len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , ) a = self.rust_tokenizer_class.from_pretrained( __lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase ) a = 
tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase ) self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCamelCase )) ) self.assertEqual( encoding.offset_mapping[1] , (len(__lowerCamelCase ), len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , ) a = self.rust_tokenizer_class.from_pretrained( __lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase ) a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase ) self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCamelCase )) ) self.assertEqual( encoding.offset_mapping[1] , (len(__lowerCamelCase ), len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , ) a = f""" {text}""" # tokenizer_r = self.rust_tokenizer_class.from_pretrained( # pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True # ) # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False) # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token))) # self.assertEqual( # encoding.offset_mapping[1], # (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), # ) a = self.rust_tokenizer_class.from_pretrained( __lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase ) a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase ) self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(__lowerCamelCase )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(__lowerCamelCase ) + 1, 1 + len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , ) a = self.rust_tokenizer_class.from_pretrained( __lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase ) a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__lowerCamelCase )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(__lowerCamelCase ), 1 + len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , ) a = self.rust_tokenizer_class.from_pretrained( __lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase ) a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__lowerCamelCase )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(__lowerCamelCase ), 1 + len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
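A standalone sketch of the offset behavior the last group of tests asserts, assuming the "roberta-base" tokenizer can be downloaded:

from transformers import RobertaTokenizerFast

tok = RobertaTokenizerFast.from_pretrained(
    "roberta-base", add_prefix_space=True, trim_offsets=True
)
enc = tok("hello hello", return_offsets_mapping=True, add_special_tokens=False)
# With trim_offsets=True the prefix space is excluded from each span.
print(enc.offset_mapping)  # [(0, 5), (6, 11)]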
107
'''simple docstring''' import argparse import gc import json import os import re import torch from huggingface_hub import hf_hub_download from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint lowercase : Optional[int] = { '169M': 12, '430M': 24, '1B5': 24, '3B': 32, '7B': 32, '14B': 40, } lowercase : Optional[Any] = { '169M': 7_68, '430M': 10_24, '1B5': 20_48, '3B': 25_60, '7B': 40_96, '14B': 51_20, } def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' A : Optional[Any] = list(state_dict.keys() ) for name in state_dict_keys: A : str = state_dict.pop(snake_case__ ) # emb -> embedding if name.startswith('''emb.''' ): A : Optional[Any] = name.replace('''emb.''' , '''embeddings.''' ) # ln_0 -> pre_ln (only present at block 0) if name.startswith('''blocks.0.ln0''' ): A : Union[str, Any] = name.replace('''blocks.0.ln0''' , '''blocks.0.pre_ln''' ) # att -> attention A : int = re.sub(R'''blocks\.(\d+)\.att''' , R'''blocks.\1.attention''' , snake_case__ ) # ffn -> feed_forward A : List[Any] = re.sub(R'''blocks\.(\d+)\.ffn''' , R'''blocks.\1.feed_forward''' , snake_case__ ) # time_mix_k -> time_mix_key and reshape if name.endswith('''.time_mix_k''' ): A : List[str] = name.replace('''.time_mix_k''' , '''.time_mix_key''' ) # time_mix_v -> time_mix_value and reshape if name.endswith('''.time_mix_v''' ): A : Union[str, Any] = name.replace('''.time_mix_v''' , '''.time_mix_value''' ) # time_mix_r -> time_mix_key and reshape if name.endswith('''.time_mix_r''' ): A : Union[str, Any] = name.replace('''.time_mix_r''' , '''.time_mix_receptance''' ) if name != "head.weight": A : List[Any] = '''rwkv.''' + name A : Dict = weight return state_dict def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__=None , snake_case__=None , snake_case__=False , snake_case__=None ): '''simple docstring''' if tokenizer_file is None: print('''No `--tokenizer_file` provided, we will use the default tokenizer.''' ) A : int = 5_0277 A : Optional[int] = AutoTokenizer.from_pretrained('''EleutherAI/gpt-neox-20b''' ) else: A : str = PreTrainedTokenizerFast(tokenizer_file=snake_case__ ) A : Any = len(snake_case__ ) tokenizer.save_pretrained(snake_case__ ) # 2. Build the config A : List[str] = list(NUM_HIDDEN_LAYERS_MAPPING.keys() ) if size is None: # Try to infer size from the checkpoint name for candidate in possible_sizes: if candidate in checkpoint_file: A : List[str] = candidate break if size is None: raise ValueError('''Could not infer the size, please provide it with the `--size` argument.''' ) if size not in possible_sizes: raise ValueError(F'`size` should be one of {possible_sizes}, got {size}.' ) A : Any = RwkvConfig( vocab_size=snake_case__ , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDEN_SIZE_MAPPING[size] , ) config.save_pretrained(snake_case__ ) # 3. Download model file then convert state_dict A : Union[str, Any] = hf_hub_download(snake_case__ , snake_case__ ) A : Tuple = torch.load(snake_case__ , map_location='''cpu''' ) A : List[Any] = convert_state_dict(snake_case__ ) # 4. 
Split in shards and save A, A : List[str] = shard_checkpoint(snake_case__ ) for shard_file, shard in shards.items(): torch.save(snake_case__ , os.path.join(snake_case__ , snake_case__ ) ) if index is not None: A : Dict = os.path.join(snake_case__ , snake_case__ ) # Save the index as well with open(snake_case__ , '''w''' , encoding='''utf-8''' ) as f: A : List[Any] = json.dumps(snake_case__ , indent=2 , sort_keys=snake_case__ ) + '''\n''' f.write(snake_case__ ) # 5. Clean up shards (for some reason the file PyTorch saves take the same space as the whole state_dict print( '''Cleaning up shards. This may error with an OOM error, it this is the case don\'t worry you still have converted the model.''' ) A : List[Any] = list(shards.keys() ) del state_dict del shards gc.collect() for shard_file in shard_files: A : Union[str, Any] = torch.load(os.path.join(snake_case__ , snake_case__ ) ) torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(snake_case__ , snake_case__ ) ) del state_dict gc.collect() if push_to_hub: if model_name is None: raise ValueError('''Please provide a `model_name` to push the model to the Hub.''' ) A : int = AutoModelForCausalLM.from_pretrained(snake_case__ ) model.push_to_hub(snake_case__ , max_shard_size='''2GB''' ) tokenizer.push_to_hub(snake_case__ ) if __name__ == "__main__": lowercase : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--repo_id', default=None, type=str, required=True, help='Repo ID from which to pull the checkpoint.' ) parser.add_argument( '--checkpoint_file', default=None, type=str, required=True, help='Name of the checkpoint file in the repo.' ) parser.add_argument( '--output_dir', default=None, type=str, required=True, help='Where to save the converted model.' ) parser.add_argument( '--tokenizer_file', default=None, type=str, help='Path to the tokenizer file to use (if not provided, only the model is converted).', ) parser.add_argument( '--size', default=None, type=str, help='Size of the model. Will be inferred from the `checkpoint_file` if not passed.', ) parser.add_argument( '--push_to_hub', action='store_true', help='Push to the Hub the converted model.', ) parser.add_argument( '--model_name', default=None, type=str, help='Name of the pushed model on the Hub, including the username / organization.', ) lowercase : Union[str, Any] = parser.parse_args() convert_rmkv_checkpoint_to_hf_format( args.repo_id, args.checkpoint_file, args.output_dir, size=args.size, tokenizer_file=args.tokenizer_file, push_to_hub=args.push_to_hub, model_name=args.model_name, )
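A minimal generation sketch against the converted RWKV checkpoint; "./rwkv-430M" is a placeholder for whatever --output_dir was passed above:

from transformers import AutoModelForCausalLM, AutoTokenizer

output_dir = "./rwkv-430M"  # hypothetical --output_dir from the script above
tokenizer = AutoTokenizer.from_pretrained(output_dir)
model = AutoModelForCausalLM.from_pretrained(output_dir)

inputs = tokenizer("In a shocking finding,", return_tensors="pt")
output_ids = model.generate(**inputs, max_new_tokens=20)
print(tokenizer.decode(output_ids[0]))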
3
0
"""simple docstring""" import gc import random import unittest import numpy as np import torch from diffusers import ( DDIMScheduler, KandinskyVaaControlnetPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class SCREAMING_SNAKE_CASE__ ( lowercase , unittest.TestCase ): """simple docstring""" a : Optional[Any] =KandinskyVaaControlnetPipeline a : Optional[Any] =["image_embeds", "negative_image_embeds", "hint"] a : Tuple =["image_embeds", "negative_image_embeds", "hint"] a : Dict =[ "generator", "height", "width", "latents", "guidance_scale", "num_inference_steps", "return_dict", "guidance_scale", "num_images_per_prompt", "output_type", "return_dict", ] a : int =False @property def lowercase__ ( self ): """simple docstring""" return 32 @property def lowercase__ ( self ): """simple docstring""" return 32 @property def lowercase__ ( self ): """simple docstring""" return self.time_input_dim @property def lowercase__ ( self ): """simple docstring""" return self.time_input_dim * 4 @property def lowercase__ ( self ): """simple docstring""" return 100 @property def lowercase__ ( self ): """simple docstring""" torch.manual_seed(0 ) lowerCAmelCase : Any = { "in_channels": 8, # Out channels is double in channels because predicts mean and variance "out_channels": 8, "addition_embed_type": "image_hint", "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"), "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"), "mid_block_type": "UNetMidBlock2DSimpleCrossAttn", "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2), "layers_per_block": 1, "encoder_hid_dim": self.text_embedder_hidden_size, "encoder_hid_dim_type": "image_proj", "cross_attention_dim": self.cross_attention_dim, "attention_head_dim": 4, "resnet_time_scale_shift": "scale_shift", "class_embed_type": None, } lowerCAmelCase : Optional[Any] = UNetaDConditionModel(**snake_case__ ) return model @property def lowercase__ ( self ): """simple docstring""" return { "block_out_channels": [32, 32, 64, 64], "down_block_types": [ "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", "AttnDownEncoderBlock2D", ], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], "vq_embed_dim": 4, } @property def lowercase__ ( self ): """simple docstring""" torch.manual_seed(0 ) lowerCAmelCase : Optional[Any] = VQModel(**self.dummy_movq_kwargs ) return model def lowercase__ ( self ): """simple docstring""" lowerCAmelCase : Union[str, Any] = self.dummy_unet lowerCAmelCase : Optional[Any] = self.dummy_movq lowerCAmelCase : str = DDIMScheduler( num_train_timesteps=1_000 , beta_schedule="linear" , beta_start=0.00085 , beta_end=0.012 , clip_sample=snake_case__ , set_alpha_to_one=snake_case__ , steps_offset=1 , prediction_type="epsilon" , thresholding=snake_case__ , ) lowerCAmelCase : Optional[Any] = { "unet": unet, "scheduler": scheduler, "movq": movq, } return components def lowercase__ ( self , snake_case__ , snake_case__=0 ): """simple docstring""" lowerCAmelCase : Optional[int] = floats_tensor((1, 
self.text_embedder_hidden_size) , rng=random.Random(snake_case__ ) ).to(snake_case__ ) lowerCAmelCase : Dict = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to( snake_case__ ) # create hint lowerCAmelCase : Any = floats_tensor((1, 3, 64, 64) , rng=random.Random(snake_case__ ) ).to(snake_case__ ) if str(snake_case__ ).startswith("mps" ): lowerCAmelCase : Union[str, Any] = torch.manual_seed(snake_case__ ) else: lowerCAmelCase : str = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ ) lowerCAmelCase : Any = { "image_embeds": image_embeds, "negative_image_embeds": negative_image_embeds, "hint": hint, "generator": generator, "height": 64, "width": 64, "guidance_scale": 4.0, "num_inference_steps": 2, "output_type": "np", } return inputs def lowercase__ ( self ): """simple docstring""" lowerCAmelCase : List[Any] = "cpu" lowerCAmelCase : Optional[int] = self.get_dummy_components() lowerCAmelCase : int = self.pipeline_class(**snake_case__ ) lowerCAmelCase : Tuple = pipe.to(snake_case__ ) pipe.set_progress_bar_config(disable=snake_case__ ) lowerCAmelCase : Dict = pipe(**self.get_dummy_inputs(snake_case__ ) ) lowerCAmelCase : List[Any] = output.images lowerCAmelCase : Tuple = pipe( **self.get_dummy_inputs(snake_case__ ) , return_dict=snake_case__ , )[0] lowerCAmelCase : Optional[int] = image[0, -3:, -3:, -1] lowerCAmelCase : str = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) lowerCAmelCase : Any = np.array( [0.6959826, 0.868279, 0.7558092, 0.68769467, 0.85805804, 0.65977496, 0.44885302, 0.5959111, 0.4251595] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 ), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}""" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 ), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}""" @slow @require_torch_gpu class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): """simple docstring""" def lowercase__ ( self ): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def lowercase__ ( self ): """simple docstring""" lowerCAmelCase : int = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy" ) lowerCAmelCase : Optional[int] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinskyv22/hint_image_cat.png" ) lowerCAmelCase : Any = torch.from_numpy(np.array(snake_case__ ) ).float() / 255.0 lowerCAmelCase : Optional[Any] = hint.permute(2 , 0 , 1 ).unsqueeze(0 ) lowerCAmelCase : Optional[Any] = KandinskyVaaPriorPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa ) pipe_prior.to(snake_case__ ) lowerCAmelCase : Dict = KandinskyVaaControlnetPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-controlnet-depth" , torch_dtype=torch.floataa ) lowerCAmelCase : Union[str, Any] = pipeline.to(snake_case__ ) pipeline.set_progress_bar_config(disable=snake_case__ ) lowerCAmelCase : Dict = "A robot, 4k photo" lowerCAmelCase : Union[str, Any] = torch.Generator(device="cuda" ).manual_seed(0 ) lowerCAmelCase , lowerCAmelCase : Union[str, Any] = pipe_prior( snake_case__ , generator=snake_case__ , num_inference_steps=5 , negative_prompt="" , ).to_tuple() lowerCAmelCase : Dict = torch.Generator(device="cuda" ).manual_seed(0 ) lowerCAmelCase : Tuple = pipeline( image_embeds=snake_case__ , 
negative_image_embeds=snake_case__ , hint=snake_case__ , generator=snake_case__ , num_inference_steps=100 , output_type="np" , ) lowerCAmelCase : Optional[int] = output.images[0] assert image.shape == (512, 512, 3) assert_mean_pixel_difference(snake_case__ , snake_case__ )
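A condensed sketch of what the slow test runs, assuming a CUDA device and fp16 weights; the random hint tensor is a stand-in for a real depth map scaled to [0, 1], and the hint resolution is assumed to match the output resolution:

import torch
from diffusers import KandinskyV22ControlnetPipeline, KandinskyV22PriorPipeline

prior = KandinskyV22PriorPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
).to("cuda")
pipe = KandinskyV22ControlnetPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
).to("cuda")

image_embeds, negative_embeds = prior("A robot, 4k photo", num_inference_steps=5).to_tuple()
hint = torch.rand(1, 3, 512, 512, dtype=torch.float16, device="cuda")  # placeholder depth hint
image = pipe(
    image_embeds=image_embeds,
    negative_image_embeds=negative_embeds,
    hint=hint,
    height=512,
    width=512,
    num_inference_steps=50,
).images[0]
image.save("robot.png")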
108
'''simple docstring''' from typing import Union from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING lowercase : str = logging.get_logger(__name__) @add_end_docstrings(__snake_case ) class A ( __snake_case ): def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> List[Any]: """simple docstring""" super().__init__(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) self.check_model_type(SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , **SCREAMING_SNAKE_CASE ) -> Tuple: """simple docstring""" A, A : Dict = {}, {} if padding is not None: A : List[str] = padding if truncation is not None: A : Dict = truncation if top_k is not None: A : Optional[Any] = top_k return preprocess_params, {}, postprocess_params def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" if isinstance(SCREAMING_SNAKE_CASE , (Image.Image, str) ) and isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): A : int = {'''image''': image, '''question''': question} else: A : Any = image A : Any = super().__call__(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) return results def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=False ) -> Any: """simple docstring""" A : Union[str, Any] = load_image(inputs['''image'''] ) A : Optional[Any] = self.tokenizer( inputs['''question'''] , return_tensors=self.framework , padding=SCREAMING_SNAKE_CASE , truncation=SCREAMING_SNAKE_CASE ) A : Dict = self.image_processor(images=SCREAMING_SNAKE_CASE , return_tensors=self.framework ) model_inputs.update(SCREAMING_SNAKE_CASE ) return model_inputs def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> List[str]: """simple docstring""" A : List[Any] = self.model(**SCREAMING_SNAKE_CASE ) return model_outputs def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=5 ) -> int: """simple docstring""" if top_k > self.model.config.num_labels: A : Dict = self.model.config.num_labels if self.framework == "pt": A : Optional[int] = model_outputs.logits.sigmoid()[0] A, A : int = probs.topk(SCREAMING_SNAKE_CASE ) else: raise ValueError(F'Unsupported framework: {self.framework}' ) A : int = scores.tolist() A : List[str] = ids.tolist() return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )]
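A minimal usage sketch for the pipeline class above, assuming a VQA-finetuned checkpoint such as ViLT is available on the Hub:

from transformers import pipeline

vqa = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
answers = vqa(
    image="http://images.cocodataset.org/val2017/000000039769.jpg",
    question="How many cats are there?",
    top_k=2,
)
print(answers)  # e.g. [{'score': ..., 'answer': '2'}, ...]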
3
0
"""simple docstring""" import flax.linen as nn import jax import jax.numpy as jnp class SCREAMING_SNAKE_CASE__ ( nn.Module ): __lowerCAmelCase : int __lowerCAmelCase : jnp.dtype = jnp.floataa def SCREAMING_SNAKE_CASE ( self ) -> Tuple: '''simple docstring''' UpperCAmelCase : List[str] = nn.Conv( self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) def __call__( self , _SCREAMING_SNAKE_CASE ) -> Optional[Any]: '''simple docstring''' UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Dict = hidden_states.shape UpperCAmelCase : str = jax.image.resize( _SCREAMING_SNAKE_CASE , shape=(batch, height * 2, width * 2, channels) , method="""nearest""" , ) UpperCAmelCase : Optional[int] = self.conv(_SCREAMING_SNAKE_CASE ) return hidden_states class SCREAMING_SNAKE_CASE__ ( nn.Module ): __lowerCAmelCase : int __lowerCAmelCase : jnp.dtype = jnp.floataa def SCREAMING_SNAKE_CASE ( self ) -> List[str]: '''simple docstring''' UpperCAmelCase : Tuple = nn.Conv( self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) def __call__( self , _SCREAMING_SNAKE_CASE ) -> Any: '''simple docstring''' UpperCAmelCase : Any = self.conv(_SCREAMING_SNAKE_CASE ) return hidden_states class SCREAMING_SNAKE_CASE__ ( nn.Module ): __lowerCAmelCase : int __lowerCAmelCase : int = None __lowerCAmelCase : float = 0.0 __lowerCAmelCase : bool = None __lowerCAmelCase : jnp.dtype = jnp.floataa def SCREAMING_SNAKE_CASE ( self ) -> List[str]: '''simple docstring''' UpperCAmelCase : Optional[Any] = self.in_channels if self.out_channels is None else self.out_channels UpperCAmelCase : Any = nn.GroupNorm(num_groups=32 , epsilon=1E-5 ) UpperCAmelCase : Optional[Any] = nn.Conv( _SCREAMING_SNAKE_CASE , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) UpperCAmelCase : str = nn.Dense(_SCREAMING_SNAKE_CASE , dtype=self.dtype ) UpperCAmelCase : str = nn.GroupNorm(num_groups=32 , epsilon=1E-5 ) UpperCAmelCase : List[str] = nn.Dropout(self.dropout_prob ) UpperCAmelCase : str = nn.Conv( _SCREAMING_SNAKE_CASE , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) UpperCAmelCase : Tuple = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut UpperCAmelCase : Dict = None if use_nin_shortcut: UpperCAmelCase : str = nn.Conv( _SCREAMING_SNAKE_CASE , kernel_size=(1, 1) , strides=(1, 1) , padding="""VALID""" , dtype=self.dtype , ) def __call__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=True ) -> List[Any]: '''simple docstring''' UpperCAmelCase : Tuple = hidden_states UpperCAmelCase : Dict = self.norma(_SCREAMING_SNAKE_CASE ) UpperCAmelCase : Tuple = nn.swish(_SCREAMING_SNAKE_CASE ) UpperCAmelCase : Optional[int] = self.conva(_SCREAMING_SNAKE_CASE ) UpperCAmelCase : str = self.time_emb_proj(nn.swish(_SCREAMING_SNAKE_CASE ) ) UpperCAmelCase : Optional[int] = jnp.expand_dims(jnp.expand_dims(_SCREAMING_SNAKE_CASE , 1 ) , 1 ) UpperCAmelCase : Union[str, Any] = hidden_states + temb UpperCAmelCase : Any = self.norma(_SCREAMING_SNAKE_CASE ) UpperCAmelCase : Union[str, Any] = nn.swish(_SCREAMING_SNAKE_CASE ) UpperCAmelCase : int = self.dropout(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) UpperCAmelCase : Tuple = self.conva(_SCREAMING_SNAKE_CASE ) if self.conv_shortcut is not None: UpperCAmelCase : Dict = self.conv_shortcut(_SCREAMING_SNAKE_CASE ) return hidden_states + residual
109
'''simple docstring''' from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowercase : Union[str, Any] = logging.get_logger(__name__) lowercase : str = { 'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/config.json', 'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/config.json', 'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/config.json', 'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/config.json', 'bert-base-multilingual-uncased': 'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json', 'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json', 'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/config.json', 'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/config.json', 'bert-large-uncased-whole-word-masking': ( 'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json' ), 'bert-large-cased-whole-word-masking': ( 'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json' ), 'bert-large-uncased-whole-word-masking-finetuned-squad': ( 'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json' ), 'bert-large-cased-whole-word-masking-finetuned-squad': ( 'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json' ), 'bert-base-cased-finetuned-mrpc': 'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json', 'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json', 'bert-base-german-dbmdz-uncased': 'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json', 'cl-tohoku/bert-base-japanese': 'https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json', 'cl-tohoku/bert-base-japanese-whole-word-masking': ( 'https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json' ), 'cl-tohoku/bert-base-japanese-char': ( 'https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json' ), 'cl-tohoku/bert-base-japanese-char-whole-word-masking': ( 'https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json' ), 'TurkuNLP/bert-base-finnish-cased-v1': ( 'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json' ), 'TurkuNLP/bert-base-finnish-uncased-v1': ( 'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json' ), 'wietsedv/bert-base-dutch-cased': 'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json', # See all BERT models at https://huggingface.co/models?filter=bert } class A ( __snake_case ): __magic_name__ = '''bert''' def __init__( self , SCREAMING_SNAKE_CASE=30522 , SCREAMING_SNAKE_CASE=768 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=3072 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=512 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=1e-12 , SCREAMING_SNAKE_CASE=0 , SCREAMING_SNAKE_CASE="absolute" , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=None , **SCREAMING_SNAKE_CASE , ) -> 
Optional[int]: """simple docstring""" super().__init__(pad_token_id=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) A : Optional[int] = vocab_size A : Optional[Any] = hidden_size A : List[Any] = num_hidden_layers A : List[str] = num_attention_heads A : Dict = hidden_act A : Optional[Any] = intermediate_size A : List[Any] = hidden_dropout_prob A : List[Any] = attention_probs_dropout_prob A : Optional[Any] = max_position_embeddings A : List[str] = type_vocab_size A : Dict = initializer_range A : str = layer_norm_eps A : int = position_embedding_type A : Dict = use_cache A : str = classifier_dropout class A ( __snake_case ): @property def __lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" if self.task == "multiple-choice": A : Optional[Any] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: A : Optional[int] = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ('''token_type_ids''', dynamic_axis), ] )
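A minimal sketch of building a randomly initialized model from the configuration class above:

from transformers import BertConfig, BertModel

config = BertConfig(
    hidden_size=256, num_hidden_layers=4, num_attention_heads=4, intermediate_size=1024
)
model = BertModel(config)        # random weights, no pretrained checkpoint
print(model.config.hidden_size)  # 256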
3
0
from __future__ import annotations import json import requests from bsa import BeautifulSoup from fake_useragent import UserAgent lowerCAmelCase = {'UserAgent': UserAgent().random} def _a ( SCREAMING_SNAKE_CASE ): """simple docstring""" lowercase__ = script.contents[0] lowercase__ = json.loads(data[data.find('''{"config"''' ) : -1] ) return info["entry_data"]["ProfilePage"][0]["graphql"]["user"] class _a : def __init__( self: List[str] , UpperCamelCase_: Optional[Any] ) -> Tuple: """simple docstring""" lowercase__ = f'https://www.instagram.com/{username}/' lowercase__ = self.get_json() def lowerCamelCase_ ( self: Any ) -> dict: """simple docstring""" lowercase__ = requests.get(self.url , headers=UpperCamelCase_ ).text lowercase__ = BeautifulSoup(UpperCamelCase_ , '''html.parser''' ).find_all('''script''' ) try: return extract_user_profile(scripts[4] ) except (json.decoder.JSONDecodeError, KeyError): return extract_user_profile(scripts[3] ) def __repr__( self: Union[str, Any] ) -> str: """simple docstring""" return f'{self.__class__.__name__}(\'{self.username}\')' def __str__( self: Tuple ) -> str: """simple docstring""" return f'{self.fullname} ({self.username}) is {self.biography}' @property def lowerCamelCase_ ( self: List[Any] ) -> str: """simple docstring""" return self.user_data["username"] @property def lowerCamelCase_ ( self: int ) -> str: """simple docstring""" return self.user_data["full_name"] @property def lowerCamelCase_ ( self: str ) -> str: """simple docstring""" return self.user_data["biography"] @property def lowerCamelCase_ ( self: str ) -> str: """simple docstring""" return self.user_data["business_email"] @property def lowerCamelCase_ ( self: int ) -> str: """simple docstring""" return self.user_data["external_url"] @property def lowerCamelCase_ ( self: Dict ) -> int: """simple docstring""" return self.user_data["edge_followed_by"]["count"] @property def lowerCamelCase_ ( self: Optional[Any] ) -> int: """simple docstring""" return self.user_data["edge_follow"]["count"] @property def lowerCamelCase_ ( self: Union[str, Any] ) -> int: """simple docstring""" return self.user_data["edge_owner_to_timeline_media"]["count"] @property def lowerCamelCase_ ( self: int ) -> str: """simple docstring""" return self.user_data["profile_pic_url_hd"] @property def lowerCamelCase_ ( self: Optional[int] ) -> bool: """simple docstring""" return self.user_data["is_verified"] @property def lowerCamelCase_ ( self: str ) -> bool: """simple docstring""" return self.user_data["is_private"] def _a ( SCREAMING_SNAKE_CASE = "github" ): """simple docstring""" import os if os.environ.get('''CI''' ): return # test failing on GitHub Actions lowercase__ = InstagramUser(SCREAMING_SNAKE_CASE ) assert instagram_user.user_data assert isinstance(instagram_user.user_data , SCREAMING_SNAKE_CASE ) assert instagram_user.username == username if username != "github": return assert instagram_user.fullname == "GitHub" assert instagram_user.biography == "Built for developers." 
assert instagram_user.number_of_posts > 1_50 assert instagram_user.number_of_followers > 12_00_00 assert instagram_user.number_of_followings > 15 assert instagram_user.email == "support@github.com" assert instagram_user.website == "https://github.com/readme" assert instagram_user.profile_picture_url.startswith('''https://instagram.''' ) assert instagram_user.is_verified is True assert instagram_user.is_private is False if __name__ == "__main__": import doctest doctest.testmod() lowerCAmelCase = InstagramUser('github') print(instagram_user) print(f"""{instagram_user.number_of_posts = }""") print(f"""{instagram_user.number_of_followers = }""") print(f"""{instagram_user.number_of_followings = }""") print(f"""{instagram_user.email = }""") print(f"""{instagram_user.website = }""") print(f"""{instagram_user.profile_picture_url = }""") print(f"""{instagram_user.is_verified = }""") print(f"""{instagram_user.is_private = }""")
110
'''simple docstring'''
import requests
from bs4 import BeautifulSoup


def get_citation(base_url, params):
    '''simple docstring'''
    soup = BeautifulSoup(requests.get(base_url, params=params).content, 'html.parser')
    div = soup.find('div', attrs={'class': 'gs_ri'})
    anchors = div.find('div', attrs={'class': 'gs_fl'}).find_all('a')
    return anchors[2].get_text()


if __name__ == "__main__":
    params = {
        'title': (
            'Precisely geometry controlled microsupercapacitors for ultrahigh areal '
            'capacitance, volumetric capacitance, and energy density'
        ),
        'journal': 'Chem. Mater.',
        'volume': 30,
        'pages': '3979-3990',
        'year': 2018,
        'hl': 'en',
    }
    print(get_citation('https://scholar.google.com/scholar_lookup', params=params))
3
0
import argparse import torch from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration from transformers.utils import logging logging.set_verbosity_info() __UpperCamelCase : List[str] = logging.get_logger(__name__) __UpperCamelCase : List[Any] = [ ['attention', 'attn'], ['encoder_attention', 'encoder_attn'], ['q_lin', 'q_proj'], ['k_lin', 'k_proj'], ['v_lin', 'v_proj'], ['out_lin', 'out_proj'], ['norm_embeddings', 'layernorm_embedding'], ['position_embeddings', 'embed_positions'], ['embeddings', 'embed_tokens'], ['ffn.lin', 'fc'], ] def A ( _lowercase ): if k == "embeddings.weight": return "shared.weight" for parlai_name, hf_name in PATTERNS: SCREAMING_SNAKE_CASE : Union[str, Any] = k.replace(snake_case__ , snake_case__ ) if k.startswith('''encoder''' ): SCREAMING_SNAKE_CASE : List[Any] = k.replace('''.attn''' , '''.self_attn''' ) SCREAMING_SNAKE_CASE : Any = k.replace('''norm1''' , '''self_attn_layer_norm''' ) SCREAMING_SNAKE_CASE : str = k.replace('''norm2''' , '''final_layer_norm''' ) elif k.startswith('''decoder''' ): SCREAMING_SNAKE_CASE : str = k.replace('''norm1''' , '''self_attn_layer_norm''' ) SCREAMING_SNAKE_CASE : Tuple = k.replace('''norm2''' , '''encoder_attn_layer_norm''' ) SCREAMING_SNAKE_CASE : Any = k.replace('''norm3''' , '''final_layer_norm''' ) return k def A ( _lowercase ): SCREAMING_SNAKE_CASE : Optional[int] = [ '''model.encoder.layernorm_embedding.weight''', '''model.encoder.layernorm_embedding.bias''', '''model.decoder.layernorm_embedding.weight''', '''model.decoder.layernorm_embedding.bias''', ] for k in keys: SCREAMING_SNAKE_CASE : Optional[Any] = sd.pop(snake_case__ ) SCREAMING_SNAKE_CASE : Tuple = k.replace('''layernorm_embedding''' , '''layer_norm''' ) assert new_k not in sd SCREAMING_SNAKE_CASE : List[str] = v __UpperCamelCase : Any = ['START'] @torch.no_grad() def A ( _lowercase , _lowercase , _lowercase ): SCREAMING_SNAKE_CASE : Union[str, Any] = torch.load(snake_case__ , map_location='''cpu''' ) SCREAMING_SNAKE_CASE : Any = model['''model'''] SCREAMING_SNAKE_CASE : Dict = BlenderbotConfig.from_json_file(snake_case__ ) SCREAMING_SNAKE_CASE : List[Any] = BlenderbotForConditionalGeneration(snake_case__ ) SCREAMING_SNAKE_CASE : Optional[int] = m.model.state_dict().keys() SCREAMING_SNAKE_CASE : Optional[Any] = [] SCREAMING_SNAKE_CASE : List[Any] = {} for k, v in sd.items(): if k in IGNORE_KEYS: continue SCREAMING_SNAKE_CASE : List[Any] = rename_state_dict_key(snake_case__ ) if new_k not in valid_keys: failures.append([k, new_k] ) else: SCREAMING_SNAKE_CASE : List[str] = v if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm rename_layernorm_keys(snake_case__ ) m.model.load_state_dict(snake_case__ , strict=snake_case__ ) m.half() m.save_pretrained(snake_case__ ) if __name__ == "__main__": __UpperCamelCase : Optional[int] = argparse.ArgumentParser() # Required parameters parser.add_argument('--src_path', type=str, help='like blenderbot-model.bin') parser.add_argument('--save_dir', default='hf_blenderbot', type=str, help='Where to save converted model.') parser.add_argument( '--hf_config_json', default='blenderbot-3b-config.json', type=str, help='Path to config to use' ) __UpperCamelCase : int = parser.parse_args() convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
182
'''simple docstring''' class A : def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Tuple: """simple docstring""" A : Any = None A : Optional[Any] = None A : Tuple = graph self._normalize_graph(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) A : Dict = len(SCREAMING_SNAKE_CASE ) A : Optional[Any] = None def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[Any]: """simple docstring""" if sources is int: A : Dict = [sources] if sinks is int: A : str = [sinks] if len(SCREAMING_SNAKE_CASE ) == 0 or len(SCREAMING_SNAKE_CASE ) == 0: return A : Optional[int] = sources[0] A : Union[str, Any] = sinks[0] # make fake vertex if there are more # than one source or sink if len(SCREAMING_SNAKE_CASE ) > 1 or len(SCREAMING_SNAKE_CASE ) > 1: A : Optional[int] = 0 for i in sources: max_input_flow += sum(self.graph[i] ) A : Dict = len(self.graph ) + 1 for room in self.graph: room.insert(0 , 0 ) self.graph.insert(0 , [0] * size ) for i in sources: A : Dict = max_input_flow A : Tuple = 0 A : Tuple = len(self.graph ) + 1 for room in self.graph: room.append(0 ) self.graph.append([0] * size ) for i in sinks: A : Optional[Any] = max_input_flow A : Optional[Any] = size - 1 def __lowerCAmelCase ( self ) -> Any: """simple docstring""" if self.maximum_flow_algorithm is None: raise Exception('''You need to set maximum flow algorithm before.''' ) if self.source_index is None or self.sink_index is None: return 0 self.maximum_flow_algorithm.execute() return self.maximum_flow_algorithm.getMaximumFlow() def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Union[str, Any]: """simple docstring""" A : List[Any] = algorithm(self ) class A : def __init__( self , SCREAMING_SNAKE_CASE ) -> Union[str, Any]: """simple docstring""" A : Union[str, Any] = flow_network A : Optional[Any] = flow_network.verticesCount A : Tuple = flow_network.sourceIndex A : Dict = flow_network.sinkIndex # it's just a reference, so you shouldn't change # it in your algorithms, use deep copy before doing that A : str = flow_network.graph A : Optional[Any] = False def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" if not self.executed: self._algorithm() A : Optional[int] = True def __lowerCAmelCase ( self ) -> Any: """simple docstring""" pass class A ( __snake_case ): def __init__( self , SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" super().__init__(SCREAMING_SNAKE_CASE ) # use this to save your result A : List[str] = -1 def __lowerCAmelCase ( self ) -> str: """simple docstring""" if not self.executed: raise Exception('''You should execute algorithm before using its result!''' ) return self.maximum_flow class A ( __snake_case ): def __init__( self , SCREAMING_SNAKE_CASE ) -> List[Any]: """simple docstring""" super().__init__(SCREAMING_SNAKE_CASE ) A : Optional[Any] = [[0] * self.verticies_count for i in range(self.verticies_count )] A : Union[str, Any] = [0] * self.verticies_count A : List[Any] = [0] * self.verticies_count def __lowerCAmelCase ( self ) -> Dict: """simple docstring""" A : Tuple = self.verticies_count # push some substance to graph for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ): self.preflow[self.source_index][nextvertex_index] += bandwidth self.preflow[nextvertex_index][self.source_index] -= bandwidth self.excesses[nextvertex_index] += bandwidth # Relabel-to-front selection rule A : Optional[Any] = [ i for i in range(self.verticies_count ) if i != self.source_index and i != self.sink_index ] # move through list 
A : Union[str, Any] = 0 while i < len(SCREAMING_SNAKE_CASE ): A : str = vertices_list[i] A : List[str] = self.heights[vertex_index] self.process_vertex(SCREAMING_SNAKE_CASE ) if self.heights[vertex_index] > previous_height: # if it was relabeled, swap elements # and start from 0 index vertices_list.insert(0 , vertices_list.pop(SCREAMING_SNAKE_CASE ) ) A : int = 0 else: i += 1 A : Optional[Any] = sum(self.preflow[self.source_index] ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Tuple: """simple docstring""" while self.excesses[vertex_index] > 0: for neighbour_index in range(self.verticies_count ): # if it's neighbour and current vertex is higher if ( self.graph[vertex_index][neighbour_index] - self.preflow[vertex_index][neighbour_index] > 0 and self.heights[vertex_index] > self.heights[neighbour_index] ): self.push(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) self.relabel(SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Union[str, Any]: """simple docstring""" A : Dict = min( self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , ) self.preflow[from_index][to_index] += preflow_delta self.preflow[to_index][from_index] -= preflow_delta self.excesses[from_index] -= preflow_delta self.excesses[to_index] += preflow_delta def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> int: """simple docstring""" A : Dict = None for to_index in range(self.verticies_count ): if ( self.graph[vertex_index][to_index] - self.preflow[vertex_index][to_index] > 0 ) and (min_height is None or self.heights[to_index] < min_height): A : Dict = self.heights[to_index] if min_height is not None: A : Dict = min_height + 1 if __name__ == "__main__": lowercase : Optional[int] = [0] lowercase : List[Any] = [3] # graph = [ # [0, 0, 4, 6, 0, 0], # [0, 0, 5, 2, 0, 0], # [0, 0, 0, 0, 4, 4], # [0, 0, 0, 0, 6, 6], # [0, 0, 0, 0, 0, 0], # [0, 0, 0, 0, 0, 0], # ] lowercase : int = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]] # prepare our network lowercase : List[str] = FlowNetwork(graph, entrances, exits) # set algorithm flow_network.set_maximum_flow_algorithm(PushRelabelExecutor) # and calculate lowercase : List[str] = flow_network.find_maximum_flow() print(f'''maximum flow is {maximum_flow}''')
3
0
'''simple docstring''' from copy import deepcopy class lowerCAmelCase__ : def __init__( self , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None ): """simple docstring""" if arr is None and size is not None: lowercase_ : List[Any] = size lowercase_ : Any = [0] * size elif arr is not None: self.init(__SCREAMING_SNAKE_CASE ) else: raise ValueError('''Either arr or size must be specified''' ) def _snake_case ( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" lowercase_ : Optional[Any] = len(__SCREAMING_SNAKE_CASE ) lowercase_ : Tuple = deepcopy(__SCREAMING_SNAKE_CASE ) for i in range(1 , self.size ): lowercase_ : List[str] = self.next_(__SCREAMING_SNAKE_CASE ) if j < self.size: self.tree[j] += self.tree[i] def _snake_case ( self ): """simple docstring""" lowercase_ : Tuple = self.tree[:] for i in range(self.size - 1 , 0 , -1 ): lowercase_ : Dict = self.next_(__SCREAMING_SNAKE_CASE ) if j < self.size: arr[j] -= arr[i] return arr @staticmethod def _snake_case ( __SCREAMING_SNAKE_CASE ): """simple docstring""" return index + (index & (-index)) @staticmethod def _snake_case ( __SCREAMING_SNAKE_CASE ): """simple docstring""" return index - (index & (-index)) def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): """simple docstring""" if index == 0: self.tree[0] += value return while index < self.size: self.tree[index] += value lowercase_ : Optional[int] = self.next_(__SCREAMING_SNAKE_CASE ) def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): """simple docstring""" self.add(__SCREAMING_SNAKE_CASE , value - self.get(__SCREAMING_SNAKE_CASE ) ) def _snake_case ( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" if right == 0: return 0 lowercase_ : Tuple = self.tree[0] right -= 1 # make right inclusive while right > 0: result += self.tree[right] lowercase_ : Optional[Any] = self.prev(__SCREAMING_SNAKE_CASE ) return result def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): """simple docstring""" return self.prefix(__SCREAMING_SNAKE_CASE ) - self.prefix(__SCREAMING_SNAKE_CASE ) def _snake_case ( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" return self.query(__SCREAMING_SNAKE_CASE , index + 1 ) def _snake_case ( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" value -= self.tree[0] if value < 0: return -1 lowercase_ : Tuple = 1 # Largest power of 2 <= size while j * 2 < self.size: j *= 2 lowercase_ : int = 0 while j > 0: if i + j < self.size and self.tree[i + j] <= value: value -= self.tree[i + j] i += j j //= 2 return i if __name__ == "__main__": import doctest doctest.testmod()
93
'''simple docstring'''


def solution(n: int = 10) -> str:
    '''simple docstring'''
    if not isinstance(n, int) or n < 0:
        raise ValueError('Invalid input')
    modulus = 10**n
    number = 28433 * (pow(2, 7830457, modulus)) + 1
    return str(number % modulus)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print(f'{solution(10) = }')
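A quick hedged aside on why the snippet uses three-argument pow: it reduces modulo 10**n at every squaring step, so the multi-million-digit power is never materialised. A minimal sketch (the timing contrast is indicative, not measured here):

modulus = 10**10
# Fast: modular exponentiation keeps every intermediate below 10**10.
fast = (28433 * pow(2, 7830457, modulus) + 1) % modulus
print(fast)
# By contrast, 2 ** 7830457 has roughly 2.36 million decimal digits, so
# computing it in full before reducing would be far more expensive.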
3
0
from __future__ import annotations

from random import choice


def random_pivot(lst):
    """simple docstring"""
    return choice(lst)


def kth_number(lst: list[int], k: int) -> int:
    """simple docstring"""
    pivot = random_pivot(lst)

    # partition based on pivot
    # linear time
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]

    # if we get lucky, pivot might be the element we want.
    # we can easily see this:
    # small (elements smaller than k)
    # + pivot (kth element)
    # + big (elements larger than k)
    if len(small) == k - 1:
        return pivot
    # pivot is in elements bigger than k
    elif len(small) < k - 1:
        return kth_number(big, k - len(small) - 1)
    # pivot is in elements smaller than k
    else:
        return kth_number(small, k)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
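A small usage sketch for the quickselect above, assuming distinct elements as the implementation requires (the strict < / > partition drops duplicates). Expected linear time thanks to the random pivot; quadratic in the worst case:

data = [10, 4, 7, 1, 9]
print(kth_number(data, 1))  # 1  (smallest)
print(kth_number(data, 3))  # 7  (3rd smallest)
print(kth_number(data, 5))  # 10 (largest)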
219
'''simple docstring''' from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast from ...utils import logging lowercase : List[str] = logging.get_logger(__name__) lowercase : str = { 'EleutherAI/gpt-neo-1.3B': 'https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json', # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo } class A ( __snake_case ): __magic_name__ = '''gpt_neo''' __magic_name__ = ['''past_key_values'''] __magic_name__ = {'''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''} def __init__( self , SCREAMING_SNAKE_CASE=50257 , SCREAMING_SNAKE_CASE=2048 , SCREAMING_SNAKE_CASE=2048 , SCREAMING_SNAKE_CASE=24 , SCREAMING_SNAKE_CASE=[[["global", "local"], 12]] , SCREAMING_SNAKE_CASE=16 , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=256 , SCREAMING_SNAKE_CASE="gelu_new" , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=1e-5 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=50256 , SCREAMING_SNAKE_CASE=50256 , **SCREAMING_SNAKE_CASE , ) -> int: """simple docstring""" A : Union[str, Any] = vocab_size A : Optional[Any] = max_position_embeddings A : Dict = hidden_size A : Optional[Any] = num_layers A : Tuple = num_heads A : int = intermediate_size A : Optional[Any] = window_size A : List[Any] = activation_function A : Union[str, Any] = resid_dropout A : Any = embed_dropout A : List[Any] = attention_dropout A : str = classifier_dropout A : List[Any] = layer_norm_epsilon A : str = initializer_range A : List[str] = use_cache A : Optional[int] = bos_token_id A : List[Any] = eos_token_id A : int = attention_types A : int = self.expand_attention_types_params(SCREAMING_SNAKE_CASE ) if len(self.attention_layers ) != self.num_layers: raise ValueError( '''Configuration for convolutional module is incorrect. ''' '''It is required that `len(config.attention_layers)` == `config.num_layers` ''' F'but is `len(config.attention_layers) = {len(self.attention_layers )}`, ' F'`config.num_layers = {self.num_layers}`. ' '''`config.attention_layers` is prepared using `config.attention_types`. 
''' '''Please verify the value of `config.attention_types` argument.''' ) super().__init__(bos_token_id=SCREAMING_SNAKE_CASE , eos_token_id=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) @staticmethod def __lowerCAmelCase ( SCREAMING_SNAKE_CASE ) -> int: """simple docstring""" A : List[str] = [] for item in attention_types: for _ in range(item[1] ): attentions.extend(item[0] ) return attentions def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' import torch A : Tuple = input.size() A : Union[str, Any] = len(snake_case__ ) A : List[str] = shape[dimension] A : Union[str, Any] = torch.arange(0 , snake_case__ , snake_case__ ) A : List[str] = torch.div(sizedim - size , snake_case__ , rounding_mode='''floor''' ) + 1 A : Optional[int] = torch.arange(snake_case__ ) + low_indices[:min_length][:, None] A : str = [slice(snake_case__ )] * rank A : List[Any] = indices A : Union[str, Any] = input[s] A : List[str] = list(range(0 , rank + 1 ) ) perm.append(perm.pop(dimension + 1 ) ) return sliced.permute(snake_case__ ) def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' import torch A : List[str] = torch.arange(1 , snake_case__ ) A : Optional[int] = torch.remainder(snake_case__ , snake_case__ ) A : Optional[int] = remainders == 0 A : Optional[Any] = candidates[divisor_indices] A : Optional[int] = torch.max(snake_case__ ) return largest_divisor, torch.div(snake_case__ , snake_case__ , rounding_mode='''floor''' ) class A ( __snake_case ): @property def __lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" A : Tuple = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} ) if self.use_past: self.fill_with_past_key_values_(SCREAMING_SNAKE_CASE , direction='''inputs''' ) A : Optional[Any] = {0: '''batch''', 1: '''past_sequence + sequence'''} else: A : Dict = {0: '''batch''', 1: '''sequence'''} return common_inputs @property def __lowerCAmelCase ( self ) -> int: """simple docstring""" return self._config.num_heads def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = -1 , SCREAMING_SNAKE_CASE = -1 , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = None , ) -> Mapping[str, Any]: """simple docstring""" A : List[str] = super(SCREAMING_SNAKE_CASE , self ).generate_dummy_inputs( SCREAMING_SNAKE_CASE , batch_size=SCREAMING_SNAKE_CASE , seq_length=SCREAMING_SNAKE_CASE , is_pair=SCREAMING_SNAKE_CASE , framework=SCREAMING_SNAKE_CASE ) # We need to order the input in the way they appears in the forward() A : Any = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch A, A : Dict = common_inputs['''input_ids'''].shape # Not using the same length for past_key_values A : str = seqlen + 2 A : List[Any] = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) A : Any = [ (torch.zeros(SCREAMING_SNAKE_CASE ), torch.zeros(SCREAMING_SNAKE_CASE )) for _ in range(self.num_layers ) ] A : str = common_inputs['''attention_mask'''] if self.use_past: A : Optional[int] = ordered_inputs['''attention_mask'''].dtype A : List[str] = torch.cat( [ordered_inputs['''attention_mask'''], torch.ones(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , dtype=SCREAMING_SNAKE_CASE )] , dim=1 ) return ordered_inputs @property def __lowerCAmelCase ( self ) -> 
int: """simple docstring""" return 13
3
0
import copy import re class _lowerCAmelCase : _lowercase ='''hp''' _lowercase ={} _lowercase =None @classmethod def __a ( cls , _UpperCamelCase , _UpperCamelCase ) -> List[Any]: lowerCAmelCase_ = prefix lowerCAmelCase_ = defaults cls.build_naming_info() @staticmethod def __a ( _UpperCamelCase , _UpperCamelCase ) -> List[str]: if len(_UpperCamelCase ) == 0: return "" lowerCAmelCase_ = None if any(char.isdigit() for char in word ): raise Exception(f"""Parameters should not contain numbers: \'{word}\' contains a number""" ) if word in info["short_word"]: return info["short_word"][word] for prefix_len in range(1 , len(_UpperCamelCase ) + 1 ): lowerCAmelCase_ = word[:prefix_len] if prefix in info["reverse_short_word"]: continue else: lowerCAmelCase_ = prefix break if short_word is None: # Paranoid fallback def int_to_alphabetic(_UpperCamelCase ): lowerCAmelCase_ = '''''' while integer != 0: lowerCAmelCase_ = chr(ord("A" ) + integer % 10 ) + s integer //= 10 return s lowerCAmelCase_ = 0 while True: lowerCAmelCase_ = word + '''#''' + int_to_alphabetic(_UpperCamelCase ) if sword in info["reverse_short_word"]: continue else: lowerCAmelCase_ = sword break lowerCAmelCase_ = short_word lowerCAmelCase_ = word return short_word @staticmethod def __a ( _UpperCamelCase , _UpperCamelCase ) -> int: lowerCAmelCase_ = param_name.split("_" ) lowerCAmelCase_ = [TrialShortNamer.shortname_for_word(_UpperCamelCase , _UpperCamelCase ) for word in words] # We try to create a separatorless short name, but if there is a collision we have to fallback # to a separated short name lowerCAmelCase_ = ['''''', '''_'''] for separator in separators: lowerCAmelCase_ = separator.join(_UpperCamelCase ) if shortname not in info["reverse_short_param"]: lowerCAmelCase_ = shortname lowerCAmelCase_ = param_name return shortname return param_name @staticmethod def __a ( _UpperCamelCase , _UpperCamelCase ) -> Any: lowerCAmelCase_ = TrialShortNamer.shortname_for_key(_UpperCamelCase , _UpperCamelCase ) lowerCAmelCase_ = short_name lowerCAmelCase_ = param_name @classmethod def __a ( cls ) -> Union[str, Any]: if cls.NAMING_INFO is not None: return lowerCAmelCase_ = { '''short_word''': {}, '''reverse_short_word''': {}, '''short_param''': {}, '''reverse_short_param''': {}, } lowerCAmelCase_ = list(cls.DEFAULTS.keys() ) for k in field_keys: cls.add_new_param_name(_UpperCamelCase , _UpperCamelCase ) lowerCAmelCase_ = info @classmethod def __a ( cls , _UpperCamelCase ) -> Tuple: cls.build_naming_info() assert cls.PREFIX is not None lowerCAmelCase_ = [copy.copy(cls.PREFIX )] for k, v in params.items(): if k not in cls.DEFAULTS: raise Exception(f"""You should provide a default value for the param name {k} with value {v}""" ) if v == cls.DEFAULTS[k]: # The default value is not added to the name continue lowerCAmelCase_ = cls.NAMING_INFO['''short_param'''][k] if isinstance(_UpperCamelCase , _UpperCamelCase ): lowerCAmelCase_ = 1 if v else 0 lowerCAmelCase_ = '''''' if isinstance(_UpperCamelCase , (int, float) ) else '''-''' lowerCAmelCase_ = f"""{key}{sep}{v}""" name.append(_UpperCamelCase ) return "_".join(_UpperCamelCase ) @classmethod def __a ( cls , _UpperCamelCase ) -> Union[str, Any]: lowerCAmelCase_ = repr[len(cls.PREFIX ) + 1 :] if repr == "": lowerCAmelCase_ = [] else: lowerCAmelCase_ = repr.split("_" ) lowerCAmelCase_ = {} for value in values: if "-" in value: lowerCAmelCase_ = value.split("-" ) else: lowerCAmelCase_ = re.sub("[0-9.]" , "" , _UpperCamelCase ) lowerCAmelCase_ = float(re.sub("[^0-9.]" , "" , _UpperCamelCase ) ) 
lowerCAmelCase_ = cls.NAMING_INFO['''reverse_short_param'''][p_k] lowerCAmelCase_ = p_v for k in cls.DEFAULTS: if k not in parameters: lowerCAmelCase_ = cls.DEFAULTS[k] return parameters
231
'''simple docstring''' import flax.linen as nn import jax.numpy as jnp from .attention_flax import FlaxTransformeraDModel from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD class A ( nn.Module ): __magic_name__ = 42 __magic_name__ = 42 __magic_name__ = 0.0 __magic_name__ = 1 __magic_name__ = 1 __magic_name__ = True __magic_name__ = False __magic_name__ = False __magic_name__ = False __magic_name__ = jnp.floataa def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" A : Union[str, Any] = [] A : Union[str, Any] = [] for i in range(self.num_layers ): A : Any = self.in_channels if i == 0 else self.out_channels A : Optional[Any] = FlaxResnetBlockaD( in_channels=SCREAMING_SNAKE_CASE , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(SCREAMING_SNAKE_CASE ) A : Optional[int] = FlaxTransformeraDModel( in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(SCREAMING_SNAKE_CASE ) A : Union[str, Any] = resnets A : Union[str, Any] = attentions if self.add_downsample: A : int = FlaxDownsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=True ) -> Union[str, Any]: """simple docstring""" A : Optional[Any] = () for resnet, attn in zip(self.resnets , self.attentions ): A : int = resnet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE ) A : Dict = attn(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE ) output_states += (hidden_states,) if self.add_downsample: A : Optional[Any] = self.downsamplers_a(SCREAMING_SNAKE_CASE ) output_states += (hidden_states,) return hidden_states, output_states class A ( nn.Module ): __magic_name__ = 42 __magic_name__ = 42 __magic_name__ = 0.0 __magic_name__ = 1 __magic_name__ = True __magic_name__ = jnp.floataa def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" A : Optional[Any] = [] for i in range(self.num_layers ): A : Optional[Any] = self.in_channels if i == 0 else self.out_channels A : List[str] = FlaxResnetBlockaD( in_channels=SCREAMING_SNAKE_CASE , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(SCREAMING_SNAKE_CASE ) A : Dict = resnets if self.add_downsample: A : Dict = FlaxDownsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=True ) -> Optional[Any]: """simple docstring""" A : str = () for resnet in self.resnets: A : Optional[int] = resnet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE ) output_states += (hidden_states,) if self.add_downsample: A : Optional[int] = self.downsamplers_a(SCREAMING_SNAKE_CASE ) output_states += (hidden_states,) return hidden_states, output_states class A ( nn.Module ): __magic_name__ = 42 __magic_name__ = 42 __magic_name__ = 42 __magic_name__ = 0.0 __magic_name__ = 1 __magic_name__ = 1 __magic_name__ = True __magic_name__ = False __magic_name__ = False __magic_name__ = False __magic_name__ = jnp.floataa def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" A : Optional[Any] = [] A : Optional[int] = [] for i in 
range(self.num_layers ): A : str = self.in_channels if (i == self.num_layers - 1) else self.out_channels A : Dict = self.prev_output_channel if i == 0 else self.out_channels A : List[str] = FlaxResnetBlockaD( in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(SCREAMING_SNAKE_CASE ) A : int = FlaxTransformeraDModel( in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(SCREAMING_SNAKE_CASE ) A : Dict = resnets A : Optional[Any] = attentions if self.add_upsample: A : Optional[int] = FlaxUpsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=True ) -> Optional[int]: """simple docstring""" for resnet, attn in zip(self.resnets , self.attentions ): # pop res hidden states A : List[str] = res_hidden_states_tuple[-1] A : int = res_hidden_states_tuple[:-1] A : List[str] = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 ) A : Union[str, Any] = resnet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE ) A : Tuple = attn(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE ) if self.add_upsample: A : Dict = self.upsamplers_a(SCREAMING_SNAKE_CASE ) return hidden_states class A ( nn.Module ): __magic_name__ = 42 __magic_name__ = 42 __magic_name__ = 42 __magic_name__ = 0.0 __magic_name__ = 1 __magic_name__ = True __magic_name__ = jnp.floataa def __lowerCAmelCase ( self ) -> Dict: """simple docstring""" A : int = [] for i in range(self.num_layers ): A : List[Any] = self.in_channels if (i == self.num_layers - 1) else self.out_channels A : List[str] = self.prev_output_channel if i == 0 else self.out_channels A : str = FlaxResnetBlockaD( in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(SCREAMING_SNAKE_CASE ) A : List[Any] = resnets if self.add_upsample: A : Optional[Any] = FlaxUpsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=True ) -> Tuple: """simple docstring""" for resnet in self.resnets: # pop res hidden states A : Optional[int] = res_hidden_states_tuple[-1] A : Optional[Any] = res_hidden_states_tuple[:-1] A : List[Any] = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 ) A : Optional[Any] = resnet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE ) if self.add_upsample: A : List[str] = self.upsamplers_a(SCREAMING_SNAKE_CASE ) return hidden_states class A ( nn.Module ): __magic_name__ = 42 __magic_name__ = 0.0 __magic_name__ = 1 __magic_name__ = 1 __magic_name__ = False __magic_name__ = False __magic_name__ = jnp.floataa def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" A : str = [ FlaxResnetBlockaD( in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , ) ] A : List[Any] = [] for _ in range(self.num_layers ): A : int = FlaxTransformeraDModel( in_channels=self.in_channels , 
n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(SCREAMING_SNAKE_CASE ) A : Union[str, Any] = FlaxResnetBlockaD( in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(SCREAMING_SNAKE_CASE ) A : List[str] = resnets A : List[str] = attentions def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=True ) -> Dict: """simple docstring""" A : Optional[Any] = self.resnets[0](SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for attn, resnet in zip(self.attentions , self.resnets[1:] ): A : Optional[int] = attn(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE ) A : Union[str, Any] = resnet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE ) return hidden_states
3
0
'''simple docstring''' from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast from ...utils import logging lowerCamelCase : List[str] = logging.get_logger(__name__) lowerCamelCase : str = { 'EleutherAI/gpt-neo-1.3B': 'https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json', # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo } class __lowerCAmelCase (__snake_case ): '''simple docstring''' lowerCAmelCase__ : List[Any] = """gpt_neo""" lowerCAmelCase__ : Dict = ["""past_key_values"""] lowerCAmelCase__ : Optional[int] = {"""num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers"""} def __init__(self : int , UpperCamelCase : List[str]=50257 , UpperCamelCase : List[Any]=2048 , UpperCamelCase : Dict=2048 , UpperCamelCase : Optional[Any]=24 , UpperCamelCase : Dict=[[["global", "local"], 12]] , UpperCamelCase : Any=16 , UpperCamelCase : Any=None , UpperCamelCase : Tuple=256 , UpperCamelCase : Optional[Any]="gelu_new" , UpperCamelCase : Union[str, Any]=0.0 , UpperCamelCase : List[str]=0.0 , UpperCamelCase : Union[str, Any]=0.0 , UpperCamelCase : List[Any]=0.1 , UpperCamelCase : Tuple=1E-5 , UpperCamelCase : Union[str, Any]=0.02 , UpperCamelCase : Optional[int]=True , UpperCamelCase : List[str]=50256 , UpperCamelCase : Dict=50256 , **UpperCamelCase : int , ): '''simple docstring''' lowercase__ = vocab_size lowercase__ = max_position_embeddings lowercase__ = hidden_size lowercase__ = num_layers lowercase__ = num_heads lowercase__ = intermediate_size lowercase__ = window_size lowercase__ = activation_function lowercase__ = resid_dropout lowercase__ = embed_dropout lowercase__ = attention_dropout lowercase__ = classifier_dropout lowercase__ = layer_norm_epsilon lowercase__ = initializer_range lowercase__ = use_cache lowercase__ = bos_token_id lowercase__ = eos_token_id lowercase__ = attention_types lowercase__ = self.expand_attention_types_params(UpperCamelCase ) if len(self.attention_layers ) != self.num_layers: raise ValueError( '''Configuration for convolutional module is incorrect. ''' '''It is required that `len(config.attention_layers)` == `config.num_layers` ''' f"but is `len(config.attention_layers) = {len(self.attention_layers )}`, " f"`config.num_layers = {self.num_layers}`. " '''`config.attention_layers` is prepared using `config.attention_types`. 
''' '''Please verify the value of `config.attention_types` argument.''' ) super().__init__(bos_token_id=UpperCamelCase , eos_token_id=UpperCamelCase , **UpperCamelCase ) @staticmethod def UpperCamelCase__ (UpperCamelCase : Dict ): '''simple docstring''' lowercase__ = [] for item in attention_types: for _ in range(item[1] ): attentions.extend(item[0] ) return attentions def _SCREAMING_SNAKE_CASE (A , A , A , A ) -> List[str]: """simple docstring""" import torch lowercase__ = input.size() lowercase__ = len(snake_case__ ) lowercase__ = shape[dimension] lowercase__ = torch.arange(0 , snake_case__ , snake_case__ ) lowercase__ = torch.div(sizedim - size , snake_case__ , rounding_mode='''floor''' ) + 1 lowercase__ = torch.arange(snake_case__ ) + low_indices[:min_length][:, None] lowercase__ = [slice(snake_case__ )] * rank lowercase__ = indices lowercase__ = input[s] lowercase__ = list(range(0 , rank + 1 ) ) perm.append(perm.pop(dimension + 1 ) ) return sliced.permute(snake_case__ ) def _SCREAMING_SNAKE_CASE (A , A ) -> List[str]: """simple docstring""" import torch lowercase__ = torch.arange(1 , snake_case__ ) lowercase__ = torch.remainder(snake_case__ , snake_case__ ) lowercase__ = remainders == 0 lowercase__ = candidates[divisor_indices] lowercase__ = torch.max(snake_case__ ) return largest_divisor, torch.div(snake_case__ , snake_case__ , rounding_mode='''floor''' ) class __lowerCAmelCase (__snake_case ): '''simple docstring''' @property def UpperCamelCase__ (self : Optional[Any] ): '''simple docstring''' lowercase__ = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} ) if self.use_past: self.fill_with_past_key_values_(UpperCamelCase , direction='''inputs''' ) lowercase__ = {0: '''batch''', 1: '''past_sequence + sequence'''} else: lowercase__ = {0: '''batch''', 1: '''sequence'''} return common_inputs @property def UpperCamelCase__ (self : List[Any] ): '''simple docstring''' return self._config.num_heads def UpperCamelCase__ (self : List[Any] , UpperCamelCase : List[Any] , UpperCamelCase : str = -1 , UpperCamelCase : Tuple = -1 , UpperCamelCase : Tuple = False , UpperCamelCase : Optional[Any] = None , ): '''simple docstring''' lowercase__ = super(UpperCamelCase , self ).generate_dummy_inputs( UpperCamelCase , batch_size=UpperCamelCase , seq_length=UpperCamelCase , is_pair=UpperCamelCase , framework=UpperCamelCase ) # We need to order the input in the way they appears in the forward() lowercase__ = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch lowercase__ = common_inputs['''input_ids'''].shape # Not using the same length for past_key_values lowercase__ = seqlen + 2 lowercase__ = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) lowercase__ = [ (torch.zeros(UpperCamelCase ), torch.zeros(UpperCamelCase )) for _ in range(self.num_layers ) ] lowercase__ = common_inputs['''attention_mask'''] if self.use_past: lowercase__ = ordered_inputs['''attention_mask'''].dtype lowercase__ = torch.cat( [ordered_inputs['''attention_mask'''], torch.ones(UpperCamelCase , UpperCamelCase , dtype=UpperCamelCase )] , dim=1 ) return ordered_inputs @property def UpperCamelCase__ (self : Dict ): '''simple docstring''' return 13
2
'''simple docstring'''
import os


def solution():
    '''simple docstring'''
    file_path = os.path.join(os.path.dirname(__file__), 'num.txt')
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]


if __name__ == "__main__":
    print(solution())
3
0
def present_value(discount_rate: float, cash_flows: list) -> float:
    if discount_rate < 0:
        raise ValueError('Discount rate cannot be negative')
    if not cash_flows:
        raise ValueError('Cash flows list cannot be empty')
    pv = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows)
    )
    return round(pv, ndigits=2)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
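A worked example for the present-value helper above (figures are illustrative, not from the source); note that enumerate starts at 0, so the first cash flow is undiscounted, which matches an NPV-style outlay at time zero:

# A 1000 outlay followed by three 500 inflows at a 10% discount rate:
# -1000 + 500/1.1 + 500/1.21 + 500/1.331 ≈ 243.43
flows = [-1000.0, 500.0, 500.0, 500.0]
print(present_value(0.10, flows))  # 243.43 -> positive, the project adds value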
338
'''simple docstring'''
import pytest

import datasets.config
from datasets.utils.info_utils import is_small_dataset


@pytest.mark.parametrize('dataset_size', [None, 400 * 2**20, 600 * 2**20])
@pytest.mark.parametrize('input_in_memory_max_size', ['default', 0, 100 * 2**20, 900 * 2**20])
def test_is_small_dataset(dataset_size, input_in_memory_max_size, monkeypatch):
    '''simple docstring'''
    if input_in_memory_max_size != "default":
        monkeypatch.setattr(datasets.config, 'IN_MEMORY_MAX_SIZE', input_in_memory_max_size)
    in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE
    if input_in_memory_max_size == "default":
        assert in_memory_max_size == 0
    else:
        assert in_memory_max_size == input_in_memory_max_size
    if dataset_size and in_memory_max_size:
        expected = dataset_size < in_memory_max_size
    else:
        expected = False
    result = is_small_dataset(dataset_size)
    assert result == expected
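For orientation, the predicate exercised by the parametrised test above reduces to one comparison; a hedged sketch of the behaviour the cases pin down (not the datasets library source):

def is_small_dataset_sketch(dataset_size, in_memory_max_size):
    # Truthy only when a size is known, a cap is set, and the size fits the cap.
    if dataset_size is not None and in_memory_max_size:
        return dataset_size < in_memory_max_size
    return False

assert is_small_dataset_sketch(400 * 2**20, 900 * 2**20) is True
assert is_small_dataset_sketch(None, 900 * 2**20) is False
assert is_small_dataset_sketch(600 * 2**20, 0) is False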
3
0
def permute(nums):
    '''simple docstring'''
    result = []
    if len(nums) == 1:
        return [nums.copy()]
    for _ in range(len(nums)):
        n = nums.pop(0)
        permutations = permute(nums)
        for perm in permutations:
            perm.append(n)
        result.extend(permutations)
        nums.append(n)
    return result


def permute2(nums):
    '''simple docstring'''

    def backtrack(start):
        if start == len(nums) - 1:
            output.append(nums[:])
        else:
            for i in range(start, len(nums)):
                nums[i], nums[start] = nums[start], nums[i]
                backtrack(start + 1)
                nums[i], nums[start] = nums[start], nums[i]  # backtrack

    output = []
    backtrack(0)
    return output


if __name__ == "__main__":
    import doctest

    # use res to print the data in permute2 function
    res = permute2([1, 2, 3])
    print(res)
    doctest.testmod()
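Both helpers above enumerate all n! orderings; a quick usage sketch (names follow the cleaned-up snippet):

print(sorted(permute([1, 2, 3])))   # all 6 permutations of [1, 2, 3]
print(sorted(permute2([1, 2, 3])))  # same 6, generated by in-place swaps
# Note: permute() copies sublists on every recursive call, while the
# backtracking permute2() swaps in place, so it allocates noticeably less.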
248
'''simple docstring''' from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import PIL from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available from .timesteps import ( fastaa_timesteps, smartaa_timesteps, smartaa_timesteps, smartaaa_timesteps, smartaaa_timesteps, superaa_timesteps, superaa_timesteps, superaaa_timesteps, ) @dataclass class A ( __snake_case ): __magic_name__ = 42 __magic_name__ = 42 __magic_name__ = 42 try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipeline_if import IFPipeline from .pipeline_if_imgaimg import IFImgaImgPipeline from .pipeline_if_imgaimg_superresolution import IFImgaImgSuperResolutionPipeline from .pipeline_if_inpainting import IFInpaintingPipeline from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline from .pipeline_if_superresolution import IFSuperResolutionPipeline from .safety_checker import IFSafetyChecker from .watermark import IFWatermarker
3
0
from ..utils import DummyObject, requires_backends


class MidiProcessor(metaclass=DummyObject):
    _backends = ["transformers", "torch", "note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])
20
'''simple docstring''' from scipy.stats import pearsonr import datasets lowercase : Optional[int] = '\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n' lowercase : Optional[Any] = '\nArgs:\n predictions (`list` of `int`): Predicted class labels, as returned by a model.\n references (`list` of `int`): Ground truth labels.\n return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n Example 1-A simple example using only predictions and references.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n\n Example 2-The same as Example 1, but that also returns the `p-value`.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n >>> print(sorted(list(results.keys())))\n [\'p-value\', \'pearsonr\']\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n >>> print(round(results[\'p-value\'], 2))\n 0.15\n' lowercase : str = '\n@article{2020SciPy-NMeth,\nauthor = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Antonio H. 
and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\njournal = {Nature Methods},\nyear = {2020},\nvolume = {17},\npages = {261--272},\nadsurl = {https://rdcu.be/b08Wh},\ndoi = {10.1038/s41592-019-0686-2},\n}\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class A ( datasets.Metric ): def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''float''' ), '''references''': datasets.Value('''float''' ), } ) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'''] , ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False ) -> Union[str, Any]: """simple docstring""" if return_pvalue: A : Union[str, Any] = pearsonr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) return {"pearsonr": results[0], "p-value": results[1]} else: return {"pearsonr": float(pearsonr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )[0] )}
3
0
from typing import List, Optional import numpy as np from ...processing_utils import ProcessorMixin from ...utils import to_numpy class SCREAMING_SNAKE_CASE_ ( __snake_case ): __magic_name__: Dict = "EncodecFeatureExtractor" __magic_name__: Dict = ("T5Tokenizer", "T5TokenizerFast") def __init__( self : str , _A : int , _A : str ) -> Optional[int]: """simple docstring""" super().__init__(_A , _A ) snake_case_ : List[Any] = self.feature_extractor snake_case_ : Union[str, Any] = False def UpperCAmelCase_ ( self : Any , _A : List[Any]=None , _A : Optional[int]=None , _A : List[str]=True ) -> Tuple: """simple docstring""" return self.tokenizer.get_decoder_prompt_ids(task=_A , language=_A , no_timestamps=_A ) def __call__( self : List[Any] , *_A : Optional[int] , **_A : Optional[Any] ) -> List[str]: """simple docstring""" if self._in_target_context_manager: return self.current_processor(*_A , **_A ) snake_case_ : Any = kwargs.pop('audio' , _A ) snake_case_ : Optional[int] = kwargs.pop('sampling_rate' , _A ) snake_case_ : Any = kwargs.pop('text' , _A ) if len(_A ) > 0: snake_case_ : List[Any] = args[0] snake_case_ : Dict = args[1:] if audio is None and text is None: raise ValueError('You need to specify either an `audio` or `text` input to process.' ) if text is not None: snake_case_ : Tuple = self.tokenizer(_A , **_A ) if audio is not None: snake_case_ : List[str] = self.feature_extractor(_A , *_A , sampling_rate=_A , **_A ) if audio is None: return inputs elif text is None: return audio_inputs else: snake_case_ : Optional[Any] = audio_inputs['''input_values'''] if "padding_mask" in audio_inputs: snake_case_ : Optional[int] = audio_inputs['''padding_mask'''] return inputs def UpperCAmelCase_ ( self : Tuple , *_A : Dict , **_A : Dict ) -> int: """simple docstring""" snake_case_ : Optional[Any] = kwargs.pop('audio' , _A ) snake_case_ : Optional[int] = kwargs.pop('padding_mask' , _A ) if len(_A ) > 0: snake_case_ : List[str] = args[0] snake_case_ : Union[str, Any] = args[1:] if audio_values is not None: return self._decode_audio(_A , padding_mask=_A ) else: return self.tokenizer.batch_decode(*_A , **_A ) def UpperCAmelCase_ ( self : Optional[Any] , *_A : int , **_A : Any ) -> Dict: """simple docstring""" return self.tokenizer.decode(*_A , **_A ) def UpperCAmelCase_ ( self : Tuple , _A : Optional[Any] , _A : Dict = None ) -> List[np.ndarray]: """simple docstring""" snake_case_ : Tuple = to_numpy(_A ) snake_case_ : Union[str, Any] = audio_values.shape if padding_mask is None: return list(_A ) snake_case_ : List[str] = to_numpy(_A ) # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding** # token (so that the generated audio values are **not** treated as padded tokens) snake_case_ : Any = seq_len - padding_mask.shape[-1] snake_case_ : List[Any] = 1 - self.feature_extractor.padding_value snake_case_ : Tuple = np.pad(_A , ((0, 0), (0, difference)) , 'constant' , constant_values=_A ) snake_case_ : Union[str, Any] = audio_values.tolist() for i in range(_A ): snake_case_ : Tuple = np.asarray(audio_values[i] )[ padding_mask[i][None, :] != self.feature_extractor.padding_value ] snake_case_ : Any = sliced_audio.reshape(_A , -1 ) return audio_values
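A hedged sketch of the trimming idea inside _decode_audio above: slice each generated waveform with `mask != padding_value`, so padded samples are dropped (when the mask is shorter than the audio, the real code first pads it with the non-padding value so generated samples are kept). Toy arrays below, not real Encodec output, with padding_value assumed to be 0:

import numpy as np

audio_values = np.array([[0.1, 0.2, 0.3, 0.0, 0.0]])  # batch of one, padded
padding_mask = np.array([[1, 1, 1, 0, 0]])            # 1 = real sample
trimmed = audio_values[0][padding_mask[0] != 0]
print(trimmed)  # [0.1 0.2 0.3]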
327
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_speech_available, is_tf_available, is_torch_available, ) lowercase : Dict = { 'configuration_speech_to_text': ['SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Speech2TextConfig'], 'processing_speech_to_text': ['Speech2TextProcessor'], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase : List[Any] = ['Speech2TextTokenizer'] try: if not is_speech_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase : List[str] = ['Speech2TextFeatureExtractor'] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase : Dict = [ 'TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFSpeech2TextForConditionalGeneration', 'TFSpeech2TextModel', 'TFSpeech2TextPreTrainedModel', ] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase : Any = [ 'SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST', 'Speech2TextForConditionalGeneration', 'Speech2TextModel', 'Speech2TextPreTrainedModel', ] if TYPE_CHECKING: from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig from .processing_speech_to_text import SpeechaTextProcessor try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_speech_to_text import SpeechaTextTokenizer try: if not is_speech_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_speech_to_text import ( TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST, TFSpeechaTextForConditionalGeneration, TFSpeechaTextModel, TFSpeechaTextPreTrainedModel, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_speech_to_text import ( SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST, SpeechaTextForConditionalGeneration, SpeechaTextModel, SpeechaTextPreTrainedModel, ) else: import sys lowercase : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
3
0
"""simple docstring""" import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_funnel import FunnelTokenizer _snake_case = logging.get_logger(__name__) _snake_case = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'} _snake_case = [ 'small', 'small-base', 'medium', 'medium-base', 'intermediate', 'intermediate-base', 'large', 'large-base', 'xlarge', 'xlarge-base', ] _snake_case = { 'vocab_file': { 'funnel-transformer/small': 'https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt', 'funnel-transformer/small-base': 'https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt', 'funnel-transformer/medium': 'https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt', 'funnel-transformer/medium-base': ( 'https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt' ), 'funnel-transformer/intermediate': ( 'https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt' ), 'funnel-transformer/intermediate-base': ( 'https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt' ), 'funnel-transformer/large': 'https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt', 'funnel-transformer/large-base': 'https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt', 'funnel-transformer/xlarge': 'https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt', 'funnel-transformer/xlarge-base': ( 'https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'funnel-transformer/small': 'https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json', 'funnel-transformer/small-base': ( 'https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json' ), 'funnel-transformer/medium': 'https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json', 'funnel-transformer/medium-base': ( 'https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json' ), 'funnel-transformer/intermediate': ( 'https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json' ), 'funnel-transformer/intermediate-base': ( 'https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json' ), 'funnel-transformer/large': 'https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json', 'funnel-transformer/large-base': ( 'https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json' ), 'funnel-transformer/xlarge': 'https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json', 'funnel-transformer/xlarge-base': ( 'https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json' ), }, } _snake_case = {F'''funnel-transformer/{name}''': 512 for name in _model_names} _snake_case = {F'''funnel-transformer/{name}''': {'do_lower_case': True} for name in _model_names} class UpperCamelCase ( __snake_case ): UpperCamelCase : Dict = VOCAB_FILES_NAMES UpperCamelCase : Tuple = PRETRAINED_VOCAB_FILES_MAP UpperCamelCase : Tuple = PRETRAINED_INIT_CONFIGURATION UpperCamelCase : Optional[int] = FunnelTokenizer UpperCamelCase : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase : Optional[Any] = 2 def __init__( self : Any , UpperCAmelCase__ : int=None , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : Optional[int]=True , UpperCAmelCase__ : 
List[str]="<unk>" , UpperCAmelCase__ : List[str]="<sep>" , UpperCAmelCase__ : Any="<pad>" , UpperCAmelCase__ : Any="<cls>" , UpperCAmelCase__ : Union[str, Any]="<mask>" , UpperCAmelCase__ : Tuple="<s>" , UpperCAmelCase__ : Union[str, Any]="</s>" , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : Dict=True , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : List[str]="##" , **UpperCAmelCase__ : Any , ) -> Tuple: super().__init__( UpperCAmelCase__ , tokenizer_file=UpperCAmelCase__ , do_lower_case=UpperCAmelCase__ , unk_token=UpperCAmelCase__ , sep_token=UpperCAmelCase__ , pad_token=UpperCAmelCase__ , cls_token=UpperCAmelCase__ , mask_token=UpperCAmelCase__ , bos_token=UpperCAmelCase__ , eos_token=UpperCAmelCase__ , clean_text=UpperCAmelCase__ , tokenize_chinese_chars=UpperCAmelCase__ , strip_accents=UpperCAmelCase__ , wordpieces_prefix=UpperCAmelCase__ , **UpperCAmelCase__ , ) _a : Tuple = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get("""lowercase""" , UpperCAmelCase__ ) != do_lower_case or normalizer_state.get("""strip_accents""" , UpperCAmelCase__ ) != strip_accents or normalizer_state.get("""handle_chinese_chars""" , UpperCAmelCase__ ) != tokenize_chinese_chars ): _a : List[Any] = getattr(UpperCAmelCase__ , normalizer_state.pop("""type""" ) ) _a : Union[str, Any] = do_lower_case _a : Optional[int] = strip_accents _a : Union[str, Any] = tokenize_chinese_chars _a : Tuple = normalizer_class(**UpperCAmelCase__ ) _a : int = do_lower_case def _lowercase ( self : List[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Union[str, Any]=None ) -> Union[str, Any]: _a : Optional[Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def _lowercase ( self : int , UpperCAmelCase__ : Any , UpperCAmelCase__ : Tuple = None ) -> List[int]: _a : List[Any] = [self.sep_token_id] _a : Tuple = [self.cls_token_id] if token_ids_a is None: return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0] return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def _lowercase ( self : str , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Any = None ) -> Tuple[str]: _a : Tuple = self._tokenizer.model.save(UpperCAmelCase__ , name=UpperCAmelCase__ ) return tuple(UpperCAmelCase__ )
294
import os
import sys
import unittest


git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import get_test_info  # noqa: E402
from get_test_info import (  # noqa: E402
    get_model_to_test_mapping,
    get_model_to_tester_mapping,
    get_test_to_tester_mapping,
)


BERT_TEST_FILE = os.path.join("tests", "models", "bert", "test_modeling_bert.py")
BLIP_TEST_FILE = os.path.join("tests", "models", "blip", "test_modeling_blip.py")


class GetTestInfoTester(unittest.TestCase):
    def test_get_test_to_tester_mapping(self):
        bert_test_tester_mapping = get_test_to_tester_mapping(BERT_TEST_FILE)
        blip_test_tester_mapping = get_test_to_tester_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {"BertModelTest": "BertModelTester"}

        EXPECTED_BLIP_MAPPING = {
            "BlipModelTest": "BlipModelTester",
            "BlipTextImageModelTest": "BlipTextImageModelsModelTester",
            "BlipTextModelTest": "BlipTextModelTester",
            "BlipTextRetrievalModelTest": "BlipTextRetrievalModelTester",
            "BlipVQAModelTest": "BlipVQAModelTester",
            "BlipVisionModelTest": "BlipVisionModelTester",
        }

        self.assertEqual(get_test_info.to_json(bert_test_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_test_tester_mapping), EXPECTED_BLIP_MAPPING)

    def test_get_model_to_test_mapping(self):
        bert_model_test_mapping = get_model_to_test_mapping(BERT_TEST_FILE)
        blip_model_test_mapping = get_model_to_test_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {
            "BertForMaskedLM": ["BertModelTest"],
            "BertForMultipleChoice": ["BertModelTest"],
            "BertForNextSentencePrediction": ["BertModelTest"],
            "BertForPreTraining": ["BertModelTest"],
            "BertForQuestionAnswering": ["BertModelTest"],
            "BertForSequenceClassification": ["BertModelTest"],
            "BertForTokenClassification": ["BertModelTest"],
            "BertLMHeadModel": ["BertModelTest"],
            "BertModel": ["BertModelTest"],
        }

        EXPECTED_BLIP_MAPPING = {
            "BlipForConditionalGeneration": ["BlipTextImageModelTest"],
            "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTest"],
            "BlipForQuestionAnswering": ["BlipVQAModelTest"],
            "BlipModel": ["BlipModelTest"],
            "BlipTextModel": ["BlipTextModelTest"],
            "BlipVisionModel": ["BlipVisionModelTest"],
        }

        self.assertEqual(get_test_info.to_json(bert_model_test_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_test_mapping), EXPECTED_BLIP_MAPPING)

    def test_get_model_to_tester_mapping(self):
        bert_model_tester_mapping = get_model_to_tester_mapping(BERT_TEST_FILE)
        blip_model_tester_mapping = get_model_to_tester_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {
            "BertForMaskedLM": ["BertModelTester"],
            "BertForMultipleChoice": ["BertModelTester"],
            "BertForNextSentencePrediction": ["BertModelTester"],
            "BertForPreTraining": ["BertModelTester"],
            "BertForQuestionAnswering": ["BertModelTester"],
            "BertForSequenceClassification": ["BertModelTester"],
            "BertForTokenClassification": ["BertModelTester"],
            "BertLMHeadModel": ["BertModelTester"],
            "BertModel": ["BertModelTester"],
        }

        EXPECTED_BLIP_MAPPING = {
            "BlipForConditionalGeneration": ["BlipTextImageModelsModelTester"],
            "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTester"],
            "BlipForQuestionAnswering": ["BlipVQAModelTester"],
            "BlipModel": ["BlipModelTester"],
            "BlipTextModel": ["BlipTextModelTester"],
            "BlipVisionModel": ["BlipVisionModelTester"],
        }

        self.assertEqual(get_test_info.to_json(bert_model_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_tester_mapping), EXPECTED_BLIP_MAPPING)
3
0
"""simple docstring""" import requests def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[int] ,_lowerCamelCase : Any ) -> Union[str, Any]: _lowerCAmelCase : int = {'''Content-Type''': '''application/json'''} _lowerCAmelCase : List[str] = requests.post(snake_case__ ,json={"""text""": message_body} ,headers=snake_case__ ) if response.status_code != 200: _lowerCAmelCase : Optional[Any] = ( '''Request to slack returned an error ''' f"{response.status_code}, the response is:\n{response.text}" ) raise ValueError(snake_case__ ) if __name__ == "__main__": # Set the slack url to the one provided by Slack when you create the webhook at # https://my.slack.com/services/new/incoming-webhook/ send_slack_message('<YOUR MESSAGE BODY>', '<SLACK CHANNEL URL>')
44
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow

from ..bert.test_tokenization_bert import BertTokenizationTest


@require_tokenizers
class DistilBertTokenizationTest(BertTokenizationTest):
    tokenizer_class = DistilBertTokenizer
    rust_tokenizer_class = DistilBertTokenizerFast
    test_rust_tokenizer = True

    @slow
    def test_sequence_builders(self):
        tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
3
0
from collections import deque


def tarjan(g: list[list[int]]) -> list[list[int]]:
    """Return the strongly connected components of ``g`` (an adjacency list)."""
    n = len(g)
    stack: deque[int] = deque()
    on_stack = [False for _ in range(n)]
    index_of = [-1 for _ in range(n)]
    lowlink_of = index_of[:]

    def strong_connect(v: int, index: int, components: list[list[int]]) -> int:
        index_of[v] = index  # the number when this node is seen
        lowlink_of[v] = index  # lowest rank node reachable from here
        index += 1
        stack.append(v)
        on_stack[v] = True

        for w in g[v]:
            if index_of[w] == -1:
                index = strong_connect(w, index, components)
                lowlink_of[v] = lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
            elif on_stack[w]:
                lowlink_of[v] = lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]

        if lowlink_of[v] == index_of[v]:
            component = []
            w = stack.pop()
            on_stack[w] = False
            component.append(w)
            while w != v:
                w = stack.pop()
                on_stack[w] = False
                component.append(w)
            components.append(component)
        return index

    components: list[list[int]] = []
    for v in range(n):
        if index_of[v] == -1:
            strong_connect(v, 0, components)

    return components


def create_graph(n_vertices: int, edges: list[tuple[int, int]]) -> list[list[int]]:
    g: list[list[int]] = [[] for _ in range(n_vertices)]
    for u, v in edges:
        g[u].append(v)
    return g


if __name__ == "__main__":
    # Test
    n_vertices = 7
    source = [0, 0, 1, 2, 3, 3, 4, 4, 6]
    target = [1, 3, 2, 0, 1, 4, 5, 6, 5]
    edges = [(u, v) for u, v in zip(source, target)]
    g = create_graph(n_vertices, edges)

    assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
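# Quick hedged check of the tarjan()/create_graph() helpers above on a second graph:
# two 2-cycles joined by a one-way edge. Tarjan emits components in reverse
# topological order, so we sort both levels before comparing.
_edges = [(0, 1), (1, 0), (1, 2), (2, 3), (3, 2)]
_g = create_graph(4, _edges)
assert sorted(sorted(c) for c in tarjan(_g)) == [[0, 1], [2, 3]]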
182
'''simple docstring''' from typing import List, Optional, Union import numpy as np import torch import torchaudio.compliance.kaldi as ta_kaldi from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging lowercase : Optional[int] = logging.get_logger(__name__) class A ( __snake_case ): __magic_name__ = ['''input_features''', '''attention_mask'''] def __init__( self , SCREAMING_SNAKE_CASE=80 , SCREAMING_SNAKE_CASE=16000 , SCREAMING_SNAKE_CASE=80 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , **SCREAMING_SNAKE_CASE , ) -> int: """simple docstring""" super().__init__(feature_size=SCREAMING_SNAKE_CASE , sampling_rate=SCREAMING_SNAKE_CASE , padding_value=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) A : Optional[int] = num_mel_bins A : Tuple = do_ceptral_normalize A : Dict = normalize_means A : List[Any] = normalize_vars A : List[str] = True def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , ) -> np.ndarray: """simple docstring""" A : List[Any] = waveform * (2**15) # Kaldi compliance: 16-bit signed integers A : Any = torch.from_numpy(SCREAMING_SNAKE_CASE ).unsqueeze(0 ) A : Any = ta_kaldi.fbank(SCREAMING_SNAKE_CASE , num_mel_bins=self.num_mel_bins , sample_frequency=self.sampling_rate ) return features.numpy() @staticmethod def __lowerCAmelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = 0.0 , ) -> np.ndarray: """simple docstring""" if normalize_means: A : Dict = x[:input_length].mean(axis=0 ) A : Optional[Any] = np.subtract(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) if normalize_vars: A : str = x[:input_length].std(axis=0 ) A : int = np.divide(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) if input_length < x.shape[0]: A : List[str] = padding_value # make sure array is in float32 A : Tuple = x.astype(np.floataa ) return x def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None ) -> List[np.ndarray]: """simple docstring""" A : List[Any] = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features] return [ self.utterance_cmvn(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , self.normalize_means , self.normalize_vars , self.padding_value ) for x, n in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ] def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ) -> BatchFeature: """simple docstring""" if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F'The model corresponding to this feature extractor: {self} was trained using a sampling rate of' F' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with' F' {self.sampling_rate} and not {sampling_rate}.' ) else: logger.warning( '''It is strongly recommended to pass the `sampling_rate` argument to this function. 
''' '''Failing to do so can result in silent errors that might be hard to debug.''' ) A : List[Any] = isinstance(SCREAMING_SNAKE_CASE , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(F'Only mono-channel audio is supported for input to {self}' ) A : Tuple = is_batched_numpy or ( isinstance(SCREAMING_SNAKE_CASE , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: A : Union[str, Any] = [np.asarray(SCREAMING_SNAKE_CASE , dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(SCREAMING_SNAKE_CASE , np.ndarray ): A : Union[str, Any] = np.asarray(SCREAMING_SNAKE_CASE , dtype=np.floataa ) elif isinstance(SCREAMING_SNAKE_CASE , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): A : Optional[int] = raw_speech.astype(np.floataa ) # always return batch if not is_batched: A : Any = [raw_speech] # extract fbank features A : List[str] = [self._extract_fbank_features(SCREAMING_SNAKE_CASE ) for waveform in raw_speech] # convert into correct format for padding A : str = BatchFeature({'''input_features''': features} ) A : Union[str, Any] = self.pad( SCREAMING_SNAKE_CASE , padding=SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , truncation=SCREAMING_SNAKE_CASE , pad_to_multiple_of=SCREAMING_SNAKE_CASE , return_attention_mask=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , ) # make sure list is in array format A : List[str] = padded_inputs.get('''input_features''' ) if isinstance(input_features[0] , SCREAMING_SNAKE_CASE ): A : str = [np.asarray(SCREAMING_SNAKE_CASE , dtype=np.floataa ) for feature in input_features] A : Union[str, Any] = padded_inputs.get('''attention_mask''' ) if attention_mask is not None: A : Union[str, Any] = [np.asarray(SCREAMING_SNAKE_CASE , dtype=np.intaa ) for array in attention_mask] # Utterance-level cepstral mean and variance normalization if self.do_ceptral_normalize: A : Dict = ( np.array(SCREAMING_SNAKE_CASE , dtype=np.intaa ) if self._get_padding_strategies(SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE ) is not PaddingStrategy.DO_NOT_PAD else None ) A : List[Any] = self.normalize( padded_inputs['''input_features'''] , attention_mask=SCREAMING_SNAKE_CASE ) if return_tensors is not None: A : int = padded_inputs.convert_to_tensors(SCREAMING_SNAKE_CASE ) return padded_inputs
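# Standalone hedged sketch of the utterance-level CMVN the feature extractor above
# applies per example: mean/variance statistics are computed over the un-padded
# frames only, and padded frames are left out of the statistics. Shapes and the
# `valid` split are made up for the illustration.
import numpy as np

frames = np.random.randn(100, 80).astype(np.float32)  # (time, num_mel_bins)
valid = 80  # pretend the last 20 frames are padding
normed = frames.copy()
normed[:valid] = (frames[:valid] - frames[:valid].mean(axis=0)) / frames[:valid].std(axis=0)
assert abs(normed[:valid].mean()) < 1e-4  # per-utterance (approximately) zero mean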
3
0
import os
import unittest

from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe.model")


class BartphoTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BartphoTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        vocab = ["▁This", "▁is", "▁a", "▁t", "est"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.monolingual_vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["monolingual_vocab_file"])
        with open(self.monolingual_vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")

        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BartphoTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "This is a là test"
        output_text = "This is a<unk><unk> test"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        text = "This is a là test"
        bpe_tokens = "▁This ▁is ▁a ▁l à ▁t est".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 5, 6, 3, 3, 7, 8, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
93
'''simple docstring''' import json import os import sys import tempfile import unittest from pathlib import Path from shutil import copyfile from huggingface_hub import HfFolder, Repository, create_repo, delete_repo from requests.exceptions import HTTPError import transformers from transformers import ( CONFIG_MAPPING, FEATURE_EXTRACTOR_MAPPING, PROCESSOR_MAPPING, TOKENIZER_MAPPING, AutoConfig, AutoFeatureExtractor, AutoProcessor, AutoTokenizer, BertTokenizer, ProcessorMixin, WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaProcessor, ) from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils')) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402 from test_module.custom_processing import CustomProcessor # noqa E402 from test_module.custom_tokenization import CustomTokenizer # noqa E402 lowercase : str = get_tests_dir('fixtures/dummy_feature_extractor_config.json') lowercase : str = get_tests_dir('fixtures/vocab.json') lowercase : int = get_tests_dir('fixtures') class A ( unittest.TestCase ): __magic_name__ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou'''] def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" A : Tuple = 0 def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" A : List[Any] = AutoProcessor.from_pretrained('''facebook/wav2vec2-base-960h''' ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: A : Union[str, Any] = WavaVecaConfig() A : List[str] = AutoProcessor.from_pretrained('''facebook/wav2vec2-base-960h''' ) # save in new folder model_config.save_pretrained(SCREAMING_SNAKE_CASE ) processor.save_pretrained(SCREAMING_SNAKE_CASE ) A : List[str] = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> int: """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: # copy relevant files copyfile(SCREAMING_SNAKE_CASE , os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ) copyfile(SCREAMING_SNAKE_CASE , os.path.join(SCREAMING_SNAKE_CASE , '''vocab.json''' ) ) A : Optional[Any] = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: A : Dict = WavaVecaFeatureExtractor() A : List[str] = AutoTokenizer.from_pretrained('''facebook/wav2vec2-base-960h''' ) A : str = WavaVecaProcessor(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # save in new folder processor.save_pretrained(SCREAMING_SNAKE_CASE ) # drop `processor_class` in tokenizer with open(os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , '''r''' ) as f: A : Dict = json.load(SCREAMING_SNAKE_CASE ) config_dict.pop('''processor_class''' ) with open(os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , '''w''' ) as f: f.write(json.dumps(SCREAMING_SNAKE_CASE ) ) A : Optional[Any] = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE ) 
self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> int: """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: A : List[Any] = WavaVecaFeatureExtractor() A : List[Any] = AutoTokenizer.from_pretrained('''facebook/wav2vec2-base-960h''' ) A : str = WavaVecaProcessor(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # save in new folder processor.save_pretrained(SCREAMING_SNAKE_CASE ) # drop `processor_class` in feature extractor with open(os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , '''r''' ) as f: A : str = json.load(SCREAMING_SNAKE_CASE ) config_dict.pop('''processor_class''' ) with open(os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , '''w''' ) as f: f.write(json.dumps(SCREAMING_SNAKE_CASE ) ) A : str = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: A : str = WavaVecaConfig(processor_class='''Wav2Vec2Processor''' ) model_config.save_pretrained(SCREAMING_SNAKE_CASE ) # copy relevant files copyfile(SCREAMING_SNAKE_CASE , os.path.join(SCREAMING_SNAKE_CASE , '''vocab.json''' ) ) # create emtpy sample processor with open(os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , '''w''' ) as f: f.write('''{}''' ) A : List[str] = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Any: """simple docstring""" with self.assertRaises(SCREAMING_SNAKE_CASE ): A : Optional[Any] = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' ) # If remote code is disabled, we can't load this config. 
with self.assertRaises(SCREAMING_SNAKE_CASE ): A : Union[str, Any] = AutoProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=SCREAMING_SNAKE_CASE ) A : Union[str, Any] = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=SCREAMING_SNAKE_CASE ) self.assertTrue(processor.special_attribute_present ) self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' ) A : List[str] = processor.feature_extractor self.assertTrue(feature_extractor.special_attribute_present ) self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' ) A : Tuple = processor.tokenizer self.assertTrue(tokenizer.special_attribute_present ) if is_tokenizers_available(): self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' ) # Test we can also load the slow version A : List[str] = AutoProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=SCREAMING_SNAKE_CASE , use_fast=SCREAMING_SNAKE_CASE ) A : int = new_processor.tokenizer self.assertTrue(new_tokenizer.special_attribute_present ) self.assertEqual(new_tokenizer.__class__.__name__ , '''NewTokenizer''' ) else: self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' ) def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" try: AutoConfig.register('''custom''' , SCREAMING_SNAKE_CASE ) AutoFeatureExtractor.register(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) AutoTokenizer.register(SCREAMING_SNAKE_CASE , slow_tokenizer_class=SCREAMING_SNAKE_CASE ) AutoProcessor.register(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(SCREAMING_SNAKE_CASE ): AutoProcessor.register(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # Now that the config is registered, it can be used as any other config with the auto-API A : List[Any] = CustomFeatureExtractor.from_pretrained(SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmp_dir: A : Tuple = os.path.join(SCREAMING_SNAKE_CASE , '''vocab.txt''' ) with open(SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) ) A : Optional[int] = CustomTokenizer(SCREAMING_SNAKE_CASE ) A : Any = CustomProcessor(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmp_dir: processor.save_pretrained(SCREAMING_SNAKE_CASE ) A : List[str] = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] if CustomConfig in PROCESSOR_MAPPING._extra_content: del PROCESSOR_MAPPING._extra_content[CustomConfig] def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" class A ( __snake_case ): __magic_name__ = False class A ( __snake_case ): __magic_name__ = False class A ( __snake_case ): __magic_name__ = '''AutoFeatureExtractor''' __magic_name__ = '''AutoTokenizer''' __magic_name__ = False try: AutoConfig.register('''custom''' , SCREAMING_SNAKE_CASE ) AutoFeatureExtractor.register(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) 
AutoTokenizer.register(SCREAMING_SNAKE_CASE , slow_tokenizer_class=SCREAMING_SNAKE_CASE ) AutoProcessor.register(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # If remote code is not set, the default is to use local classes. A : Union[str, Any] = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' ) self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' ) self.assertFalse(processor.special_attribute_present ) self.assertFalse(processor.feature_extractor.special_attribute_present ) self.assertFalse(processor.tokenizer.special_attribute_present ) # If remote code is disabled, we load the local ones. A : Optional[int] = AutoProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=SCREAMING_SNAKE_CASE ) self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' ) self.assertFalse(processor.special_attribute_present ) self.assertFalse(processor.feature_extractor.special_attribute_present ) self.assertFalse(processor.tokenizer.special_attribute_present ) # If remote is enabled, we load from the Hub. A : Tuple = AutoProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=SCREAMING_SNAKE_CASE ) self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' ) self.assertTrue(processor.special_attribute_present ) self.assertTrue(processor.feature_extractor.special_attribute_present ) self.assertTrue(processor.tokenizer.special_attribute_present ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] if CustomConfig in PROCESSOR_MAPPING._extra_content: del PROCESSOR_MAPPING._extra_content[CustomConfig] def __lowerCAmelCase ( self ) -> str: """simple docstring""" A : int = AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-bert''' ) self.assertEqual(processor.__class__.__name__ , '''BertTokenizerFast''' ) def __lowerCAmelCase ( self ) -> str: """simple docstring""" A : Optional[int] = AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-convnext''' ) self.assertEqual(processor.__class__.__name__ , '''ConvNextImageProcessor''' ) @is_staging_test class A ( unittest.TestCase ): __magic_name__ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou'''] @classmethod def __lowerCAmelCase ( cls ) -> Dict: """simple docstring""" A : Optional[int] = TOKEN HfFolder.save_token(SCREAMING_SNAKE_CASE ) @classmethod def __lowerCAmelCase ( cls ) -> Any: """simple docstring""" try: delete_repo(token=cls._token , repo_id='''test-processor''' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''valid_org/test-processor-org''' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''test-dynamic-processor''' ) except HTTPError: pass def __lowerCAmelCase ( self ) -> Dict: """simple docstring""" A : Union[str, Any] = WavaVecaProcessor.from_pretrained(SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmp_dir: processor.save_pretrained( os.path.join(SCREAMING_SNAKE_CASE , '''test-processor''' ) , push_to_hub=SCREAMING_SNAKE_CASE , use_auth_token=self._token ) A : int = WavaVecaProcessor.from_pretrained(F'{USER}/test-processor' ) for k, v in processor.feature_extractor.__dict__.items(): self.assertEqual(SCREAMING_SNAKE_CASE , 
getattr(new_processor.feature_extractor , SCREAMING_SNAKE_CASE ) ) self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() ) def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" A : Tuple = WavaVecaProcessor.from_pretrained(SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmp_dir: processor.save_pretrained( os.path.join(SCREAMING_SNAKE_CASE , '''test-processor-org''' ) , push_to_hub=SCREAMING_SNAKE_CASE , use_auth_token=self._token , organization='''valid_org''' , ) A : int = WavaVecaProcessor.from_pretrained('''valid_org/test-processor-org''' ) for k, v in processor.feature_extractor.__dict__.items(): self.assertEqual(SCREAMING_SNAKE_CASE , getattr(new_processor.feature_extractor , SCREAMING_SNAKE_CASE ) ) self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() ) def __lowerCAmelCase ( self ) -> str: """simple docstring""" CustomFeatureExtractor.register_for_auto_class() CustomTokenizer.register_for_auto_class() CustomProcessor.register_for_auto_class() A : Any = CustomFeatureExtractor.from_pretrained(SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmp_dir: A : Union[str, Any] = os.path.join(SCREAMING_SNAKE_CASE , '''vocab.txt''' ) with open(SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) ) A : str = CustomTokenizer(SCREAMING_SNAKE_CASE ) A : Union[str, Any] = CustomProcessor(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmp_dir: create_repo(F'{USER}/test-dynamic-processor' , token=self._token ) A : List[str] = Repository(SCREAMING_SNAKE_CASE , clone_from=F'{USER}/test-dynamic-processor' , token=self._token ) processor.save_pretrained(SCREAMING_SNAKE_CASE ) # This has added the proper auto_map field to the feature extractor config self.assertDictEqual( processor.feature_extractor.auto_map , { '''AutoFeatureExtractor''': '''custom_feature_extraction.CustomFeatureExtractor''', '''AutoProcessor''': '''custom_processing.CustomProcessor''', } , ) # This has added the proper auto_map field to the tokenizer config with open(os.path.join(SCREAMING_SNAKE_CASE , '''tokenizer_config.json''' ) ) as f: A : Dict = json.load(SCREAMING_SNAKE_CASE ) self.assertDictEqual( tokenizer_config['''auto_map'''] , { '''AutoTokenizer''': ['''custom_tokenization.CustomTokenizer''', None], '''AutoProcessor''': '''custom_processing.CustomProcessor''', } , ) # The code has been copied from fixtures self.assertTrue(os.path.isfile(os.path.join(SCREAMING_SNAKE_CASE , '''custom_feature_extraction.py''' ) ) ) self.assertTrue(os.path.isfile(os.path.join(SCREAMING_SNAKE_CASE , '''custom_tokenization.py''' ) ) ) self.assertTrue(os.path.isfile(os.path.join(SCREAMING_SNAKE_CASE , '''custom_processing.py''' ) ) ) repo.push_to_hub() A : Optional[int] = AutoProcessor.from_pretrained(F'{USER}/test-dynamic-processor' , trust_remote_code=SCREAMING_SNAKE_CASE ) # Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module self.assertEqual(new_processor.__class__.__name__ , '''CustomProcessor''' )
3
0
from typing import List, Optional, Union import numpy as np import PIL import torch from PIL import Image from ...models import UNetaDConditionModel, VQModel from ...pipelines import DiffusionPipeline from ...pipelines.pipeline_utils import ImagePipelineOutput from ...schedulers import DDPMScheduler from ...utils import ( is_accelerate_available, is_accelerate_version, logging, randn_tensor, replace_example_docstring, ) __lowerCamelCase : Tuple = logging.get_logger(__name__) # pylint: disable=invalid-name __lowerCamelCase : str = '\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n >>> from diffusers.utils import load_image\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior.to("cuda")\n\n >>> prompt = "A red cartoon frog, 4k"\n >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16\n ... )\n >>> pipe.to("cuda")\n\n >>> init_image = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/frog.png"\n ... )\n\n >>> image = pipe(\n ... image=init_image,\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... strength=0.2,\n ... ).images\n\n >>> image[0].save("red_frog.png")\n ```\n' def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : int , __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[int]=8 ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE__ = height // scale_factor**2 if height % scale_factor**2 != 0: new_height += 1 SCREAMING_SNAKE_CASE__ = width // scale_factor**2 if width % scale_factor**2 != 0: new_width += 1 return new_height * scale_factor, new_width * scale_factor def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[Any] , __UpperCamelCase : Any=5_12 , __UpperCamelCase : Optional[int]=5_12 ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ = pil_image.resize((w, h) , resample=Image.BICUBIC , reducing_gap=1 ) SCREAMING_SNAKE_CASE__ = np.array(pil_image.convert("""RGB""" ) ) SCREAMING_SNAKE_CASE__ = arr.astype(np.floataa ) / 1_27.5 - 1 SCREAMING_SNAKE_CASE__ = np.transpose(snake_case__ , [2, 0, 1] ) SCREAMING_SNAKE_CASE__ = torch.from_numpy(snake_case__ ).unsqueeze(0 ) return image class __snake_case ( __snake_case ): def __init__( self : Any , _lowercase : Tuple , _lowercase : Dict , _lowercase : int , ): """simple docstring""" super().__init__() self.register_modules( unet=_lowercase , scheduler=_lowercase , movq=_lowercase , ) SCREAMING_SNAKE_CASE__ = 2 ** (len(self.movq.config.block_out_channels ) - 1) def __a ( self : str , _lowercase : Union[str, Any] , _lowercase : List[Any] , _lowercase : List[Any] ): """simple docstring""" SCREAMING_SNAKE_CASE__ = min(int(num_inference_steps * strength ) , _lowercase ) SCREAMING_SNAKE_CASE__ = max(num_inference_steps - init_timestep , 0 ) SCREAMING_SNAKE_CASE__ = self.scheduler.timesteps[t_start:] return timesteps, num_inference_steps - t_start def __a ( self : Union[str, Any] , _lowercase : List[Any] , _lowercase : str , _lowercase : List[str] , _lowercase : Any , _lowercase : Optional[Any] , _lowercase : Union[str, Any] , _lowercase : Any=None ): """simple docstring""" if not isinstance(_lowercase , (torch.Tensor, 
PIL.Image.Image, list) ): raise ValueError( f"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(_lowercase )}""" ) SCREAMING_SNAKE_CASE__ = image.to(device=_lowercase , dtype=_lowercase ) SCREAMING_SNAKE_CASE__ = batch_size * num_images_per_prompt if image.shape[1] == 4: SCREAMING_SNAKE_CASE__ = image else: if isinstance(_lowercase , _lowercase ) and len(_lowercase ) != batch_size: raise ValueError( f"""You have passed a list of generators of length {len(_lowercase )}, but requested an effective batch""" f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" ) elif isinstance(_lowercase , _lowercase ): SCREAMING_SNAKE_CASE__ = [ self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(_lowercase ) ] SCREAMING_SNAKE_CASE__ = torch.cat(_lowercase , dim=0 ) else: SCREAMING_SNAKE_CASE__ = self.movq.encode(_lowercase ).latent_dist.sample(_lowercase ) SCREAMING_SNAKE_CASE__ = self.movq.config.scaling_factor * init_latents SCREAMING_SNAKE_CASE__ = torch.cat([init_latents] , dim=0 ) SCREAMING_SNAKE_CASE__ = init_latents.shape SCREAMING_SNAKE_CASE__ = randn_tensor(_lowercase , generator=_lowercase , device=_lowercase , dtype=_lowercase ) # get latents SCREAMING_SNAKE_CASE__ = self.scheduler.add_noise(_lowercase , _lowercase , _lowercase ) SCREAMING_SNAKE_CASE__ = init_latents return latents def __a ( self : Any , _lowercase : List[str]=0 ): """simple docstring""" if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError("""Please install accelerate via `pip install accelerate`""" ) SCREAMING_SNAKE_CASE__ = torch.device(f"""cuda:{gpu_id}""" ) SCREAMING_SNAKE_CASE__ = [ self.unet, self.movq, ] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(_lowercase , _lowercase ) def __a ( self : Optional[Any] , _lowercase : Optional[int]=0 ): """simple docstring""" if is_accelerate_available() and is_accelerate_version(""">=""" , """0.17.0.dev0""" ): from accelerate import cpu_offload_with_hook else: raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""" ) SCREAMING_SNAKE_CASE__ = torch.device(f"""cuda:{gpu_id}""" ) if self.device.type != "cpu": self.to("""cpu""" , silence_dtype_warnings=_lowercase ) torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) SCREAMING_SNAKE_CASE__ = None for cpu_offloaded_model in [self.unet, self.movq]: SCREAMING_SNAKE_CASE__ = cpu_offload_with_hook(_lowercase , _lowercase , prev_module_hook=_lowercase ) # We'll offload the last model manually. 
SCREAMING_SNAKE_CASE__ = hook @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def __a ( self : Union[str, Any] ): """simple docstring""" if not hasattr(self.unet , """_hf_hook""" ): return self.device for module in self.unet.modules(): if ( hasattr(_lowercase , """_hf_hook""" ) and hasattr(module._hf_hook , """execution_device""" ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device @torch.no_grad() @replace_example_docstring(_lowercase ) def __call__( self : Any , _lowercase : Dict , _lowercase : Optional[Any] , _lowercase : Dict , _lowercase : Optional[int] = 5_12 , _lowercase : Union[str, Any] = 5_12 , _lowercase : Union[str, Any] = 1_00 , _lowercase : Optional[Any] = 4.0 , _lowercase : Optional[int] = 0.3 , _lowercase : List[str] = 1 , _lowercase : Any = None , _lowercase : Union[str, Any] = "pil" , _lowercase : Optional[Any] = True , ): """simple docstring""" SCREAMING_SNAKE_CASE__ = self._execution_device SCREAMING_SNAKE_CASE__ = guidance_scale > 1.0 if isinstance(_lowercase , _lowercase ): SCREAMING_SNAKE_CASE__ = torch.cat(_lowercase , dim=0 ) SCREAMING_SNAKE_CASE__ = image_embeds.shape[0] if isinstance(_lowercase , _lowercase ): SCREAMING_SNAKE_CASE__ = torch.cat(_lowercase , dim=0 ) if do_classifier_free_guidance: SCREAMING_SNAKE_CASE__ = image_embeds.repeat_interleave(_lowercase , dim=0 ) SCREAMING_SNAKE_CASE__ = negative_image_embeds.repeat_interleave(_lowercase , dim=0 ) SCREAMING_SNAKE_CASE__ = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=_lowercase ) if not isinstance(_lowercase , _lowercase ): SCREAMING_SNAKE_CASE__ = [image] if not all(isinstance(_lowercase , (PIL.Image.Image, torch.Tensor) ) for i in image ): raise ValueError( f"""Input is in incorrect format: {[type(_lowercase ) for i in image]}. 
Currently, we only support PIL image and pytorch tensor""" ) SCREAMING_SNAKE_CASE__ = torch.cat([prepare_image(_lowercase , _lowercase , _lowercase ) for i in image] , dim=0 ) SCREAMING_SNAKE_CASE__ = image.to(dtype=image_embeds.dtype , device=_lowercase ) SCREAMING_SNAKE_CASE__ = self.movq.encode(_lowercase )['''latents'''] SCREAMING_SNAKE_CASE__ = latents.repeat_interleave(_lowercase , dim=0 ) self.scheduler.set_timesteps(_lowercase , device=_lowercase ) SCREAMING_SNAKE_CASE__ = self.get_timesteps(_lowercase , _lowercase , _lowercase ) SCREAMING_SNAKE_CASE__ = timesteps[:1].repeat(batch_size * num_images_per_prompt ) SCREAMING_SNAKE_CASE__ = downscale_height_and_width(_lowercase , _lowercase , self.movq_scale_factor ) SCREAMING_SNAKE_CASE__ = self.prepare_latents( _lowercase , _lowercase , _lowercase , _lowercase , image_embeds.dtype , _lowercase , _lowercase ) for i, t in enumerate(self.progress_bar(_lowercase ) ): # expand the latents if we are doing classifier free guidance SCREAMING_SNAKE_CASE__ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents SCREAMING_SNAKE_CASE__ = {'''image_embeds''': image_embeds} SCREAMING_SNAKE_CASE__ = self.unet( sample=_lowercase , timestep=_lowercase , encoder_hidden_states=_lowercase , added_cond_kwargs=_lowercase , return_dict=_lowercase , )[0] if do_classifier_free_guidance: SCREAMING_SNAKE_CASE__ = noise_pred.split(latents.shape[1] , dim=1 ) SCREAMING_SNAKE_CASE__ = noise_pred.chunk(2 ) SCREAMING_SNAKE_CASE__ = variance_pred.chunk(2 ) SCREAMING_SNAKE_CASE__ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) SCREAMING_SNAKE_CASE__ = torch.cat([noise_pred, variance_pred_text] , dim=1 ) if not ( hasattr(self.scheduler.config , """variance_type""" ) and self.scheduler.config.variance_type in ["learned", "learned_range"] ): SCREAMING_SNAKE_CASE__ = noise_pred.split(latents.shape[1] , dim=1 ) # compute the previous noisy sample x_t -> x_t-1 SCREAMING_SNAKE_CASE__ = self.scheduler.step( _lowercase , _lowercase , _lowercase , generator=_lowercase , )[0] # post-processing SCREAMING_SNAKE_CASE__ = self.movq.decode(_lowercase , force_not_quantize=_lowercase )['''sample'''] if output_type not in ["pt", "np", "pil"]: raise ValueError(f"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" ) if output_type in ["np", "pil"]: SCREAMING_SNAKE_CASE__ = image * 0.5 + 0.5 SCREAMING_SNAKE_CASE__ = image.clamp(0 , 1 ) SCREAMING_SNAKE_CASE__ = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if output_type == "pil": SCREAMING_SNAKE_CASE__ = self.numpy_to_pil(_lowercase ) if not return_dict: return (image,) return ImagePipelineOutput(images=_lowercase )
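# Hedged illustration of the classifier-free guidance step used in the denoising
# loop above: the final noise estimate extrapolates from the unconditional
# prediction toward the text-conditioned one by `guidance_scale`. Tensor shapes
# here are arbitrary stand-ins.
import torch

noise_uncond = torch.zeros(1, 4, 8, 8)
noise_text = torch.ones(1, 4, 8, 8)
guidance_scale = 4.0
guided = noise_uncond + guidance_scale * (noise_text - noise_uncond)
assert torch.allclose(guided, torch.full((1, 4, 8, 8), 4.0))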
219
'''simple docstring''' import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_rembert import RemBertTokenizer else: lowercase : Optional[Any] = None lowercase : Tuple = logging.get_logger(__name__) lowercase : Dict = {'vocab_file': 'sentencepiece.model', 'tokenizer_file': 'tokenizer.json'} lowercase : Tuple = { 'vocab_file': { 'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/sentencepiece.model', }, 'tokenizer_file': { 'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/tokenizer.json', }, } lowercase : List[str] = { 'google/rembert': 2_56, } lowercase : Dict = '▁' class A ( __snake_case ): __magic_name__ = VOCAB_FILES_NAMES __magic_name__ = PRETRAINED_VOCAB_FILES_MAP __magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __magic_name__ = RemBertTokenizer def __init__( self , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE="[CLS]" , SCREAMING_SNAKE_CASE="[SEP]" , SCREAMING_SNAKE_CASE="<unk>" , SCREAMING_SNAKE_CASE="[SEP]" , SCREAMING_SNAKE_CASE="<pad>" , SCREAMING_SNAKE_CASE="[CLS]" , SCREAMING_SNAKE_CASE="[MASK]" , **SCREAMING_SNAKE_CASE , ) -> List[str]: """simple docstring""" A : Optional[Any] = AddedToken(SCREAMING_SNAKE_CASE , lstrip=SCREAMING_SNAKE_CASE , rstrip=SCREAMING_SNAKE_CASE ) if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else mask_token super().__init__( SCREAMING_SNAKE_CASE , tokenizer_file=SCREAMING_SNAKE_CASE , do_lower_case=SCREAMING_SNAKE_CASE , remove_space=SCREAMING_SNAKE_CASE , keep_accents=SCREAMING_SNAKE_CASE , bos_token=SCREAMING_SNAKE_CASE , eos_token=SCREAMING_SNAKE_CASE , unk_token=SCREAMING_SNAKE_CASE , sep_token=SCREAMING_SNAKE_CASE , pad_token=SCREAMING_SNAKE_CASE , cls_token=SCREAMING_SNAKE_CASE , mask_token=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , ) A : List[Any] = do_lower_case A : str = remove_space A : int = keep_accents A : Union[str, Any] = vocab_file A : List[Any] = False if not self.vocab_file else True def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None ) -> List[int]: """simple docstring""" A : List[Any] = [self.sep_token_id] A : Tuple = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = False ) -> List[int]: """simple docstring""" if already_has_special_tokens: if token_ids_a is not None: raise ValueError( '''You should not supply a second sequence if the provided sequence of ''' '''ids is already formatted with special tokens for the model.''' ) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is not None: return [1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1] return [1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1] def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None ) -> List[int]: """simple docstring""" A : Tuple = [self.sep_token_id] A : Dict = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def 
__lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None ) -> Tuple[str]: """simple docstring""" if not os.path.isdir(SCREAMING_SNAKE_CASE ): logger.error('''Vocabulary path ({}) should be a directory'''.format(SCREAMING_SNAKE_CASE ) ) return A : Any = os.path.join( SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE ): copyfile(self.vocab_file , SCREAMING_SNAKE_CASE ) return (out_vocab_file,)
3
0
import argparse

import torch

from transformers import GPT2Config, GPT2Model, load_tf_weights_in_gpt2
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging


logging.set_verbosity_info()


def convert_gpt2_checkpoint_to_pytorch(gpt2_checkpoint_path, gpt2_config_file, pytorch_dump_folder_path):
    # Construct the model config
    if gpt2_config_file == "":
        config = GPT2Config()
    else:
        config = GPT2Config.from_json_file(gpt2_config_file)
    model = GPT2Model(config)

    # Load weights from numpy
    load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--gpt2_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--gpt2_config_file",
        default="",
        type=str,
        help=(
            "An optional config json file corresponding to the pre-trained OpenAI model. \n"
            "This specifies the model architecture."
        ),
    )
    args = parser.parse_args()
    convert_gpt2_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
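# Example invocation of the conversion script above; the flags come from the
# argparse setup in the script itself, and the paths are placeholders:
#
#   python convert_gpt2_checkpoint_to_pytorch.py \
#       --gpt2_checkpoint_path /path/to/tf_checkpoint \
#       --pytorch_dump_folder_path /path/to/output \
#       --gpt2_config_file /path/to/config.json  # optional; defaults to GPT2Config()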
231
'''simple docstring''' from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL lowercase : Optional[Any] = logging.get_logger(__name__) class A ( __snake_case ): __magic_name__ = ['''pixel_values'''] def __init__( self , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = PILImageResampling.BILINEAR , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = 1 / 255 , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ) -> None: """simple docstring""" super().__init__(**SCREAMING_SNAKE_CASE ) A : str = size if size is not None else {'''shortest_edge''': 384} A : Tuple = get_size_dict(SCREAMING_SNAKE_CASE , default_to_square=SCREAMING_SNAKE_CASE ) A : str = do_resize A : List[Any] = size # Default value set here for backwards compatibility where the value in config is None A : List[Any] = crop_pct if crop_pct is not None else 224 / 256 A : Optional[int] = resample A : Union[str, Any] = do_rescale A : List[str] = rescale_factor A : Union[str, Any] = do_normalize A : Any = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN A : Optional[int] = image_std if image_std is not None else IMAGENET_STANDARD_STD def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = PILImageResampling.BICUBIC , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ) -> np.ndarray: """simple docstring""" A : str = get_size_dict(SCREAMING_SNAKE_CASE , default_to_square=SCREAMING_SNAKE_CASE ) if "shortest_edge" not in size: raise ValueError(F'Size dictionary must contain \'shortest_edge\' key. 
Got {size.keys()}' ) A : Any = size['''shortest_edge'''] if shortest_edge < 384: # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct A : Dict = int(shortest_edge / crop_pct ) A : str = get_resize_output_image_size(SCREAMING_SNAKE_CASE , size=SCREAMING_SNAKE_CASE , default_to_square=SCREAMING_SNAKE_CASE ) A : int = resize(image=SCREAMING_SNAKE_CASE , size=SCREAMING_SNAKE_CASE , resample=SCREAMING_SNAKE_CASE , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) # then crop to (shortest_edge, shortest_edge) return center_crop(image=SCREAMING_SNAKE_CASE , size=(shortest_edge, shortest_edge) , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) else: # warping (no cropping) when evaluated at 384 or larger return resize( SCREAMING_SNAKE_CASE , size=(shortest_edge, shortest_edge) , resample=SCREAMING_SNAKE_CASE , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ) -> List[str]: """simple docstring""" return rescale(SCREAMING_SNAKE_CASE , scale=SCREAMING_SNAKE_CASE , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ) -> np.ndarray: """simple docstring""" return normalize(SCREAMING_SNAKE_CASE , mean=SCREAMING_SNAKE_CASE , std=SCREAMING_SNAKE_CASE , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE , ) -> PIL.Image.Image: """simple docstring""" A : int = do_resize if do_resize is not None else self.do_resize A : Tuple = crop_pct if crop_pct is not None else self.crop_pct A : Optional[Any] = resample if resample is not None else self.resample A : List[Any] = do_rescale if do_rescale is not None else self.do_rescale A : List[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor A : Union[str, Any] = do_normalize if do_normalize is not None else self.do_normalize A : Union[str, Any] = image_mean if image_mean is not None else self.image_mean A : List[str] = image_std if image_std is not None else self.image_std A : Union[str, Any] = size if size is not None else self.size A : List[Any] = get_size_dict(SCREAMING_SNAKE_CASE , default_to_square=SCREAMING_SNAKE_CASE ) A : Any = make_list_of_images(SCREAMING_SNAKE_CASE ) if not valid_images(SCREAMING_SNAKE_CASE ): raise ValueError( '''Invalid image type. 
Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None or resample is None: raise ValueError('''Size and resample must be specified if do_resize is True.''' ) if do_resize and size["shortest_edge"] < 384 and crop_pct is None: raise ValueError('''crop_pct must be specified if size < 384.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # All transformations expect numpy arrays. A : Optional[int] = [to_numpy_array(SCREAMING_SNAKE_CASE ) for image in images] if do_resize: A : Any = [self.resize(image=SCREAMING_SNAKE_CASE , size=SCREAMING_SNAKE_CASE , crop_pct=SCREAMING_SNAKE_CASE , resample=SCREAMING_SNAKE_CASE ) for image in images] if do_rescale: A : str = [self.rescale(image=SCREAMING_SNAKE_CASE , scale=SCREAMING_SNAKE_CASE ) for image in images] if do_normalize: A : Dict = [self.normalize(image=SCREAMING_SNAKE_CASE , mean=SCREAMING_SNAKE_CASE , std=SCREAMING_SNAKE_CASE ) for image in images] A : Any = [to_channel_dimension_format(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for image in images] A : Optional[int] = {'''pixel_values''': images} return BatchFeature(data=SCREAMING_SNAKE_CASE , tensor_type=SCREAMING_SNAKE_CASE )
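# Hedged arithmetic behind the resize branch of the image processor above: for
# inputs whose shortest edge is below 384, the shortest edge is first resized to
# shortest_edge / crop_pct and the image is then center-cropped back to
# shortest_edge (the standard "crop percentage" evaluation trick).
shortest_edge, crop_pct = 224, 224 / 256
resize_target = int(shortest_edge / crop_pct)
assert resize_target == 256  # resize shortest side to 256, then center-crop 224x224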
3
0
'''simple docstring''' import gc import random import unittest import torch from diffusers import ( IFImgaImgPipeline, IFImgaImgSuperResolutionPipeline, IFInpaintingPipeline, IFInpaintingSuperResolutionPipeline, IFPipeline, IFSuperResolutionPipeline, ) from diffusers.models.attention_processor import AttnAddedKVProcessor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference from . import IFPipelineTesterMixin @skip_mps class __lowerCAmelCase (__snake_case , __snake_case , unittest.TestCase ): '''simple docstring''' lowerCAmelCase__ : List[str] = IFPipeline lowerCAmelCase__ : Optional[Any] = TEXT_TO_IMAGE_PARAMS - {"""width""", """height""", """latents"""} lowerCAmelCase__ : List[Any] = TEXT_TO_IMAGE_BATCH_PARAMS lowerCAmelCase__ : Union[str, Any] = PipelineTesterMixin.required_optional_params - {"""latents"""} def UpperCamelCase__ (self : Optional[int] ): '''simple docstring''' return self._get_dummy_components() def UpperCamelCase__ (self : Optional[Any] , UpperCamelCase : List[Any] , UpperCamelCase : Optional[int]=0 ): '''simple docstring''' if str(UpperCamelCase ).startswith('''mps''' ): lowercase__ = torch.manual_seed(UpperCamelCase ) else: lowercase__ = torch.Generator(device=UpperCamelCase ).manual_seed(UpperCamelCase ) lowercase__ = { '''prompt''': '''A painting of a squirrel eating a burger''', '''generator''': generator, '''num_inference_steps''': 2, '''output_type''': '''numpy''', } return inputs def UpperCamelCase__ (self : Tuple ): '''simple docstring''' self._test_save_load_optional_components() @unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' ) def UpperCamelCase__ (self : Union[str, Any] ): '''simple docstring''' super().test_save_load_floataa(expected_max_diff=1E-1 ) def UpperCamelCase__ (self : Any ): '''simple docstring''' self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 ) def UpperCamelCase__ (self : Any ): '''simple docstring''' self._test_save_load_local() def UpperCamelCase__ (self : Dict ): '''simple docstring''' self._test_inference_batch_single_identical( expected_max_diff=1E-2 , ) @unittest.skipIf( torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , ) def UpperCamelCase__ (self : Tuple ): '''simple docstring''' self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) @slow @require_torch_gpu class __lowerCAmelCase (unittest.TestCase ): '''simple docstring''' def UpperCamelCase__ (self : List[str] ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCamelCase__ (self : Optional[Any] ): '''simple docstring''' lowercase__ = IFPipeline.from_pretrained('''DeepFloyd/IF-I-XL-v1.0''' , variant='''fp16''' , torch_dtype=torch.floataa ) lowercase__ = IFSuperResolutionPipeline.from_pretrained( '''DeepFloyd/IF-II-L-v1.0''' , variant='''fp16''' , torch_dtype=torch.floataa , text_encoder=UpperCamelCase , tokenizer=UpperCamelCase ) # pre compute text embeddings and remove T5 to save memory pipe_a.text_encoder.to('''cuda''' ) lowercase__ = pipe_a.encode_prompt('''anime turtle''' , device='''cuda''' ) del pipe_a.tokenizer del pipe_a.text_encoder gc.collect() lowercase__ = None lowercase__ = None 
pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) pipe_a.remove_all_hooks() pipe_a.remove_all_hooks() # img2img lowercase__ = IFImgaImgPipeline(**pipe_a.components ) lowercase__ = IFImgaImgSuperResolutionPipeline(**pipe_a.components ) pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if_imgaimg(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) pipe_a.remove_all_hooks() pipe_a.remove_all_hooks() # inpainting lowercase__ = IFInpaintingPipeline(**pipe_a.components ) lowercase__ = IFInpaintingSuperResolutionPipeline(**pipe_a.components ) pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if_inpainting(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) def UpperCamelCase__ (self : int , UpperCamelCase : int , UpperCamelCase : Tuple , UpperCamelCase : List[str] , UpperCamelCase : List[str] ): '''simple docstring''' _start_torch_memory_measurement() lowercase__ = torch.Generator(device='''cpu''' ).manual_seed(0 ) lowercase__ = pipe_a( prompt_embeds=UpperCamelCase , negative_prompt_embeds=UpperCamelCase , num_inference_steps=2 , generator=UpperCamelCase , output_type='''np''' , ) lowercase__ = output.images[0] assert image.shape == (64, 64, 3) lowercase__ = torch.cuda.max_memory_allocated() assert mem_bytes < 13 * 10**9 lowercase__ = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy''' ) assert_mean_pixel_difference(UpperCamelCase , UpperCamelCase ) # pipeline 2 _start_torch_memory_measurement() lowercase__ = torch.Generator(device='''cpu''' ).manual_seed(0 ) lowercase__ = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(UpperCamelCase ) lowercase__ = pipe_a( prompt_embeds=UpperCamelCase , negative_prompt_embeds=UpperCamelCase , image=UpperCamelCase , generator=UpperCamelCase , num_inference_steps=2 , output_type='''np''' , ) lowercase__ = output.images[0] assert image.shape == (256, 256, 3) lowercase__ = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 lowercase__ = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy''' ) assert_mean_pixel_difference(UpperCamelCase , UpperCamelCase ) def UpperCamelCase__ (self : int , UpperCamelCase : Any , UpperCamelCase : Union[str, Any] , UpperCamelCase : List[str] , UpperCamelCase : Dict ): '''simple docstring''' _start_torch_memory_measurement() lowercase__ = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(UpperCamelCase ) lowercase__ = torch.Generator(device='''cpu''' ).manual_seed(0 ) lowercase__ = pipe_a( prompt_embeds=UpperCamelCase , negative_prompt_embeds=UpperCamelCase , image=UpperCamelCase , num_inference_steps=2 , generator=UpperCamelCase , output_type='''np''' , ) lowercase__ = output.images[0] assert image.shape == (64, 64, 3) lowercase__ = torch.cuda.max_memory_allocated() assert mem_bytes < 10 * 10**9 lowercase__ = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy''' ) 
assert_mean_pixel_difference(UpperCamelCase , UpperCamelCase ) # pipeline 2 _start_torch_memory_measurement() lowercase__ = torch.Generator(device='''cpu''' ).manual_seed(0 ) lowercase__ = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(UpperCamelCase ) lowercase__ = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(UpperCamelCase ) lowercase__ = pipe_a( prompt_embeds=UpperCamelCase , negative_prompt_embeds=UpperCamelCase , image=UpperCamelCase , original_image=UpperCamelCase , generator=UpperCamelCase , num_inference_steps=2 , output_type='''np''' , ) lowercase__ = output.images[0] assert image.shape == (256, 256, 3) lowercase__ = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 lowercase__ = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy''' ) assert_mean_pixel_difference(UpperCamelCase , UpperCamelCase ) def UpperCamelCase__ (self : Optional[int] , UpperCamelCase : Tuple , UpperCamelCase : Tuple , UpperCamelCase : List[Any] , UpperCamelCase : List[str] ): '''simple docstring''' _start_torch_memory_measurement() lowercase__ = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(UpperCamelCase ) lowercase__ = floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(UpperCamelCase ) lowercase__ = torch.Generator(device='''cpu''' ).manual_seed(0 ) lowercase__ = pipe_a( prompt_embeds=UpperCamelCase , negative_prompt_embeds=UpperCamelCase , image=UpperCamelCase , mask_image=UpperCamelCase , num_inference_steps=2 , generator=UpperCamelCase , output_type='''np''' , ) lowercase__ = output.images[0] assert image.shape == (64, 64, 3) lowercase__ = torch.cuda.max_memory_allocated() assert mem_bytes < 10 * 10**9 lowercase__ = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy''' ) assert_mean_pixel_difference(UpperCamelCase , UpperCamelCase ) # pipeline 2 _start_torch_memory_measurement() lowercase__ = torch.Generator(device='''cpu''' ).manual_seed(0 ) lowercase__ = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(UpperCamelCase ) lowercase__ = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(UpperCamelCase ) lowercase__ = floats_tensor((1, 3, 256, 256) , rng=random.Random(1 ) ).to(UpperCamelCase ) lowercase__ = pipe_a( prompt_embeds=UpperCamelCase , negative_prompt_embeds=UpperCamelCase , image=UpperCamelCase , mask_image=UpperCamelCase , original_image=UpperCamelCase , generator=UpperCamelCase , num_inference_steps=2 , output_type='''np''' , ) lowercase__ = output.images[0] assert image.shape == (256, 256, 3) lowercase__ = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 lowercase__ = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy''' ) assert_mean_pixel_difference(UpperCamelCase , UpperCamelCase ) def _SCREAMING_SNAKE_CASE () -> int: """simple docstring""" torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats()
2
'''simple docstring''' import unittest from transformers import AutoConfig, AutoTokenizer, BertConfig, TensorType, is_flax_available from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow if is_flax_available(): import jax from transformers.models.auto.modeling_flax_auto import FlaxAutoModel from transformers.models.bert.modeling_flax_bert import FlaxBertModel from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel @require_flax class A ( unittest.TestCase ): @slow def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" for model_name in ["bert-base-cased", "bert-large-uncased"]: with self.subTest(SCREAMING_SNAKE_CASE ): A : int = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsNotNone(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) A : List[str] = FlaxAutoModel.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsNotNone(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) @slow def __lowerCAmelCase ( self ) -> int: """simple docstring""" for model_name in ["roberta-base", "roberta-large"]: with self.subTest(SCREAMING_SNAKE_CASE ): A : Any = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsNotNone(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) A : Any = FlaxAutoModel.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsNotNone(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) @slow def __lowerCAmelCase ( self ) -> Any: """simple docstring""" for model_name in ["bert-base-cased", "bert-large-uncased"]: A : Optional[int] = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE ) A : List[str] = FlaxBertModel.from_pretrained(SCREAMING_SNAKE_CASE ) A : Optional[Any] = tokenizer('''Do you support jax jitted function?''' , return_tensors=TensorType.JAX ) @jax.jit def eval(**SCREAMING_SNAKE_CASE ): return model(**SCREAMING_SNAKE_CASE ) eval(**SCREAMING_SNAKE_CASE ).block_until_ready() @slow def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" for model_name in ["roberta-base", "roberta-large"]: A : List[str] = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE ) A : Union[str, Any] = FlaxRobertaModel.from_pretrained(SCREAMING_SNAKE_CASE ) A : int = tokenizer('''Do you support jax jitted function?''' , return_tensors=TensorType.JAX ) @jax.jit def eval(**SCREAMING_SNAKE_CASE ): return model(**SCREAMING_SNAKE_CASE ) eval(**SCREAMING_SNAKE_CASE ).block_until_ready() def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" with self.assertRaisesRegex( SCREAMING_SNAKE_CASE , '''bert-base is not a local folder and is not a valid model identifier''' ): A : List[Any] = FlaxAutoModel.from_pretrained('''bert-base''' ) def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" with self.assertRaisesRegex( SCREAMING_SNAKE_CASE , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ): A : Optional[int] = FlaxAutoModel.from_pretrained(SCREAMING_SNAKE_CASE , revision='''aaaaaa''' ) def __lowerCAmelCase ( self ) -> str: """simple docstring""" with self.assertRaisesRegex( SCREAMING_SNAKE_CASE , '''hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack''' , ): A : List[str] = FlaxAutoModel.from_pretrained('''hf-internal-testing/config-no-model''' ) def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" with self.assertRaisesRegex(SCREAMING_SNAKE_CASE , 
'''Use `from_pt=True` to load this model''' ): A : Any = FlaxAutoModel.from_pretrained('''hf-internal-testing/tiny-bert-pt-only''' )
3
0
from __future__ import annotations

Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = ([-1, 0], [0, -1], [1, 0], [0, 1])  # up, left, down, right


class Node:
    def __init__(self, pos_x, pos_y, goal_x, goal_y, g_cost, parent):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.f_cost = self.calculate_heuristic()

    def calculate_heuristic(self) -> float:
        # Manhattan distance to the goal
        dx = abs(self.pos_x - self.goal_x)
        dy = abs(self.pos_y - self.goal_y)
        return dx + dy

    def __lt__(self, other) -> bool:
        return self.f_cost < other.f_cost


class GreedyBestFirst:
    def __init__(self, start, goal):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)
        self.open_nodes = [self.start]
        self.closed_nodes = []
        self.reached = False

    def search(self) -> Path | None:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node) -> Path:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


if __name__ == "__main__":
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)
    print("------")
    greedy_bf = GreedyBestFirst(init, goal)
    path = greedy_bf.search()
    if path:
        for pos_x, pos_y in path:
            grid[pos_x][pos_y] = 2
    for elem in grid:
        print(elem)
338
'''simple docstring''' import argparse import re import torch from CLAP import create_model from transformers import AutoFeatureExtractor, ClapConfig, ClapModel lowercase : Union[str, Any] = { 'text_branch': 'text_model', 'audio_branch': 'audio_model.audio_encoder', 'attn': 'attention.self', 'self.proj': 'output.dense', 'attention.self_mask': 'attn_mask', 'mlp.fc1': 'intermediate.dense', 'mlp.fc2': 'output.dense', 'norm1': 'layernorm_before', 'norm2': 'layernorm_after', 'bn0': 'batch_norm', } lowercase : Tuple = AutoFeatureExtractor.from_pretrained('laion/clap-htsat-unfused', truncation='rand_trunc') def lowerCAmelCase_ ( snake_case__ , snake_case__=False ): '''simple docstring''' A, A : Tuple = create_model( '''HTSAT-tiny''' , '''roberta''' , snake_case__ , precision='''fp32''' , device='''cuda:0''' if torch.cuda.is_available() else '''cpu''' , enable_fusion=snake_case__ , fusion_type='''aff_2d''' if enable_fusion else None , ) return model, model_cfg def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' A : Dict = {} A : str = R'''.*sequential.(\d+).*''' A : Union[str, Any] = R'''.*_projection.(\d+).*''' for key, value in state_dict.items(): # check if any key needs to be modified for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items(): if key_to_modify in key: A : Any = key.replace(snake_case__ , snake_case__ ) if re.match(snake_case__ , snake_case__ ): # replace sequential layers with list A : Any = re.match(snake_case__ , snake_case__ ).group(1 ) A : List[str] = key.replace(F'sequential.{sequential_layer}.' , F'layers.{int(snake_case__ )//3}.linear.' ) elif re.match(snake_case__ , snake_case__ ): A : Union[str, Any] = int(re.match(snake_case__ , snake_case__ ).group(1 ) ) # Because in CLAP they use `nn.Sequential`... A : str = 1 if projecton_layer == 0 else 2 A : Optional[Any] = key.replace(F'_projection.{projecton_layer}.' , F'_projection.linear{transformers_projection_layer}.' ) if "audio" and "qkv" in key: # split qkv into query key and value A : int = value A : List[Any] = mixed_qkv.size(0 ) // 3 A : Union[str, Any] = mixed_qkv[:qkv_dim] A : Optional[int] = mixed_qkv[qkv_dim : qkv_dim * 2] A : Optional[int] = mixed_qkv[qkv_dim * 2 :] A : Tuple = query_layer A : Union[str, Any] = key_layer A : Optional[int] = value_layer else: A : Dict = value return model_state_dict def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__=False ): '''simple docstring''' A, A : int = init_clap(snake_case__ , enable_fusion=snake_case__ ) clap_model.eval() A : str = clap_model.state_dict() A : Union[str, Any] = rename_state_dict(snake_case__ ) A : Tuple = ClapConfig() A : str = enable_fusion A : str = ClapModel(snake_case__ ) # ignore the spectrogram embedding layer model.load_state_dict(snake_case__ , strict=snake_case__ ) model.save_pretrained(snake_case__ ) transformers_config.save_pretrained(snake_case__ ) if __name__ == "__main__": lowercase : List[str] = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') parser.add_argument('--enable_fusion', action='store_true', help='Whether to enable fusion or not') lowercase : Tuple = parser.parse_args() convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
3
0
edges = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
vertices = ["a", "b", "c", "d", "e"]


def topological_sort(start, visited, sort):
    """Perform a recursive depth-first topological sort on the module-level graph."""
    current = start
    # add current to visited
    visited.append(current)
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = topological_sort(neighbor, visited, sort)
    # if all neighbors visited add current to sort
    sort.append(current)
    # if all vertices haven't been visited select a new one to visit
    if len(visited) != len(vertices):
        for vertice in vertices:
            if vertice not in visited:
                sort = topological_sort(vertice, visited, sort)
    # return sort
    return sort


if __name__ == "__main__":
    sort = topological_sort("a", [], [])
    print(sort)
248
'''simple docstring''' import os from pickle import UnpicklingError from typing import Dict, Tuple import jax import jax.numpy as jnp import numpy as np from flax.serialization import from_bytes from flax.traverse_util import flatten_dict, unflatten_dict import transformers from .utils import logging lowercase : Dict = logging.get_logger(__name__) def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__=False ): '''simple docstring''' try: import torch # noqa: F401 except ImportError: logger.error( '''Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see''' ''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation''' ''' instructions.''' ) raise if not is_sharded: A : Union[str, Any] = os.path.abspath(snake_case__ ) logger.info(F'Loading PyTorch weights from {pt_path}' ) A : Any = torch.load(snake_case__ , map_location='''cpu''' ) logger.info(F'PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.' ) A : List[str] = convert_pytorch_state_dict_to_flax(snake_case__ , snake_case__ ) else: # model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files A : Any = convert_pytorch_sharded_state_dict_to_flax(snake_case__ , snake_case__ ) return flax_state_dict def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , ): '''simple docstring''' def is_key_or_prefix_key_in_dict(snake_case__ ) -> bool: return len(set(snake_case__ ) & {key, (model_prefix,) + key} ) > 0 # layer norm A : Union[str, Any] = pt_tuple_key[:-1] + ('''scale''',) if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(snake_case__ ): return renamed_pt_tuple_key, pt_tensor # batch norm layer mean A : Tuple = pt_tuple_key[:-1] + ('''mean''',) if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(snake_case__ ): return renamed_pt_tuple_key, pt_tensor # batch norm layer var A : Dict = pt_tuple_key[:-1] + ('''var''',) if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(snake_case__ ): return renamed_pt_tuple_key, pt_tensor # embedding A : Any = pt_tuple_key[:-1] + ('''embedding''',) if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(snake_case__ ): return renamed_pt_tuple_key, pt_tensor # conv layer A : Optional[Any] = pt_tuple_key[:-1] + ('''kernel''',) if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(snake_case__ ): A : List[Any] = pt_tensor.transpose(2 , 3 , 1 , 0 ) return renamed_pt_tuple_key, pt_tensor # linear layer A : Optional[int] = pt_tuple_key[:-1] + ('''kernel''',) if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(snake_case__ ): A : str = pt_tensor.T return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm weight A : Dict = pt_tuple_key[:-1] + ('''weight''',) if pt_tuple_key[-1] == "gamma": return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm bias A : List[Any] = pt_tuple_key[:-1] + ('''bias''',) if pt_tuple_key[-1] == "beta": return renamed_pt_tuple_key, pt_tensor # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030 A : Dict = None if pt_tuple_key[-3::2] == ("parametrizations", "original0"): A : List[Any] = pt_tuple_key[-2] + '''_g''' elif pt_tuple_key[-3::2] == ("parametrizations", "original1"): A : List[str] = pt_tuple_key[-2] + '''_v''' if name is not None: A : int = pt_tuple_key[:-3] + (name,) return renamed_pt_tuple_key, pt_tensor return 
pt_tuple_key, pt_tensor def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' A : Dict = {k: v.numpy() for k, v in pt_state_dict.items()} A : int = flax_model.base_model_prefix # use params dict if the model contains batch norm layers if "params" in flax_model.params: A : List[str] = flax_model.params['''params'''] else: A : Dict = flax_model.params A : List[Any] = flatten_dict(snake_case__ ) # add batch_stats keys,values to dict if "batch_stats" in flax_model.params: A : List[str] = flatten_dict(flax_model.params['''batch_stats'''] ) random_flax_state_dict.update(snake_case__ ) A : int = {} A : List[str] = (model_prefix not in flax_model_params) and ( model_prefix in {k.split('''.''' )[0] for k in pt_state_dict.keys()} ) A : int = (model_prefix in flax_model_params) and ( model_prefix not in {k.split('''.''' )[0] for k in pt_state_dict.keys()} ) # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): A : str = tuple(pt_key.split('''.''' ) ) # remove base model prefix if necessary A : Union[str, Any] = pt_tuple_key[0] == model_prefix if load_model_with_head_into_base_model and has_base_model_prefix: A : Any = pt_tuple_key[1:] # Correctly rename weight parameters A, A : Dict = rename_key_and_reshape_tensor( snake_case__ , snake_case__ , snake_case__ , snake_case__ ) # add model prefix if necessary A : Any = (model_prefix,) + flax_key in random_flax_state_dict if load_base_model_into_model_with_head and require_base_model_prefix: A : int = (model_prefix,) + flax_key if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( F'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape ' F'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' 
) # add batch stats if the model contains batchnorm layers if "batch_stats" in flax_model.params: if "mean" in flax_key[-1] or "var" in flax_key[-1]: A : Tuple = jnp.asarray(snake_case__ ) continue # remove num_batches_tracked key if "num_batches_tracked" in flax_key[-1]: flax_state_dict.pop(snake_case__ , snake_case__ ) continue # also add unexpected weight so that warning is thrown A : List[str] = jnp.asarray(snake_case__ ) else: # also add unexpected weight so that warning is thrown A : Optional[Any] = jnp.asarray(snake_case__ ) return unflatten_dict(snake_case__ ) def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' import torch # Load the index A : Union[str, Any] = {} for shard_file in shard_filenames: # load using msgpack utils A : List[str] = torch.load(snake_case__ ) A : int = {k: v.numpy() for k, v in pt_state_dict.items()} A : Tuple = flax_model.base_model_prefix # use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict if "batch_stats" in flax_model.params: A : Optional[int] = flax_model.params['''params'''] A : List[Any] = flatten_dict(snake_case__ ) random_flax_state_dict.update(flatten_dict(flax_model.params['''batch_stats'''] ) ) else: A : Dict = flax_model.params A : Tuple = flatten_dict(snake_case__ ) A : List[str] = (model_prefix not in flax_model_params) and ( model_prefix in {k.split('''.''' )[0] for k in pt_state_dict.keys()} ) A : List[str] = (model_prefix in flax_model_params) and ( model_prefix not in {k.split('''.''' )[0] for k in pt_state_dict.keys()} ) # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): A : int = tuple(pt_key.split('''.''' ) ) # remove base model prefix if necessary A : List[str] = pt_tuple_key[0] == model_prefix if load_model_with_head_into_base_model and has_base_model_prefix: A : Union[str, Any] = pt_tuple_key[1:] # Correctly rename weight parameters A, A : Any = rename_key_and_reshape_tensor( snake_case__ , snake_case__ , snake_case__ , snake_case__ ) # add model prefix if necessary A : int = (model_prefix,) + flax_key in random_flax_state_dict if load_base_model_into_model_with_head and require_base_model_prefix: A : int = (model_prefix,) + flax_key if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( F'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape ' F'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' 
) # add batch stats if the model contains batchnorm layers if "batch_stats" in flax_model.params: if "mean" in flax_key[-1]: A : Optional[int] = jnp.asarray(snake_case__ ) continue if "var" in flax_key[-1]: A : Optional[int] = jnp.asarray(snake_case__ ) continue # remove num_batches_tracked key if "num_batches_tracked" in flax_key[-1]: flax_state_dict.pop(snake_case__ , snake_case__ ) continue # also add unexpected weight so that warning is thrown A : Optional[Any] = jnp.asarray(snake_case__ ) else: # also add unexpected weight so that warning is thrown A : Optional[Any] = jnp.asarray(snake_case__ ) return unflatten_dict(snake_case__ ) def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' A : Dict = os.path.abspath(snake_case__ ) logger.info(F'Loading Flax weights from {flax_checkpoint_path}' ) # import correct flax class A : List[str] = getattr(snake_case__ , '''Flax''' + model.__class__.__name__ ) # load flax weight dict with open(snake_case__ , '''rb''' ) as state_f: try: A : int = from_bytes(snake_case__ , state_f.read() ) except UnpicklingError: raise EnvironmentError(F'Unable to convert {flax_checkpoint_path} to Flax deserializable object. ' ) return load_flax_weights_in_pytorch_model(snake_case__ , snake_case__ ) def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' try: import torch # noqa: F401 except ImportError: logger.error( '''Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see''' ''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation''' ''' instructions.''' ) raise # check if we have bf16 weights A : List[str] = flatten_dict(jax.tree_util.tree_map(lambda snake_case__ : x.dtype == jnp.bfloataa , snake_case__ ) ).values() if any(snake_case__ ): # convert all weights to fp32 if the are bf16 since torch.from_numpy can-not handle bf16 # and bf16 is not fully supported in PT yet. logger.warning( '''Found ``bfloat16`` weights in Flax model. 
Casting all ``bfloat16`` weights to ``float32`` ''' '''before loading those in PyTorch model.''' ) A : Optional[Any] = jax.tree_util.tree_map( lambda snake_case__ : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , snake_case__ ) A : Union[str, Any] = flatten_dict(snake_case__ ) A : List[Any] = pt_model.state_dict() A : Union[str, Any] = (pt_model.base_model_prefix in flax_state) and ( pt_model.base_model_prefix not in {k.split('''.''' )[0] for k in pt_model_dict.keys()} ) A : Tuple = (pt_model.base_model_prefix not in flax_state) and ( pt_model.base_model_prefix in {k.split('''.''' )[0] for k in pt_model_dict.keys()} ) # keep track of unexpected & missing keys A : int = [] A : Any = set(pt_model_dict.keys() ) for flax_key_tuple, flax_tensor in flax_state_dict.items(): A : Union[str, Any] = flax_key_tuple[0] == pt_model.base_model_prefix A : int = '''.'''.join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict # adapt flax_key to prepare for loading from/to base model only if load_model_with_head_into_base_model and has_base_model_prefix: A : List[str] = flax_key_tuple[1:] elif load_base_model_into_model_with_head and require_base_model_prefix: A : Optional[Any] = (pt_model.base_model_prefix,) + flax_key_tuple # rename flax weights to PyTorch format if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(snake_case__ ) not in pt_model_dict: # conv layer A : Optional[int] = flax_key_tuple[:-1] + ('''weight''',) A : Optional[int] = jnp.transpose(snake_case__ , (3, 2, 0, 1) ) elif flax_key_tuple[-1] == "kernel" and ".".join(snake_case__ ) not in pt_model_dict: # linear layer A : Tuple = flax_key_tuple[:-1] + ('''weight''',) A : Tuple = flax_tensor.T elif flax_key_tuple[-1] in ["scale", "embedding"]: A : Optional[int] = flax_key_tuple[:-1] + ('''weight''',) # adding batch stats from flax batch norm to pt elif "mean" in flax_key_tuple[-1]: A : Tuple = flax_key_tuple[:-1] + ('''running_mean''',) elif "var" in flax_key_tuple[-1]: A : Tuple = flax_key_tuple[:-1] + ('''running_var''',) if "batch_stats" in flax_state: A : List[Any] = '''.'''.join(flax_key_tuple[1:] ) # Remove the params/batch_stats header else: A : Union[str, Any] = '''.'''.join(snake_case__ ) # We also need to look at `pt_model_dict` and see if there are keys requiring further transformation. A : int = {} # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030 for key in pt_model_dict: A : Optional[int] = key.split('''.''' ) A : Dict = None if key_components[-3::2] == ["parametrizations", "original0"]: A : List[str] = key_components[-2] + '''_g''' elif key_components[-3::2] == ["parametrizations", "original1"]: A : List[Any] = key_components[-2] + '''_v''' if name is not None: A : str = key_components[:-3] + [name] A : Optional[Any] = '''.'''.join(snake_case__ ) A : Optional[Any] = key if flax_key in special_pt_names: A : Optional[Any] = special_pt_names[flax_key] if flax_key in pt_model_dict: if flax_tensor.shape != pt_model_dict[flax_key].shape: raise ValueError( F'Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected ' F'to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.' 
) else: # add weight to pytorch dict A : Dict = np.asarray(snake_case__ ) if not isinstance(snake_case__ , np.ndarray ) else flax_tensor A : Dict = torch.from_numpy(snake_case__ ) # remove from missing keys missing_keys.remove(snake_case__ ) else: # weight is not expected by PyTorch model unexpected_keys.append(snake_case__ ) pt_model.load_state_dict(snake_case__ ) # re-transform missing_keys to list A : List[Any] = list(snake_case__ ) if len(snake_case__ ) > 0: logger.warning( '''Some weights of the Flax model were not used when initializing the PyTorch model''' F' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing' F' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture' ''' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This''' F' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect' ''' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a''' ''' FlaxBertForSequenceClassification model).''' ) else: logger.warning(F'All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n' ) if len(snake_case__ ) > 0: logger.warning( F'Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly' F' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to' ''' use it for predictions and inference.''' ) else: logger.warning( F'All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n' '''If your task is similar to the task the model of the checkpoint was trained on, ''' F'you can already use {pt_model.__class__.__name__} for predictions without further training.' ) return pt_model
3
0
import os import shutil import tempfile import unittest import numpy as np from transformers import AutoTokenizer, BarkProcessor from transformers.testing_utils import require_torch, slow @require_torch class __snake_case ( unittest.TestCase ): def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' lowercase : str = '''ylacombe/bark-small''' lowercase : Optional[Any] = tempfile.mkdtemp() lowercase : Optional[int] = '''en_speaker_1''' lowercase : Dict = '''This is a test string''' lowercase : Dict = '''speaker_embeddings_path.json''' lowercase : List[str] = '''speaker_embeddings''' def _SCREAMING_SNAKE_CASE ( self ,**snake_case ): '''simple docstring''' return AutoTokenizer.from_pretrained(self.checkpoint ,**snake_case ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' shutil.rmtree(self.tmpdirname ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' lowercase : Any = self.get_tokenizer() lowercase : str = BarkProcessor(tokenizer=snake_case ) processor.save_pretrained(self.tmpdirname ) lowercase : Tuple = BarkProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer.get_vocab() ) @slow def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' lowercase : List[Any] = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint ,speaker_embeddings_dict_path=self.speaker_embeddings_dict_path ,) processor.save_pretrained( self.tmpdirname ,speaker_embeddings_dict_path=self.speaker_embeddings_dict_path ,speaker_embeddings_directory=self.speaker_embeddings_directory ,) lowercase : Dict = self.get_tokenizer(bos_token="""(BOS)""" ,eos_token="""(EOS)""" ) lowercase : Optional[Any] = BarkProcessor.from_pretrained( self.tmpdirname ,self.speaker_embeddings_dict_path ,bos_token="""(BOS)""" ,eos_token="""(EOS)""" ,) self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' lowercase : Any = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint ,speaker_embeddings_dict_path=self.speaker_embeddings_dict_path ,) lowercase : List[str] = 35 lowercase : Optional[Any] = 2 lowercase : str = 8 lowercase : str = { '''semantic_prompt''': np.ones(snake_case ), '''coarse_prompt''': np.ones((nb_codebooks_coarse, seq_len) ), '''fine_prompt''': np.ones((nb_codebooks_total, seq_len) ), } # test providing already loaded voice_preset lowercase : Dict = processor(text=self.input_string ,voice_preset=snake_case ) lowercase : Dict = inputs['''history_prompt'''] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() ,processed_voice_preset.get(snake_case ,np.array([] ) ).tolist() ) # test loading voice preset from npz file lowercase : Optional[int] = os.path.join(self.tmpdirname ,"""file.npz""" ) np.savez(snake_case ,**snake_case ) lowercase : Dict = processor(text=self.input_string ,voice_preset=snake_case ) lowercase : Optional[int] = inputs['''history_prompt'''] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() ,processed_voice_preset.get(snake_case ,np.array([] ) ).tolist() ) # test loading voice preset from the hub lowercase : Optional[Any] = processor(text=self.input_string ,voice_preset=self.voice_preset ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' lowercase : int = self.get_tokenizer() lowercase : Dict = BarkProcessor(tokenizer=snake_case ) lowercase : List[str] = processor(text=self.input_string ) lowercase : Union[str, Any] = tokenizer( self.input_string 
,padding="""max_length""" ,max_length=256 ,add_special_tokens=snake_case ,return_attention_mask=snake_case ,return_token_type_ids=snake_case ,) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] ,encoded_processor[key].squeeze().tolist() )
20
'''simple docstring''' import argparse import gc import json import os import re import torch from huggingface_hub import hf_hub_download from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint lowercase : Optional[int] = { '169M': 12, '430M': 24, '1B5': 24, '3B': 32, '7B': 32, '14B': 40, } lowercase : Optional[Any] = { '169M': 7_68, '430M': 10_24, '1B5': 20_48, '3B': 25_60, '7B': 40_96, '14B': 51_20, } def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' A : Optional[Any] = list(state_dict.keys() ) for name in state_dict_keys: A : str = state_dict.pop(snake_case__ ) # emb -> embedding if name.startswith('''emb.''' ): A : Optional[Any] = name.replace('''emb.''' , '''embeddings.''' ) # ln_0 -> pre_ln (only present at block 0) if name.startswith('''blocks.0.ln0''' ): A : Union[str, Any] = name.replace('''blocks.0.ln0''' , '''blocks.0.pre_ln''' ) # att -> attention A : int = re.sub(R'''blocks\.(\d+)\.att''' , R'''blocks.\1.attention''' , snake_case__ ) # ffn -> feed_forward A : List[Any] = re.sub(R'''blocks\.(\d+)\.ffn''' , R'''blocks.\1.feed_forward''' , snake_case__ ) # time_mix_k -> time_mix_key and reshape if name.endswith('''.time_mix_k''' ): A : List[str] = name.replace('''.time_mix_k''' , '''.time_mix_key''' ) # time_mix_v -> time_mix_value and reshape if name.endswith('''.time_mix_v''' ): A : Union[str, Any] = name.replace('''.time_mix_v''' , '''.time_mix_value''' ) # time_mix_r -> time_mix_key and reshape if name.endswith('''.time_mix_r''' ): A : Union[str, Any] = name.replace('''.time_mix_r''' , '''.time_mix_receptance''' ) if name != "head.weight": A : List[Any] = '''rwkv.''' + name A : Dict = weight return state_dict def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__=None , snake_case__=None , snake_case__=False , snake_case__=None ): '''simple docstring''' if tokenizer_file is None: print('''No `--tokenizer_file` provided, we will use the default tokenizer.''' ) A : int = 5_0277 A : Optional[int] = AutoTokenizer.from_pretrained('''EleutherAI/gpt-neox-20b''' ) else: A : str = PreTrainedTokenizerFast(tokenizer_file=snake_case__ ) A : Any = len(snake_case__ ) tokenizer.save_pretrained(snake_case__ ) # 2. Build the config A : List[str] = list(NUM_HIDDEN_LAYERS_MAPPING.keys() ) if size is None: # Try to infer size from the checkpoint name for candidate in possible_sizes: if candidate in checkpoint_file: A : List[str] = candidate break if size is None: raise ValueError('''Could not infer the size, please provide it with the `--size` argument.''' ) if size not in possible_sizes: raise ValueError(F'`size` should be one of {possible_sizes}, got {size}.' ) A : Any = RwkvConfig( vocab_size=snake_case__ , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDEN_SIZE_MAPPING[size] , ) config.save_pretrained(snake_case__ ) # 3. Download model file then convert state_dict A : Union[str, Any] = hf_hub_download(snake_case__ , snake_case__ ) A : Tuple = torch.load(snake_case__ , map_location='''cpu''' ) A : List[Any] = convert_state_dict(snake_case__ ) # 4. 
Split in shards and save A, A : List[str] = shard_checkpoint(snake_case__ ) for shard_file, shard in shards.items(): torch.save(snake_case__ , os.path.join(snake_case__ , snake_case__ ) ) if index is not None: A : Dict = os.path.join(snake_case__ , snake_case__ ) # Save the index as well with open(snake_case__ , '''w''' , encoding='''utf-8''' ) as f: A : List[Any] = json.dumps(snake_case__ , indent=2 , sort_keys=snake_case__ ) + '''\n''' f.write(snake_case__ ) # 5. Clean up shards (for some reason the file PyTorch saves take the same space as the whole state_dict print( '''Cleaning up shards. This may error with an OOM error, it this is the case don\'t worry you still have converted the model.''' ) A : List[Any] = list(shards.keys() ) del state_dict del shards gc.collect() for shard_file in shard_files: A : Union[str, Any] = torch.load(os.path.join(snake_case__ , snake_case__ ) ) torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(snake_case__ , snake_case__ ) ) del state_dict gc.collect() if push_to_hub: if model_name is None: raise ValueError('''Please provide a `model_name` to push the model to the Hub.''' ) A : int = AutoModelForCausalLM.from_pretrained(snake_case__ ) model.push_to_hub(snake_case__ , max_shard_size='''2GB''' ) tokenizer.push_to_hub(snake_case__ ) if __name__ == "__main__": lowercase : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--repo_id', default=None, type=str, required=True, help='Repo ID from which to pull the checkpoint.' ) parser.add_argument( '--checkpoint_file', default=None, type=str, required=True, help='Name of the checkpoint file in the repo.' ) parser.add_argument( '--output_dir', default=None, type=str, required=True, help='Where to save the converted model.' ) parser.add_argument( '--tokenizer_file', default=None, type=str, help='Path to the tokenizer file to use (if not provided, only the model is converted).', ) parser.add_argument( '--size', default=None, type=str, help='Size of the model. Will be inferred from the `checkpoint_file` if not passed.', ) parser.add_argument( '--push_to_hub', action='store_true', help='Push to the Hub the converted model.', ) parser.add_argument( '--model_name', default=None, type=str, help='Name of the pushed model on the Hub, including the username / organization.', ) lowercase : Union[str, Any] = parser.parse_args() convert_rmkv_checkpoint_to_hf_format( args.repo_id, args.checkpoint_file, args.output_dir, size=args.size, tokenizer_file=args.tokenizer_file, push_to_hub=args.push_to_hub, model_name=args.model_name, )
3
0
import gc import random import unittest import numpy as np import torch from PIL import Image from diffusers import ( DDIMScheduler, KandinskyVaaImgaImgPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class SCREAMING_SNAKE_CASE_ ( __snake_case , unittest.TestCase ): __magic_name__: Optional[int] = KandinskyVaaImgaImgPipeline __magic_name__: str = ["image_embeds", "negative_image_embeds", "image"] __magic_name__: Any = [ "image_embeds", "negative_image_embeds", "image", ] __magic_name__: Union[str, Any] = [ "generator", "height", "width", "strength", "guidance_scale", "num_inference_steps", "return_dict", "guidance_scale", "num_images_per_prompt", "output_type", "return_dict", ] __magic_name__: Optional[Any] = False @property def UpperCAmelCase_ ( self : List[Any] ) -> Tuple: """simple docstring""" return 32 @property def UpperCAmelCase_ ( self : Tuple ) -> Any: """simple docstring""" return 32 @property def UpperCAmelCase_ ( self : Tuple ) -> str: """simple docstring""" return self.time_input_dim @property def UpperCAmelCase_ ( self : int ) -> Union[str, Any]: """simple docstring""" return self.time_input_dim * 4 @property def UpperCAmelCase_ ( self : Union[str, Any] ) -> Optional[Any]: """simple docstring""" return 100 @property def UpperCAmelCase_ ( self : Any ) -> List[Any]: """simple docstring""" torch.manual_seed(0 ) snake_case_ : Union[str, Any] = { '''in_channels''': 4, # Out channels is double in channels because predicts mean and variance '''out_channels''': 8, '''addition_embed_type''': '''image''', '''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''), '''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''), '''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''', '''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2), '''layers_per_block''': 1, '''encoder_hid_dim''': self.text_embedder_hidden_size, '''encoder_hid_dim_type''': '''image_proj''', '''cross_attention_dim''': self.cross_attention_dim, '''attention_head_dim''': 4, '''resnet_time_scale_shift''': '''scale_shift''', '''class_embed_type''': None, } snake_case_ : Any = UNetaDConditionModel(**_A ) return model @property def UpperCAmelCase_ ( self : int ) -> int: """simple docstring""" return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def UpperCAmelCase_ ( self : Union[str, Any] ) -> Optional[Any]: """simple docstring""" torch.manual_seed(0 ) snake_case_ : Optional[Any] = VQModel(**self.dummy_movq_kwargs ) return model def UpperCAmelCase_ ( self : Dict ) -> Any: """simple docstring""" snake_case_ : Optional[int] = self.dummy_unet snake_case_ : List[Any] = self.dummy_movq snake_case_ : Optional[Any] = { '''num_train_timesteps''': 1000, '''beta_schedule''': '''linear''', '''beta_start''': 0.0_0_0_8_5, '''beta_end''': 0.0_1_2, '''clip_sample''': False, '''set_alpha_to_one''': False, '''steps_offset''': 0, '''prediction_type''': 
'''epsilon''', '''thresholding''': False, } snake_case_ : List[Any] = DDIMScheduler(**_A ) snake_case_ : Union[str, Any] = { '''unet''': unet, '''scheduler''': scheduler, '''movq''': movq, } return components def UpperCAmelCase_ ( self : Dict , _A : Dict , _A : str=0 ) -> str: """simple docstring""" snake_case_ : Optional[int] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(_A ) ).to(_A ) snake_case_ : str = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to( _A ) # create init_image snake_case_ : Dict = floats_tensor((1, 3, 64, 64) , rng=random.Random(_A ) ).to(_A ) snake_case_ : str = image.cpu().permute(0 , 2 , 3 , 1 )[0] snake_case_ : Union[str, Any] = Image.fromarray(np.uinta(_A ) ).convert('RGB' ).resize((256, 256) ) if str(_A ).startswith('mps' ): snake_case_ : int = torch.manual_seed(_A ) else: snake_case_ : Tuple = torch.Generator(device=_A ).manual_seed(_A ) snake_case_ : Tuple = { '''image''': init_image, '''image_embeds''': image_embeds, '''negative_image_embeds''': negative_image_embeds, '''generator''': generator, '''height''': 64, '''width''': 64, '''num_inference_steps''': 10, '''guidance_scale''': 7.0, '''strength''': 0.2, '''output_type''': '''np''', } return inputs def UpperCAmelCase_ ( self : Tuple ) -> Any: """simple docstring""" snake_case_ : Optional[Any] = '''cpu''' snake_case_ : Tuple = self.get_dummy_components() snake_case_ : List[str] = self.pipeline_class(**_A ) snake_case_ : Any = pipe.to(_A ) pipe.set_progress_bar_config(disable=_A ) snake_case_ : Optional[Any] = pipe(**self.get_dummy_inputs(_A ) ) snake_case_ : List[Any] = output.images snake_case_ : Union[str, Any] = pipe( **self.get_dummy_inputs(_A ) , return_dict=_A , )[0] snake_case_ : List[str] = image[0, -3:, -3:, -1] snake_case_ : Any = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) snake_case_ : Optional[int] = np.array( [0.6_1_9_9_7_7_8, 0.6_3_9_8_4_4_0_6, 0.4_6_1_4_5_7_8_5, 0.6_2_9_4_4_9_8_4, 0.5_6_2_2_2_1_5, 0.4_7_3_0_6_1_3_2, 0.4_7_4_4_1_4_5_6, 0.4_6_0_7_6_0_6, 0.4_8_7_1_9_2_6_3] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 ), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}""" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 ), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}""" @slow @require_torch_gpu class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): def UpperCAmelCase_ ( self : Optional[int] ) -> Optional[int]: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCAmelCase_ ( self : str ) -> Dict: """simple docstring""" snake_case_ : int = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinskyv22/kandinskyv22_img2img_frog.npy' ) snake_case_ : Dict = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' ) snake_case_ : int = '''A red cartoon frog, 4k''' snake_case_ : Tuple = KandinskyVaaPriorPipeline.from_pretrained( 'kandinsky-community/kandinsky-2-2-prior' , torch_dtype=torch.floataa ) pipe_prior.to(_A ) snake_case_ : Optional[Any] = KandinskyVaaImgaImgPipeline.from_pretrained( 'kandinsky-community/kandinsky-2-2-decoder' , torch_dtype=torch.floataa ) snake_case_ : Dict = pipeline.to(_A ) pipeline.set_progress_bar_config(disable=_A ) snake_case_ : str = torch.Generator(device='cpu' ).manual_seed(0 ) snake_case_ : List[str] = pipe_prior( _A , 
generator=_A , num_inference_steps=5 , negative_prompt='' , ).to_tuple() snake_case_ : Optional[int] = pipeline( image=_A , image_embeds=_A , negative_image_embeds=_A , generator=_A , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type='np' , ) snake_case_ : List[Any] = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(_A , _A )
327
'''simple docstring''' from typing import Union from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING lowercase : str = logging.get_logger(__name__) @add_end_docstrings(__snake_case ) class A ( __snake_case ): def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> List[Any]: """simple docstring""" super().__init__(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) self.check_model_type(SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , **SCREAMING_SNAKE_CASE ) -> Tuple: """simple docstring""" A, A : Dict = {}, {} if padding is not None: A : List[str] = padding if truncation is not None: A : Dict = truncation if top_k is not None: A : Optional[Any] = top_k return preprocess_params, {}, postprocess_params def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" if isinstance(SCREAMING_SNAKE_CASE , (Image.Image, str) ) and isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): A : int = {'''image''': image, '''question''': question} else: A : Any = image A : Any = super().__call__(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) return results def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=False ) -> Any: """simple docstring""" A : Union[str, Any] = load_image(inputs['''image'''] ) A : Optional[Any] = self.tokenizer( inputs['''question'''] , return_tensors=self.framework , padding=SCREAMING_SNAKE_CASE , truncation=SCREAMING_SNAKE_CASE ) A : Dict = self.image_processor(images=SCREAMING_SNAKE_CASE , return_tensors=self.framework ) model_inputs.update(SCREAMING_SNAKE_CASE ) return model_inputs def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> List[str]: """simple docstring""" A : List[Any] = self.model(**SCREAMING_SNAKE_CASE ) return model_outputs def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=5 ) -> int: """simple docstring""" if top_k > self.model.config.num_labels: A : Dict = self.model.config.num_labels if self.framework == "pt": A : Optional[int] = model_outputs.logits.sigmoid()[0] A, A : int = probs.topk(SCREAMING_SNAKE_CASE ) else: raise ValueError(F'Unsupported framework: {self.framework}' ) A : int = scores.tolist() A : List[str] = ids.tolist() return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )]
3
0
"""simple docstring""" from __future__ import annotations from math import pi, sqrt def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' if inductance <= 0: raise ValueError("""Inductance cannot be 0 or negative""" ) elif capacitance <= 0: raise ValueError("""Capacitance cannot be 0 or negative""" ) else: return ( "Resonant frequency", float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ), ) if __name__ == "__main__": import doctest doctest.testmod()
294
'''simple docstring''' from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowercase : Union[str, Any] = logging.get_logger(__name__) lowercase : str = { 'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/config.json', 'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/config.json', 'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/config.json', 'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/config.json', 'bert-base-multilingual-uncased': 'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json', 'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json', 'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/config.json', 'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/config.json', 'bert-large-uncased-whole-word-masking': ( 'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json' ), 'bert-large-cased-whole-word-masking': ( 'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json' ), 'bert-large-uncased-whole-word-masking-finetuned-squad': ( 'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json' ), 'bert-large-cased-whole-word-masking-finetuned-squad': ( 'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json' ), 'bert-base-cased-finetuned-mrpc': 'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json', 'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json', 'bert-base-german-dbmdz-uncased': 'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json', 'cl-tohoku/bert-base-japanese': 'https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json', 'cl-tohoku/bert-base-japanese-whole-word-masking': ( 'https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json' ), 'cl-tohoku/bert-base-japanese-char': ( 'https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json' ), 'cl-tohoku/bert-base-japanese-char-whole-word-masking': ( 'https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json' ), 'TurkuNLP/bert-base-finnish-cased-v1': ( 'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json' ), 'TurkuNLP/bert-base-finnish-uncased-v1': ( 'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json' ), 'wietsedv/bert-base-dutch-cased': 'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json', # See all BERT models at https://huggingface.co/models?filter=bert } class A ( __snake_case ): __magic_name__ = '''bert''' def __init__( self , SCREAMING_SNAKE_CASE=30522 , SCREAMING_SNAKE_CASE=768 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=3072 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=512 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=1e-12 , SCREAMING_SNAKE_CASE=0 , SCREAMING_SNAKE_CASE="absolute" , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=None , **SCREAMING_SNAKE_CASE , ) -> 
Optional[int]: """simple docstring""" super().__init__(pad_token_id=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) A : Optional[int] = vocab_size A : Optional[Any] = hidden_size A : List[Any] = num_hidden_layers A : List[str] = num_attention_heads A : Dict = hidden_act A : Optional[Any] = intermediate_size A : List[Any] = hidden_dropout_prob A : List[Any] = attention_probs_dropout_prob A : Optional[Any] = max_position_embeddings A : List[str] = type_vocab_size A : Dict = initializer_range A : str = layer_norm_eps A : int = position_embedding_type A : Dict = use_cache A : str = classifier_dropout class A ( __snake_case ): @property def __lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" if self.task == "multiple-choice": A : Optional[Any] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: A : Optional[int] = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ('''token_type_ids''', dynamic_axis), ] )
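# A hedged sketch of how a config like the one above is used in practice.
# `BertConfig` and `BertModel` here are the upstream `transformers` classes the
# obfuscated snippet mirrors, not names defined in the snippet itself.
from transformers import BertConfig, BertModel

config = BertConfig(vocab_size=30522, hidden_size=768, num_hidden_layers=12)
model = BertModel(config)          # randomly initialised weights, no download
print(config.num_attention_heads)  # 12 by default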
3
0
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_rembert import RemBertTokenizer else: _a : Optional[Any] = None _a : Tuple = logging.get_logger(__name__) _a : Dict = {'vocab_file': 'sentencepiece.model', 'tokenizer_file': 'tokenizer.json'} _a : Tuple = { 'vocab_file': { 'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/sentencepiece.model', }, 'tokenizer_file': { 'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/tokenizer.json', }, } _a : List[str] = { 'google/rembert': 256, } _a : Dict = '▁' class __A ( __snake_case ): _UpperCamelCase : List[Any] = VOCAB_FILES_NAMES _UpperCamelCase : Any = PRETRAINED_VOCAB_FILES_MAP _UpperCamelCase : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCamelCase : Union[str, Any] = RemBertTokenizer def __init__( self , a__=None , a__=None , a__=True , a__=True , a__=False , a__="[CLS]" , a__="[SEP]" , a__="<unk>" , a__="[SEP]" , a__="<pad>" , a__="[CLS]" , a__="[MASK]" , **a__ , ): _lowerCAmelCase : Optional[Any] = AddedToken(a__ , lstrip=a__ , rstrip=a__ ) if isinstance(a__ , a__ ) else mask_token super().__init__( a__ , tokenizer_file=a__ , do_lower_case=a__ , remove_space=a__ , keep_accents=a__ , bos_token=a__ , eos_token=a__ , unk_token=a__ , sep_token=a__ , pad_token=a__ , cls_token=a__ , mask_token=a__ , **a__ , ) _lowerCAmelCase : List[Any] = do_lower_case _lowerCAmelCase : str = remove_space _lowerCAmelCase : int = keep_accents _lowerCAmelCase : Union[str, Any] = vocab_file _lowerCAmelCase : List[Any] = False if not self.vocab_file else True def __A ( self , a__ , a__ = None ): _lowerCAmelCase : List[Any] = [self.sep_token_id] _lowerCAmelCase : Tuple = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def __A ( self , a__ , a__ = None , a__ = False ): if already_has_special_tokens: if token_ids_a is not None: raise ValueError( """You should not supply a second sequence if the provided sequence of """ """ids is already formatted with special tokens for the model.""" ) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is not None: return [1] + ([0] * len(a__ )) + [1] + ([0] * len(a__ )) + [1] return [1] + ([0] * len(a__ )) + [1] def __A ( self , a__ , a__ = None ): _lowerCAmelCase : Tuple = [self.sep_token_id] _lowerCAmelCase : Dict = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __A ( self , a__ , a__ = None ): if not os.path.isdir(a__ ): logger.error("""Vocabulary path ({}) should be a directory""".format(a__ ) ) return _lowerCAmelCase : Any = os.path.join( a__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(a__ ): copyfile(self.vocab_file , a__ ) return (out_vocab_file,)
44
"""Fetch the citation count of a paper from Google Scholar."""
import requests
from bs4 import BeautifulSoup


def get_citation(base_url: str, params: dict) -> str:
    soup = BeautifulSoup(requests.get(base_url, params=params).content, "html.parser")
    div = soup.find("div", attrs={"class": "gs_ri"})
    anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a")
    return anchors[2].get_text()


if __name__ == "__main__":
    params = {
        "title": (
            "Precisely geometry controlled microsupercapacitors for ultrahigh areal "
            "capacitance, volumetric capacitance, and energy density"
        ),
        "journal": "Chem. Mater.",
        "volume": 30,
        "pages": "3979-3990",
        "year": 2018,
        "hl": "en",
    }
    print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
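# The scraper above assumes the third anchor in the `gs_fl` footer is the
# "Cited by N" link. A slightly more defensive variant (still an assumption
# about Google Scholar's markup) searches the anchor text instead of relying
# on position:
def find_cited_by(anchors) -> str:
    for a in anchors:
        if a.get_text().startswith("Cited by"):
            return a.get_text()
    raise ValueError("No citation anchor found")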
3
0
import argparse import json import os import tensorstore as ts import torch from flax import serialization from flax.traverse_util import flatten_dict, unflatten_dict from tensorflow.io import gfile from transformers.modeling_utils import dtype_byte_size from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import ( rename_keys, ) from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME from transformers.utils.hub import convert_file_size_to_int def A ( _lowercase , _lowercase ): if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3: # expert layer SCREAMING_SNAKE_CASE : Union[str, Any] = flax_key_tuple[:-1] + ('''weight''',) SCREAMING_SNAKE_CASE : Tuple = torch.permute(snake_case__ , (0, 2, 1) ) elif flax_key_tuple[-1] == "kernel" and ".".join(snake_case__ ): # linear layer SCREAMING_SNAKE_CASE : Any = flax_key_tuple[:-1] + ('''weight''',) SCREAMING_SNAKE_CASE : Union[str, Any] = flax_tensor.T elif flax_key_tuple[-1] in ["scale", "embedding"]: SCREAMING_SNAKE_CASE : int = flax_key_tuple[:-1] + ('''weight''',) return flax_key_tuple, flax_tensor def A ( _lowercase , _lowercase , _lowercase ): if "metadata" in layer: SCREAMING_SNAKE_CASE : Union[str, Any] = layer.split('''metadata''' ) SCREAMING_SNAKE_CASE : List[Any] = ''''''.join(split_layer[0] )[:-1] SCREAMING_SNAKE_CASE : int = [tuple(('''metadata''' + split_layer[1]).split('''/''' ) )] elif "kvstore" in layer: SCREAMING_SNAKE_CASE : Any = layer.split('''kvstore''' ) SCREAMING_SNAKE_CASE : List[Any] = ''''''.join(split_layer[0] )[:-1] SCREAMING_SNAKE_CASE : Optional[Any] = [tuple(('''kvstore''' + split_layer[1]).split('''/''' ) )] else: SCREAMING_SNAKE_CASE : Union[str, Any] = layer.split('''/''' ) SCREAMING_SNAKE_CASE : Optional[int] = '''/'''.join(split_layer[:-1] ) SCREAMING_SNAKE_CASE : Optional[Any] = (split_layer[-1],) if "kvstore/path" in layer: SCREAMING_SNAKE_CASE : int = f"""{switch_checkpoint_path}/{checkpoint_info[layer]}""" elif "kvstore/driver" in layer: SCREAMING_SNAKE_CASE : int = '''file''' else: SCREAMING_SNAKE_CASE : Any = checkpoint_info[layer] return curr_real_layer_name, split_layer, content def A ( _lowercase , _lowercase ): SCREAMING_SNAKE_CASE : Optional[int] = rename_keys(snake_case__ ) SCREAMING_SNAKE_CASE : Tuple = {} for k, v in current_block.items(): SCREAMING_SNAKE_CASE : Tuple = v SCREAMING_SNAKE_CASE : List[str] = new_current_block torch.save(snake_case__ , snake_case__ ) def A ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase = WEIGHTS_NAME ): SCREAMING_SNAKE_CASE : Dict = convert_file_size_to_int(snake_case__ ) SCREAMING_SNAKE_CASE : str = [] SCREAMING_SNAKE_CASE : str = {} SCREAMING_SNAKE_CASE : Any = 0 SCREAMING_SNAKE_CASE : List[str] = 0 os.makedirs(snake_case__ , exist_ok=snake_case__ ) with gfile.GFile(switch_checkpoint_path + '''/checkpoint''' , '''rb''' ) as fp: SCREAMING_SNAKE_CASE : Tuple = serialization.msgpack_restore(fp.read() )['''optimizer''']['''target'''] SCREAMING_SNAKE_CASE : List[Any] = flatten_dict(snake_case__ , sep='''/''' ) SCREAMING_SNAKE_CASE : List[str] = {} for layer in checkpoint_info.keys(): SCREAMING_SNAKE_CASE : Tuple = get_key_and_tensorstore_dict( snake_case__ , snake_case__ , snake_case__ ) if curr_real_layer_name in all_layers: SCREAMING_SNAKE_CASE : List[str] = content else: SCREAMING_SNAKE_CASE : Optional[Any] = {split_layer[-1]: content} for key in all_layers.keys(): # open tensorstore file SCREAMING_SNAKE_CASE : Any = ts.open(unflatten_dict(all_layers[key] ) 
).result().read().result() SCREAMING_SNAKE_CASE : List[Any] = torch.tensor(snake_case__ ) SCREAMING_SNAKE_CASE : int = raw_weights.numel() * dtype_byte_size(raw_weights.dtype ) # use the renaming pattern from the small conversion scripts SCREAMING_SNAKE_CASE : int = rename_base_flax_keys(tuple(key.split('''/''' ) ) , snake_case__ ) SCREAMING_SNAKE_CASE : Union[str, Any] = '''/'''.join(snake_case__ ) # If this weight is going to tip up over the maximal size, we split. if current_block_size + weight_size > max_shard_size: SCREAMING_SNAKE_CASE : List[Any] = os.path.join( snake_case__ , weights_name.replace('''.bin''' , f"""-{len(snake_case__ )+1:05d}-of-???.bin""" ) ) rename_and_save_block(snake_case__ , snake_case__ ) sharded_state_dicts.append(current_block.keys() ) del current_block SCREAMING_SNAKE_CASE : Dict = {} SCREAMING_SNAKE_CASE : List[Any] = 0 SCREAMING_SNAKE_CASE : List[Any] = raw_weights.to(getattr(snake_case__ , snake_case__ ) ) current_block_size += weight_size total_size += weight_size # Add the last block SCREAMING_SNAKE_CASE : Optional[int] = os.path.join(snake_case__ , weights_name.replace('''.bin''' , f"""-{len(snake_case__ )+1:05d}-of-???.bin""" ) ) rename_and_save_block(snake_case__ , snake_case__ ) sharded_state_dicts.append(current_block.keys() ) # If we only have one shard, we return it if len(snake_case__ ) == 1: return {weights_name: sharded_state_dicts[0]}, None # Otherwise, let's build the index SCREAMING_SNAKE_CASE : Union[str, Any] = {} SCREAMING_SNAKE_CASE : List[str] = {} for idx, shard in enumerate(snake_case__ ): SCREAMING_SNAKE_CASE : int = weights_name.replace( '''.bin''' , f"""-{idx+1:05d}-of-{len(snake_case__ ):05d}.bin""" ) # len(sharded_state_dicts):05d} SCREAMING_SNAKE_CASE : Union[str, Any] = os.path.join(snake_case__ , weights_name.replace('''.bin''' , f"""-{idx+1:05d}-of-???.bin""" ) ) os.rename(snake_case__ , os.path.join(snake_case__ , snake_case__ ) ) SCREAMING_SNAKE_CASE : str = shard for key in shard: SCREAMING_SNAKE_CASE : Tuple = shard_file # Add the metadata SCREAMING_SNAKE_CASE : Tuple = {'''total_size''': total_size} SCREAMING_SNAKE_CASE : Optional[int] = {'''metadata''': metadata, '''weight_map''': weight_map} with open(os.path.join(snake_case__ , snake_case__ ) , '''w''' , encoding='''utf-8''' ) as f: SCREAMING_SNAKE_CASE : Union[str, Any] = json.dumps(snake_case__ , indent=2 , sort_keys=snake_case__ ) + '''\n''' f.write(snake_case__ ) return metadata, index if __name__ == "__main__": __UpperCamelCase : Optional[int] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--switch_t5x_checkpoint_path', default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600', type=str, required=False, help='Path to a directory containing a folder per layer. 
Follows the original Google format.', ) parser.add_argument('--max_shard_size', default='10GB', required=False, help='Max shard size') parser.add_argument('--dtype', default='bfloat16', type=str, required=False, help='dtype of the saved model') parser.add_argument( '--pytorch_dump_folder_path', default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted', type=str, required=False, help='Path to the output pytorch model.', ) __UpperCamelCase : List[str] = parser.parse_args() shard_on_the_fly( args.switch_tax_checkpoint_path, args.pytorch_dump_folder_path, args.max_shard_size, args.dtype, ) def A ( ): from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer SCREAMING_SNAKE_CASE : Optional[Any] = SwitchTransformersConfig.from_pretrained('''google/switch-base-8''' ) config.save_pretrained('''/home/arthur_huggingface_co/transformers/switch_converted''' ) SCREAMING_SNAKE_CASE : Any = SwitchTransformersForConditionalGeneration.from_pretrained( '''/home/arthur_huggingface_co/transformers/switch_converted''' , device_map='''auto''' ) SCREAMING_SNAKE_CASE : Any = TaTokenizer.from_pretrained('''t5-small''' ) SCREAMING_SNAKE_CASE : Union[str, Any] = '''A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.''' SCREAMING_SNAKE_CASE : List[Any] = tokenizer(snake_case__ , return_tensors='''pt''' ).input_ids SCREAMING_SNAKE_CASE : Optional[Any] = model.generate(snake_case__ , decoder_start_token_id=0 ) print(tokenizer.decode(out[0] ) )
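# The core of the conversion above is the size-based sharding loop: weights are
# packed greedily into shards until the next tensor would exceed the cap. A
# hedged, self-contained sketch of that pattern (names and sizes are made up):
def shard_by_size(weights, max_shard_size):
    """Greedily pack (name, nbytes) pairs into shards of at most max_shard_size bytes."""
    shards, current, current_size = [], {}, 0
    for name, nbytes in weights:
        if current and current_size + nbytes > max_shard_size:
            shards.append(current)
            current, current_size = {}, 0
        current[name] = nbytes
        current_size += nbytes
    if current:
        shards.append(current)
    return shards

print(shard_by_size([("a", 6), ("b", 5), ("c", 4)], max_shard_size=10))
# [{'a': 6}, {'b': 5, 'c': 4}]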
182
'''simple docstring''' class A : def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Tuple: """simple docstring""" A : Any = None A : Optional[Any] = None A : Tuple = graph self._normalize_graph(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) A : Dict = len(SCREAMING_SNAKE_CASE ) A : Optional[Any] = None def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[Any]: """simple docstring""" if sources is int: A : Dict = [sources] if sinks is int: A : str = [sinks] if len(SCREAMING_SNAKE_CASE ) == 0 or len(SCREAMING_SNAKE_CASE ) == 0: return A : Optional[int] = sources[0] A : Union[str, Any] = sinks[0] # make fake vertex if there are more # than one source or sink if len(SCREAMING_SNAKE_CASE ) > 1 or len(SCREAMING_SNAKE_CASE ) > 1: A : Optional[int] = 0 for i in sources: max_input_flow += sum(self.graph[i] ) A : Dict = len(self.graph ) + 1 for room in self.graph: room.insert(0 , 0 ) self.graph.insert(0 , [0] * size ) for i in sources: A : Dict = max_input_flow A : Tuple = 0 A : Tuple = len(self.graph ) + 1 for room in self.graph: room.append(0 ) self.graph.append([0] * size ) for i in sinks: A : Optional[Any] = max_input_flow A : Optional[Any] = size - 1 def __lowerCAmelCase ( self ) -> Any: """simple docstring""" if self.maximum_flow_algorithm is None: raise Exception('''You need to set maximum flow algorithm before.''' ) if self.source_index is None or self.sink_index is None: return 0 self.maximum_flow_algorithm.execute() return self.maximum_flow_algorithm.getMaximumFlow() def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Union[str, Any]: """simple docstring""" A : List[Any] = algorithm(self ) class A : def __init__( self , SCREAMING_SNAKE_CASE ) -> Union[str, Any]: """simple docstring""" A : Union[str, Any] = flow_network A : Optional[Any] = flow_network.verticesCount A : Tuple = flow_network.sourceIndex A : Dict = flow_network.sinkIndex # it's just a reference, so you shouldn't change # it in your algorithms, use deep copy before doing that A : str = flow_network.graph A : Optional[Any] = False def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" if not self.executed: self._algorithm() A : Optional[int] = True def __lowerCAmelCase ( self ) -> Any: """simple docstring""" pass class A ( __snake_case ): def __init__( self , SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" super().__init__(SCREAMING_SNAKE_CASE ) # use this to save your result A : List[str] = -1 def __lowerCAmelCase ( self ) -> str: """simple docstring""" if not self.executed: raise Exception('''You should execute algorithm before using its result!''' ) return self.maximum_flow class A ( __snake_case ): def __init__( self , SCREAMING_SNAKE_CASE ) -> List[Any]: """simple docstring""" super().__init__(SCREAMING_SNAKE_CASE ) A : Optional[Any] = [[0] * self.verticies_count for i in range(self.verticies_count )] A : Union[str, Any] = [0] * self.verticies_count A : List[Any] = [0] * self.verticies_count def __lowerCAmelCase ( self ) -> Dict: """simple docstring""" A : Tuple = self.verticies_count # push some substance to graph for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ): self.preflow[self.source_index][nextvertex_index] += bandwidth self.preflow[nextvertex_index][self.source_index] -= bandwidth self.excesses[nextvertex_index] += bandwidth # Relabel-to-front selection rule A : Optional[Any] = [ i for i in range(self.verticies_count ) if i != self.source_index and i != self.sink_index ] # move through list 
A : Union[str, Any] = 0 while i < len(SCREAMING_SNAKE_CASE ): A : str = vertices_list[i] A : List[str] = self.heights[vertex_index] self.process_vertex(SCREAMING_SNAKE_CASE ) if self.heights[vertex_index] > previous_height: # if it was relabeled, swap elements # and start from 0 index vertices_list.insert(0 , vertices_list.pop(SCREAMING_SNAKE_CASE ) ) A : int = 0 else: i += 1 A : Optional[Any] = sum(self.preflow[self.source_index] ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Tuple: """simple docstring""" while self.excesses[vertex_index] > 0: for neighbour_index in range(self.verticies_count ): # if it's neighbour and current vertex is higher if ( self.graph[vertex_index][neighbour_index] - self.preflow[vertex_index][neighbour_index] > 0 and self.heights[vertex_index] > self.heights[neighbour_index] ): self.push(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) self.relabel(SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Union[str, Any]: """simple docstring""" A : Dict = min( self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , ) self.preflow[from_index][to_index] += preflow_delta self.preflow[to_index][from_index] -= preflow_delta self.excesses[from_index] -= preflow_delta self.excesses[to_index] += preflow_delta def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> int: """simple docstring""" A : Dict = None for to_index in range(self.verticies_count ): if ( self.graph[vertex_index][to_index] - self.preflow[vertex_index][to_index] > 0 ) and (min_height is None or self.heights[to_index] < min_height): A : Dict = self.heights[to_index] if min_height is not None: A : Dict = min_height + 1 if __name__ == "__main__": lowercase : Optional[int] = [0] lowercase : List[Any] = [3] # graph = [ # [0, 0, 4, 6, 0, 0], # [0, 0, 5, 2, 0, 0], # [0, 0, 0, 0, 4, 4], # [0, 0, 0, 0, 6, 6], # [0, 0, 0, 0, 0, 0], # [0, 0, 0, 0, 0, 0], # ] lowercase : int = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]] # prepare our network lowercase : List[str] = FlowNetwork(graph, entrances, exits) # set algorithm flow_network.set_maximum_flow_algorithm(PushRelabelExecutor) # and calculate lowercase : List[str] = flow_network.find_maximum_flow() print(f'''maximum flow is {maximum_flow}''')
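# A hedged cross-check for the push-relabel implementation above: a compact
# Edmonds-Karp (BFS augmenting paths) max-flow on the same 4-node graph should
# report the same value.
from collections import deque


def edmonds_karp(capacity, s, t):
    n = len(capacity)
    flow = [[0] * n for _ in range(n)]
    total = 0
    while True:
        # BFS for a shortest augmenting path in the residual graph
        parent = [-1] * n
        parent[s] = s
        q = deque([s])
        while q and parent[t] == -1:
            u = q.popleft()
            for v in range(n):
                if parent[v] == -1 and capacity[u][v] - flow[u][v] > 0:
                    parent[v] = u
                    q.append(v)
        if parent[t] == -1:
            break
        # find the bottleneck along the path, then augment
        bottleneck, v = float("inf"), t
        while v != s:
            u = parent[v]
            bottleneck = min(bottleneck, capacity[u][v] - flow[u][v])
            v = u
        v = t
        while v != s:
            u = parent[v]
            flow[u][v] += bottleneck
            flow[v][u] -= bottleneck
            v = u
        total += bottleneck
    return total


graph = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
print(edmonds_karp(graph, 0, 3))  # expected to match the push-relabel result (6)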
3
0
'''simple docstring''' import argparse import re import torch from CLAP import create_model from transformers import AutoFeatureExtractor, ClapConfig, ClapModel _lowercase : Union[str, Any] = { 'text_branch': 'text_model', 'audio_branch': 'audio_model.audio_encoder', 'attn': 'attention.self', 'self.proj': 'output.dense', 'attention.self_mask': 'attn_mask', 'mlp.fc1': 'intermediate.dense', 'mlp.fc2': 'output.dense', 'norm1': 'layernorm_before', 'norm2': 'layernorm_after', 'bn0': 'batch_norm', } _lowercase : Tuple = AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused", truncation="rand_trunc") def snake_case_ ( __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : int=False ): """simple docstring""" lowercase_ : Tuple = create_model( '''HTSAT-tiny''' , '''roberta''' , snake_case__ , precision='''fp32''' , device='''cuda:0''' if torch.cuda.is_available() else '''cpu''' , enable_fusion=snake_case__ , fusion_type='''aff_2d''' if enable_fusion else None , ) return model, model_cfg def snake_case_ ( __SCREAMING_SNAKE_CASE : Dict ): """simple docstring""" lowercase_ : Dict = {} lowercase_ : str = R'''.*sequential.(\d+).*''' lowercase_ : Union[str, Any] = R'''.*_projection.(\d+).*''' for key, value in state_dict.items(): # check if any key needs to be modified for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items(): if key_to_modify in key: lowercase_ : Any = key.replace(snake_case__ , snake_case__ ) if re.match(snake_case__ , snake_case__ ): # replace sequential layers with list lowercase_ : Any = re.match(snake_case__ , snake_case__ ).group(1 ) lowercase_ : List[str] = key.replace(F'''sequential.{sequential_layer}.''' , F'''layers.{int(snake_case__ )//3}.linear.''' ) elif re.match(snake_case__ , snake_case__ ): lowercase_ : Union[str, Any] = int(re.match(snake_case__ , snake_case__ ).group(1 ) ) # Because in CLAP they use `nn.Sequential`... 
lowercase_ : str = 1 if projecton_layer == 0 else 2 lowercase_ : Optional[Any] = key.replace(F'''_projection.{projecton_layer}.''' , F'''_projection.linear{transformers_projection_layer}.''' ) if "audio" and "qkv" in key: # split qkv into query key and value lowercase_ : int = value lowercase_ : List[Any] = mixed_qkv.size(0 ) // 3 lowercase_ : Union[str, Any] = mixed_qkv[:qkv_dim] lowercase_ : Optional[int] = mixed_qkv[qkv_dim : qkv_dim * 2] lowercase_ : Optional[int] = mixed_qkv[qkv_dim * 2 :] lowercase_ : Tuple = query_layer lowercase_ : Union[str, Any] = key_layer lowercase_ : Optional[int] = value_layer else: lowercase_ : Dict = value return model_state_dict def snake_case_ ( __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Tuple=False ): """simple docstring""" lowercase_ : int = init_clap(snake_case__ , enable_fusion=snake_case__ ) clap_model.eval() lowercase_ : str = clap_model.state_dict() lowercase_ : Union[str, Any] = rename_state_dict(snake_case__ ) lowercase_ : Tuple = ClapConfig() lowercase_ : str = enable_fusion lowercase_ : str = ClapModel(snake_case__ ) # ignore the spectrogram embedding layer model.load_state_dict(snake_case__ , strict=snake_case__ ) model.save_pretrained(snake_case__ ) transformers_config.save_pretrained(snake_case__ ) if __name__ == "__main__": _lowercase : List[str] = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument("--enable_fusion", action="store_true", help="Whether to enable fusion or not") _lowercase : Tuple = parser.parse_args() convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
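# The conversion above splits a fused attention projection into separate
# query/key/value weights. A minimal sketch of that split with plain tensors;
# the 3 * dim row layout is the assumption the original loop relies on.
import torch

dim = 4
mixed_qkv = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)
qkv_dim = mixed_qkv.size(0) // 3
query = mixed_qkv[:qkv_dim]
key = mixed_qkv[qkv_dim : qkv_dim * 2]
value = mixed_qkv[qkv_dim * 2 :]
assert query.shape == key.shape == value.shape == (dim, dim)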
93
def solution(n: int = 10) -> str:
    """Return the last ``n`` digits of 28433 * 2**7830457 + 1 (Project Euler 97)."""
    if not isinstance(n, int) or n < 0:
        raise ValueError("Invalid input")
    modulus = 10**n
    number = 28433 * pow(2, 7830457, modulus) + 1
    return str(number % modulus)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print(f"{solution(10) = }")
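# Why the three-argument pow matters in the solution above: 2**7830457 has over
# two million decimal digits, while pow(2, 7830457, 10**10) reduces modulo
# 10**10 at every step and never leaves ten digits.
modulus = 10**10
fast = 28433 * pow(2, 7830457, modulus) + 1
print(str(fast % modulus))  # last ten digits, computed without the huge integer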
3
0
import flax.linen as nn import jax.numpy as jnp from .attention_flax import FlaxTransformeraDModel from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD class __snake_case ( nn.Module ): lowerCAmelCase_ = 42 lowerCAmelCase_ = 42 lowerCAmelCase_ = 0.0 lowerCAmelCase_ = 1 lowerCAmelCase_ = 1 lowerCAmelCase_ = True lowerCAmelCase_ = False lowerCAmelCase_ = False lowerCAmelCase_ = False lowerCAmelCase_ = jnp.floataa def __a ( self : int ): """simple docstring""" SCREAMING_SNAKE_CASE__ = [] SCREAMING_SNAKE_CASE__ = [] for i in range(self.num_layers ): SCREAMING_SNAKE_CASE__ = self.in_channels if i == 0 else self.out_channels SCREAMING_SNAKE_CASE__ = FlaxResnetBlockaD( in_channels=_lowercase , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(_lowercase ) SCREAMING_SNAKE_CASE__ = FlaxTransformeraDModel( in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(_lowercase ) SCREAMING_SNAKE_CASE__ = resnets SCREAMING_SNAKE_CASE__ = attentions if self.add_downsample: SCREAMING_SNAKE_CASE__ = FlaxDownsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self : Union[str, Any] , _lowercase : Optional[int] , _lowercase : str , _lowercase : Optional[Any] , _lowercase : List[Any]=True ): """simple docstring""" SCREAMING_SNAKE_CASE__ = () for resnet, attn in zip(self.resnets , self.attentions ): SCREAMING_SNAKE_CASE__ = resnet(_lowercase , _lowercase , deterministic=_lowercase ) SCREAMING_SNAKE_CASE__ = attn(_lowercase , _lowercase , deterministic=_lowercase ) output_states += (hidden_states,) if self.add_downsample: SCREAMING_SNAKE_CASE__ = self.downsamplers_a(_lowercase ) output_states += (hidden_states,) return hidden_states, output_states class __snake_case ( nn.Module ): lowerCAmelCase_ = 42 lowerCAmelCase_ = 42 lowerCAmelCase_ = 0.0 lowerCAmelCase_ = 1 lowerCAmelCase_ = True lowerCAmelCase_ = jnp.floataa def __a ( self : Optional[int] ): """simple docstring""" SCREAMING_SNAKE_CASE__ = [] for i in range(self.num_layers ): SCREAMING_SNAKE_CASE__ = self.in_channels if i == 0 else self.out_channels SCREAMING_SNAKE_CASE__ = FlaxResnetBlockaD( in_channels=_lowercase , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(_lowercase ) SCREAMING_SNAKE_CASE__ = resnets if self.add_downsample: SCREAMING_SNAKE_CASE__ = FlaxDownsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self : Any , _lowercase : Optional[Any] , _lowercase : Tuple , _lowercase : Tuple=True ): """simple docstring""" SCREAMING_SNAKE_CASE__ = () for resnet in self.resnets: SCREAMING_SNAKE_CASE__ = resnet(_lowercase , _lowercase , deterministic=_lowercase ) output_states += (hidden_states,) if self.add_downsample: SCREAMING_SNAKE_CASE__ = self.downsamplers_a(_lowercase ) output_states += (hidden_states,) return hidden_states, output_states class __snake_case ( nn.Module ): lowerCAmelCase_ = 42 lowerCAmelCase_ = 42 lowerCAmelCase_ = 42 lowerCAmelCase_ = 0.0 lowerCAmelCase_ = 1 lowerCAmelCase_ = 1 lowerCAmelCase_ = True lowerCAmelCase_ = False lowerCAmelCase_ = False lowerCAmelCase_ = False lowerCAmelCase_ = jnp.floataa def __a ( self : Any ): """simple docstring""" SCREAMING_SNAKE_CASE__ = [] SCREAMING_SNAKE_CASE__ = [] for i in 
range(self.num_layers ): SCREAMING_SNAKE_CASE__ = self.in_channels if (i == self.num_layers - 1) else self.out_channels SCREAMING_SNAKE_CASE__ = self.prev_output_channel if i == 0 else self.out_channels SCREAMING_SNAKE_CASE__ = FlaxResnetBlockaD( in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(_lowercase ) SCREAMING_SNAKE_CASE__ = FlaxTransformeraDModel( in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(_lowercase ) SCREAMING_SNAKE_CASE__ = resnets SCREAMING_SNAKE_CASE__ = attentions if self.add_upsample: SCREAMING_SNAKE_CASE__ = FlaxUpsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self : str , _lowercase : Dict , _lowercase : Optional[int] , _lowercase : Dict , _lowercase : List[Any] , _lowercase : Tuple=True ): """simple docstring""" for resnet, attn in zip(self.resnets , self.attentions ): # pop res hidden states SCREAMING_SNAKE_CASE__ = res_hidden_states_tuple[-1] SCREAMING_SNAKE_CASE__ = res_hidden_states_tuple[:-1] SCREAMING_SNAKE_CASE__ = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 ) SCREAMING_SNAKE_CASE__ = resnet(_lowercase , _lowercase , deterministic=_lowercase ) SCREAMING_SNAKE_CASE__ = attn(_lowercase , _lowercase , deterministic=_lowercase ) if self.add_upsample: SCREAMING_SNAKE_CASE__ = self.upsamplers_a(_lowercase ) return hidden_states class __snake_case ( nn.Module ): lowerCAmelCase_ = 42 lowerCAmelCase_ = 42 lowerCAmelCase_ = 42 lowerCAmelCase_ = 0.0 lowerCAmelCase_ = 1 lowerCAmelCase_ = True lowerCAmelCase_ = jnp.floataa def __a ( self : Tuple ): """simple docstring""" SCREAMING_SNAKE_CASE__ = [] for i in range(self.num_layers ): SCREAMING_SNAKE_CASE__ = self.in_channels if (i == self.num_layers - 1) else self.out_channels SCREAMING_SNAKE_CASE__ = self.prev_output_channel if i == 0 else self.out_channels SCREAMING_SNAKE_CASE__ = FlaxResnetBlockaD( in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(_lowercase ) SCREAMING_SNAKE_CASE__ = resnets if self.add_upsample: SCREAMING_SNAKE_CASE__ = FlaxUpsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self : List[str] , _lowercase : List[Any] , _lowercase : Any , _lowercase : Tuple , _lowercase : Optional[Any]=True ): """simple docstring""" for resnet in self.resnets: # pop res hidden states SCREAMING_SNAKE_CASE__ = res_hidden_states_tuple[-1] SCREAMING_SNAKE_CASE__ = res_hidden_states_tuple[:-1] SCREAMING_SNAKE_CASE__ = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 ) SCREAMING_SNAKE_CASE__ = resnet(_lowercase , _lowercase , deterministic=_lowercase ) if self.add_upsample: SCREAMING_SNAKE_CASE__ = self.upsamplers_a(_lowercase ) return hidden_states class __snake_case ( nn.Module ): lowerCAmelCase_ = 42 lowerCAmelCase_ = 0.0 lowerCAmelCase_ = 1 lowerCAmelCase_ = 1 lowerCAmelCase_ = False lowerCAmelCase_ = False lowerCAmelCase_ = jnp.floataa def __a ( self : Optional[int] ): """simple docstring""" SCREAMING_SNAKE_CASE__ = [ FlaxResnetBlockaD( in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , ) ] SCREAMING_SNAKE_CASE__ = [] for _ in 
range(self.num_layers ): SCREAMING_SNAKE_CASE__ = FlaxTransformeraDModel( in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(_lowercase ) SCREAMING_SNAKE_CASE__ = FlaxResnetBlockaD( in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(_lowercase ) SCREAMING_SNAKE_CASE__ = resnets SCREAMING_SNAKE_CASE__ = attentions def __call__( self : List[Any] , _lowercase : List[str] , _lowercase : str , _lowercase : List[Any] , _lowercase : Any=True ): """simple docstring""" SCREAMING_SNAKE_CASE__ = self.resnets[0](_lowercase , _lowercase ) for attn, resnet in zip(self.attentions , self.resnets[1:] ): SCREAMING_SNAKE_CASE__ = attn(_lowercase , _lowercase , deterministic=_lowercase ) SCREAMING_SNAKE_CASE__ = resnet(_lowercase , _lowercase , deterministic=_lowercase ) return hidden_states
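# The up-blocks above pop encoder activations and concatenate them on the
# channel axis before each resnet. A minimal jnp sketch of that single step;
# the tensor shapes are illustrative assumptions (NHWC, as in Flax diffusers).
import jax.numpy as jnp

hidden_states = jnp.ones((1, 8, 8, 64))      # current decoder activation
res_hidden_states = jnp.ones((1, 8, 8, 32))  # skip tensor from the down path
merged = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)
assert merged.shape == (1, 8, 8, 96)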
219
'''simple docstring''' from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast from ...utils import logging lowercase : List[str] = logging.get_logger(__name__) lowercase : str = { 'EleutherAI/gpt-neo-1.3B': 'https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json', # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo } class A ( __snake_case ): __magic_name__ = '''gpt_neo''' __magic_name__ = ['''past_key_values'''] __magic_name__ = {'''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''} def __init__( self , SCREAMING_SNAKE_CASE=50257 , SCREAMING_SNAKE_CASE=2048 , SCREAMING_SNAKE_CASE=2048 , SCREAMING_SNAKE_CASE=24 , SCREAMING_SNAKE_CASE=[[["global", "local"], 12]] , SCREAMING_SNAKE_CASE=16 , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=256 , SCREAMING_SNAKE_CASE="gelu_new" , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=1e-5 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=50256 , SCREAMING_SNAKE_CASE=50256 , **SCREAMING_SNAKE_CASE , ) -> int: """simple docstring""" A : Union[str, Any] = vocab_size A : Optional[Any] = max_position_embeddings A : Dict = hidden_size A : Optional[Any] = num_layers A : Tuple = num_heads A : int = intermediate_size A : Optional[Any] = window_size A : List[Any] = activation_function A : Union[str, Any] = resid_dropout A : Any = embed_dropout A : List[Any] = attention_dropout A : str = classifier_dropout A : List[Any] = layer_norm_epsilon A : str = initializer_range A : List[str] = use_cache A : Optional[int] = bos_token_id A : List[Any] = eos_token_id A : int = attention_types A : int = self.expand_attention_types_params(SCREAMING_SNAKE_CASE ) if len(self.attention_layers ) != self.num_layers: raise ValueError( '''Configuration for convolutional module is incorrect. ''' '''It is required that `len(config.attention_layers)` == `config.num_layers` ''' F'but is `len(config.attention_layers) = {len(self.attention_layers )}`, ' F'`config.num_layers = {self.num_layers}`. ' '''`config.attention_layers` is prepared using `config.attention_types`. 
''' '''Please verify the value of `config.attention_types` argument.''' ) super().__init__(bos_token_id=SCREAMING_SNAKE_CASE , eos_token_id=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) @staticmethod def __lowerCAmelCase ( SCREAMING_SNAKE_CASE ) -> int: """simple docstring""" A : List[str] = [] for item in attention_types: for _ in range(item[1] ): attentions.extend(item[0] ) return attentions def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' import torch A : Tuple = input.size() A : Union[str, Any] = len(snake_case__ ) A : List[str] = shape[dimension] A : Union[str, Any] = torch.arange(0 , snake_case__ , snake_case__ ) A : List[str] = torch.div(sizedim - size , snake_case__ , rounding_mode='''floor''' ) + 1 A : Optional[int] = torch.arange(snake_case__ ) + low_indices[:min_length][:, None] A : str = [slice(snake_case__ )] * rank A : List[Any] = indices A : Union[str, Any] = input[s] A : List[str] = list(range(0 , rank + 1 ) ) perm.append(perm.pop(dimension + 1 ) ) return sliced.permute(snake_case__ ) def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' import torch A : List[str] = torch.arange(1 , snake_case__ ) A : Optional[int] = torch.remainder(snake_case__ , snake_case__ ) A : Optional[int] = remainders == 0 A : Optional[Any] = candidates[divisor_indices] A : Optional[int] = torch.max(snake_case__ ) return largest_divisor, torch.div(snake_case__ , snake_case__ , rounding_mode='''floor''' ) class A ( __snake_case ): @property def __lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" A : Tuple = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} ) if self.use_past: self.fill_with_past_key_values_(SCREAMING_SNAKE_CASE , direction='''inputs''' ) A : Optional[Any] = {0: '''batch''', 1: '''past_sequence + sequence'''} else: A : Dict = {0: '''batch''', 1: '''sequence'''} return common_inputs @property def __lowerCAmelCase ( self ) -> int: """simple docstring""" return self._config.num_heads def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = -1 , SCREAMING_SNAKE_CASE = -1 , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = None , ) -> Mapping[str, Any]: """simple docstring""" A : List[str] = super(SCREAMING_SNAKE_CASE , self ).generate_dummy_inputs( SCREAMING_SNAKE_CASE , batch_size=SCREAMING_SNAKE_CASE , seq_length=SCREAMING_SNAKE_CASE , is_pair=SCREAMING_SNAKE_CASE , framework=SCREAMING_SNAKE_CASE ) # We need to order the input in the way they appears in the forward() A : Any = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch A, A : Dict = common_inputs['''input_ids'''].shape # Not using the same length for past_key_values A : str = seqlen + 2 A : List[Any] = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) A : Any = [ (torch.zeros(SCREAMING_SNAKE_CASE ), torch.zeros(SCREAMING_SNAKE_CASE )) for _ in range(self.num_layers ) ] A : str = common_inputs['''attention_mask'''] if self.use_past: A : Optional[int] = ordered_inputs['''attention_mask'''].dtype A : List[str] = torch.cat( [ordered_inputs['''attention_mask'''], torch.ones(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , dtype=SCREAMING_SNAKE_CASE )] , dim=1 ) return ordered_inputs @property def __lowerCAmelCase ( self ) -> 
int: """simple docstring""" return 13
3
0
import unittest from transformers import PegasusTokenizer, PegasusTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin _A = get_tests_dir("fixtures/test_sentencepiece_no_bos.model") @require_sentencepiece @require_tokenizers class _lowerCAmelCase ( __snake_case , unittest.TestCase ): _lowercase =PegasusTokenizer _lowercase =PegasusTokenizerFast _lowercase =True _lowercase =True def __a ( self ) -> Any: super().setUp() # We have a SentencePiece fixture for testing lowerCAmelCase_ = PegasusTokenizer(_UpperCamelCase ) tokenizer.save_pretrained(self.tmpdirname ) @cached_property def __a ( self ) -> str: return PegasusTokenizer.from_pretrained("google/pegasus-large" ) def __a ( self , **_UpperCamelCase ) -> PegasusTokenizer: return PegasusTokenizer.from_pretrained(self.tmpdirname , **_UpperCamelCase ) def __a ( self , _UpperCamelCase ) -> List[str]: return ("This is a test", "This is a test") def __a ( self ) -> str: lowerCAmelCase_ = '''</s>''' lowerCAmelCase_ = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_UpperCamelCase ) , _UpperCamelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_UpperCamelCase ) , _UpperCamelCase ) def __a ( self ) -> Dict: lowerCAmelCase_ = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<pad>" ) self.assertEqual(vocab_keys[1] , "</s>" ) self.assertEqual(vocab_keys[-1] , "v" ) self.assertEqual(len(_UpperCamelCase ) , 1_103 ) def __a ( self ) -> Optional[int]: self.assertEqual(self.get_tokenizer().vocab_size , 1_103 ) def __a ( self ) -> Dict: lowerCAmelCase_ = self.rust_tokenizer_class.from_pretrained(self.tmpdirname ) lowerCAmelCase_ = self.tokenizer_class.from_pretrained(self.tmpdirname ) lowerCAmelCase_ = ( '''Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important''' ''' </s> <pad> <pad> <pad>''' ) lowerCAmelCase_ = rust_tokenizer([raw_input_str] , return_tensors=_UpperCamelCase , add_special_tokens=_UpperCamelCase ).input_ids[0] lowerCAmelCase_ = py_tokenizer([raw_input_str] , return_tensors=_UpperCamelCase , add_special_tokens=_UpperCamelCase ).input_ids[0] self.assertListEqual(_UpperCamelCase , _UpperCamelCase ) def __a ( self ) -> List[str]: lowerCAmelCase_ = self._large_tokenizer # <mask_1> masks whole sentence while <mask_2> masks single word lowerCAmelCase_ = '''<mask_1> To ensure a <mask_2> flow of bank resolutions.''' lowerCAmelCase_ = [2, 413, 615, 114, 3, 1_971, 113, 1_679, 10_710, 107, 1] lowerCAmelCase_ = tokenizer([raw_input_str] , return_tensors=_UpperCamelCase ).input_ids[0] self.assertListEqual(_UpperCamelCase , _UpperCamelCase ) def __a ( self ) -> Dict: lowerCAmelCase_ = self._large_tokenizer # The tracebacks for the following asserts are **better** without messages or self.assertEqual assert tokenizer.vocab_size == 96_103 assert tokenizer.pad_token_id == 0 assert tokenizer.eos_token_id == 1 assert tokenizer.offset == 103 assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105 assert tokenizer.unk_token == "<unk>" assert tokenizer.model_max_length == 1_024 lowerCAmelCase_ = '''To ensure a smooth flow of bank resolutions.''' lowerCAmelCase_ = [413, 615, 114, 2_291, 1_971, 113, 1_679, 10_710, 107, 1] lowerCAmelCase_ = tokenizer([raw_input_str] , return_tensors=_UpperCamelCase ).input_ids[0] self.assertListEqual(_UpperCamelCase , _UpperCamelCase ) 
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"] @require_torch def __a ( self ) -> List[Any]: lowerCAmelCase_ = ['''This is going to be way too long.''' * 150, '''short example'''] lowerCAmelCase_ = ['''not super long but more than 5 tokens''', '''tiny'''] lowerCAmelCase_ = self._large_tokenizer(_UpperCamelCase , padding=_UpperCamelCase , truncation=_UpperCamelCase , return_tensors="pt" ) lowerCAmelCase_ = self._large_tokenizer( text_target=_UpperCamelCase , max_length=5 , padding=_UpperCamelCase , truncation=_UpperCamelCase , return_tensors="pt" ) assert batch.input_ids.shape == (2, 1_024) assert batch.attention_mask.shape == (2, 1_024) assert targets["input_ids"].shape == (2, 5) assert len(_UpperCamelCase ) == 2 # input_ids, attention_mask. @slow def __a ( self ) -> Optional[Any]: lowerCAmelCase_ = {'''input_ids''': [[38_979, 143, 18_485, 606, 130, 26_669, 87_686, 121, 54_189, 1_129, 111, 26_669, 87_686, 121, 9_114, 14_787, 121, 13_249, 158, 592, 956, 121, 14_621, 31_576, 143, 62_613, 108, 9_688, 930, 43_430, 11_562, 62_613, 304, 108, 11_443, 897, 108, 9_314, 17_415, 63_399, 108, 11_443, 7_614, 18_316, 118, 4_284, 7_148, 12_430, 143, 1_400, 25_703, 158, 111, 4_284, 7_148, 11_772, 143, 21_297, 1_064, 158, 122, 204, 3_506, 1_754, 1_133, 14_787, 1_581, 115, 33_224, 4_482, 111, 1_355, 110, 29_173, 317, 50_833, 108, 20_147, 94_665, 111, 77_198, 107, 1], [110, 62_613, 117, 638, 112, 1_133, 121, 20_098, 1_355, 79_050, 13_872, 135, 1_596, 53_541, 1_352, 141, 13_039, 5_542, 124, 302, 518, 111, 268, 2_956, 115, 149, 4_427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1_235, 2_799, 18_289, 17_780, 204, 109, 9_474, 1_296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=_UpperCamelCase , model_name="google/bigbird-pegasus-large-arxiv" , revision="ba85d0851d708441f91440d509690f1ab6353415" , ) @require_sentencepiece @require_tokenizers class _lowerCAmelCase ( __snake_case , unittest.TestCase ): _lowercase =PegasusTokenizer _lowercase =PegasusTokenizerFast _lowercase =True _lowercase =True def __a ( self ) -> Optional[int]: super().setUp() # We have a SentencePiece fixture for testing lowerCAmelCase_ = PegasusTokenizer(_UpperCamelCase , offset=0 , mask_token_sent=_UpperCamelCase , mask_token="[MASK]" ) tokenizer.save_pretrained(self.tmpdirname ) @cached_property def __a ( self ) -> Union[str, Any]: return 
PegasusTokenizer.from_pretrained("google/bigbird-pegasus-large-arxiv" ) def __a ( self , **_UpperCamelCase ) -> PegasusTokenizer: return PegasusTokenizer.from_pretrained(self.tmpdirname , **_UpperCamelCase ) def __a ( self , _UpperCamelCase ) -> Union[str, Any]: return ("This is a test", "This is a test") def __a ( self ) -> Any: lowerCAmelCase_ = self.rust_tokenizer_class.from_pretrained(self.tmpdirname ) lowerCAmelCase_ = self.tokenizer_class.from_pretrained(self.tmpdirname ) lowerCAmelCase_ = ( '''Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>''' ''' <pad> <pad> <pad>''' ) lowerCAmelCase_ = rust_tokenizer([raw_input_str] , return_tensors=_UpperCamelCase , add_special_tokens=_UpperCamelCase ).input_ids[0] lowerCAmelCase_ = py_tokenizer([raw_input_str] , return_tensors=_UpperCamelCase , add_special_tokens=_UpperCamelCase ).input_ids[0] self.assertListEqual(_UpperCamelCase , _UpperCamelCase ) @require_torch def __a ( self ) -> int: lowerCAmelCase_ = ['''This is going to be way too long.''' * 1_000, '''short example'''] lowerCAmelCase_ = ['''not super long but more than 5 tokens''', '''tiny'''] lowerCAmelCase_ = self._large_tokenizer(_UpperCamelCase , padding=_UpperCamelCase , truncation=_UpperCamelCase , return_tensors="pt" ) lowerCAmelCase_ = self._large_tokenizer( text_target=_UpperCamelCase , max_length=5 , padding=_UpperCamelCase , truncation=_UpperCamelCase , return_tensors="pt" ) assert batch.input_ids.shape == (2, 4_096) assert batch.attention_mask.shape == (2, 4_096) assert targets["input_ids"].shape == (2, 5) assert len(_UpperCamelCase ) == 2 # input_ids, attention_mask. def __a ( self ) -> Optional[Any]: lowerCAmelCase_ = ( '''This is an example string that is used to test the original TF implementation against the HF''' ''' implementation''' ) lowerCAmelCase_ = self._large_tokenizer(_UpperCamelCase ).input_ids self.assertListEqual( _UpperCamelCase , [182, 117, 142, 587, 4_211, 120, 117, 263, 112, 804, 109, 856, 25_016, 3_137, 464, 109, 26_955, 3_137, 1] , )
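# The Pegasus tests above rely on the tokenizer's id offset: low ids are
# reserved (<pad> = 0, </s> = 1, mask tokens), and sentencepiece ids are
# shifted up by `offset` (103 for the large checkpoint, per the assertions
# above). A sketch of that mapping; the sentencepiece ids are made-up values.
offset = 103
sp_ids = [310, 512, 11]
shifted = [i + offset for i in sp_ids]
print(shifted)  # the ids the Pegasus tokenizer would emit for those pieces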
231
'''simple docstring''' import flax.linen as nn import jax.numpy as jnp from .attention_flax import FlaxTransformeraDModel from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD class A ( nn.Module ): __magic_name__ = 42 __magic_name__ = 42 __magic_name__ = 0.0 __magic_name__ = 1 __magic_name__ = 1 __magic_name__ = True __magic_name__ = False __magic_name__ = False __magic_name__ = False __magic_name__ = jnp.floataa def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" A : Union[str, Any] = [] A : Union[str, Any] = [] for i in range(self.num_layers ): A : Any = self.in_channels if i == 0 else self.out_channels A : Optional[Any] = FlaxResnetBlockaD( in_channels=SCREAMING_SNAKE_CASE , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(SCREAMING_SNAKE_CASE ) A : Optional[int] = FlaxTransformeraDModel( in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(SCREAMING_SNAKE_CASE ) A : Union[str, Any] = resnets A : Union[str, Any] = attentions if self.add_downsample: A : int = FlaxDownsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=True ) -> Union[str, Any]: """simple docstring""" A : Optional[Any] = () for resnet, attn in zip(self.resnets , self.attentions ): A : int = resnet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE ) A : Dict = attn(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE ) output_states += (hidden_states,) if self.add_downsample: A : Optional[Any] = self.downsamplers_a(SCREAMING_SNAKE_CASE ) output_states += (hidden_states,) return hidden_states, output_states class A ( nn.Module ): __magic_name__ = 42 __magic_name__ = 42 __magic_name__ = 0.0 __magic_name__ = 1 __magic_name__ = True __magic_name__ = jnp.floataa def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" A : Optional[Any] = [] for i in range(self.num_layers ): A : Optional[Any] = self.in_channels if i == 0 else self.out_channels A : List[str] = FlaxResnetBlockaD( in_channels=SCREAMING_SNAKE_CASE , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(SCREAMING_SNAKE_CASE ) A : Dict = resnets if self.add_downsample: A : Dict = FlaxDownsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=True ) -> Optional[Any]: """simple docstring""" A : str = () for resnet in self.resnets: A : Optional[int] = resnet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE ) output_states += (hidden_states,) if self.add_downsample: A : Optional[int] = self.downsamplers_a(SCREAMING_SNAKE_CASE ) output_states += (hidden_states,) return hidden_states, output_states class A ( nn.Module ): __magic_name__ = 42 __magic_name__ = 42 __magic_name__ = 42 __magic_name__ = 0.0 __magic_name__ = 1 __magic_name__ = 1 __magic_name__ = True __magic_name__ = False __magic_name__ = False __magic_name__ = False __magic_name__ = jnp.floataa def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" A : Optional[Any] = [] A : Optional[int] = [] for i in 
range(self.num_layers ): A : str = self.in_channels if (i == self.num_layers - 1) else self.out_channels A : Dict = self.prev_output_channel if i == 0 else self.out_channels A : List[str] = FlaxResnetBlockaD( in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(SCREAMING_SNAKE_CASE ) A : int = FlaxTransformeraDModel( in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(SCREAMING_SNAKE_CASE ) A : Dict = resnets A : Optional[Any] = attentions if self.add_upsample: A : Optional[int] = FlaxUpsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=True ) -> Optional[int]: """simple docstring""" for resnet, attn in zip(self.resnets , self.attentions ): # pop res hidden states A : List[str] = res_hidden_states_tuple[-1] A : int = res_hidden_states_tuple[:-1] A : List[str] = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 ) A : Union[str, Any] = resnet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE ) A : Tuple = attn(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE ) if self.add_upsample: A : Dict = self.upsamplers_a(SCREAMING_SNAKE_CASE ) return hidden_states class A ( nn.Module ): __magic_name__ = 42 __magic_name__ = 42 __magic_name__ = 42 __magic_name__ = 0.0 __magic_name__ = 1 __magic_name__ = True __magic_name__ = jnp.floataa def __lowerCAmelCase ( self ) -> Dict: """simple docstring""" A : int = [] for i in range(self.num_layers ): A : List[Any] = self.in_channels if (i == self.num_layers - 1) else self.out_channels A : List[str] = self.prev_output_channel if i == 0 else self.out_channels A : str = FlaxResnetBlockaD( in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(SCREAMING_SNAKE_CASE ) A : List[Any] = resnets if self.add_upsample: A : Optional[Any] = FlaxUpsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=True ) -> Tuple: """simple docstring""" for resnet in self.resnets: # pop res hidden states A : Optional[int] = res_hidden_states_tuple[-1] A : Optional[Any] = res_hidden_states_tuple[:-1] A : List[Any] = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 ) A : Optional[Any] = resnet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE ) if self.add_upsample: A : List[str] = self.upsamplers_a(SCREAMING_SNAKE_CASE ) return hidden_states class A ( nn.Module ): __magic_name__ = 42 __magic_name__ = 0.0 __magic_name__ = 1 __magic_name__ = 1 __magic_name__ = False __magic_name__ = False __magic_name__ = jnp.floataa def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" A : str = [ FlaxResnetBlockaD( in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , ) ] A : List[Any] = [] for _ in range(self.num_layers ): A : int = FlaxTransformeraDModel( in_channels=self.in_channels , 
n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(SCREAMING_SNAKE_CASE ) A : Union[str, Any] = FlaxResnetBlockaD( in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(SCREAMING_SNAKE_CASE ) A : List[str] = resnets A : List[str] = attentions def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=True ) -> Dict: """simple docstring""" A : Optional[Any] = self.resnets[0](SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for attn, resnet in zip(self.attentions , self.resnets[1:] ): A : Optional[int] = attn(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE ) A : Union[str, Any] = resnet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE ) return hidden_states
3
0
def hexagonal_numbers(length: int) -> list[int]:
    """Return the first ``length`` hexagonal numbers, h_n = n * (2n - 1)."""
    if length <= 0 or not isinstance(length, int):
        raise ValueError("Length must be a positive integer.")
    return [n * (2 * n - 1) for n in range(length)]


if __name__ == "__main__":
    print(hexagonal_numbers(length=5))
    print(hexagonal_numbers(length=10))
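# A quick check of the closed form h_n = n * (2n - 1) against the first few
# terms produced by the function above (the sequence starts at n = 0):
expected = [0, 1, 6, 15, 28]
assert hexagonal_numbers(length=5) == expected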
2
import os


def solution() -> str:
    """Return the first ten digits of the sum of the numbers in ``num.txt``."""
    file_path = os.path.join(os.path.dirname(__file__), "num.txt")
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]


if __name__ == "__main__":
    print(solution())
3
0
from __future__ import annotations

from collections import Counter


def pythagorean_triple(max_perimeter: int) -> Counter:
    """Count, per perimeter up to ``max_perimeter``, the integer right triangles with that perimeter."""
    triplets: Counter = Counter()
    for base in range(1, max_perimeter + 1):
        for perpendicular in range(base, max_perimeter + 1):
            hypotenuse = (base * base + perpendicular * perpendicular) ** 0.5
            if hypotenuse == int(hypotenuse):
                perimeter = int(base + perpendicular + hypotenuse)
                if perimeter > max_perimeter:
                    continue
                triplets[perimeter] += 1
    return triplets


def solution(max_perimeter: int = 1000) -> int:
    """Return the perimeter <= ``max_perimeter`` with the most right-triangle solutions."""
    triplets = pythagorean_triple(max_perimeter)
    return triplets.most_common(1)[0][0]


if __name__ == "__main__":
    print(f"Perimeter {solution()} has maximum solutions")
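# A worked instance of the search above: perimeter 120 is the classic case
# with three distinct right triangles, (20, 48, 52), (24, 45, 51), (30, 40, 50).
triplets = pythagorean_triple(120)
assert triplets[120] == 3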
338
import pytest

import datasets.config
from datasets.utils.info_utils import is_small_dataset


@pytest.mark.parametrize("dataset_size", [None, 400 * 2**20, 600 * 2**20])
@pytest.mark.parametrize("input_in_memory_max_size", ["default", 0, 100 * 2**20, 900 * 2**20])
def test_is_small_dataset(dataset_size, input_in_memory_max_size, monkeypatch):
    if input_in_memory_max_size != "default":
        monkeypatch.setattr(datasets.config, "IN_MEMORY_MAX_SIZE", input_in_memory_max_size)
    in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE
    if input_in_memory_max_size == "default":
        assert in_memory_max_size == 0
    else:
        assert in_memory_max_size == input_in_memory_max_size
    if dataset_size and in_memory_max_size:
        expected = dataset_size < in_memory_max_size
    else:
        expected = False
    result = is_small_dataset(dataset_size)
    assert result == expected
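# The parametrized test above covers every (dataset_size, max_size) pairing; a
# direct evaluation shows the decision rule it encodes, assuming the same
# semantics of `is_small_dataset` (small only when a size is known and fits
# under a nonzero cap):
dataset_size, in_memory_max_size = 400 * 2**20, 500 * 2**20
expected = bool(dataset_size and in_memory_max_size) and dataset_size < in_memory_max_size
print(expected)  # True: 400 MiB fits under a 500 MiB cap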
import os import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from huggingface_hub.file_download import http_get from requests.exceptions import HTTPError from transformers import ( AlbertTokenizer, AutoTokenizer, BertTokenizer, BertTokenizerFast, GPTaTokenizerFast, is_tokenizers_available, ) from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers from transformers.tokenization_utils import Trie sys.path.append(str(Path(__file__).parent.parent / """utils""")) from test_module.custom_tokenization import CustomTokenizer # noqa E402 if is_tokenizers_available(): from test_module.custom_tokenization_fast import CustomTokenizerFast class A__(unittest.TestCase ): """simple docstring""" def UpperCamelCase__ ( self ) -> Union[str, Any]: a_ : Dict = mock.Mock() a_ : str = 500 a_ : List[str] = {} a_ : List[Any] = HTTPError a_ : str = {} # Download this model to make sure it's in the cache. a_ : Tuple = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" ) # Under the mock environment we get a 500 error when trying to reach the tokenizer. with mock.patch("""requests.Session.request""" , return_value=_lowercase ) as mock_head: a_ : Dict = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" ) # This check we did call the fake head request mock_head.assert_called() @require_tokenizers def UpperCamelCase__ ( self ) -> Dict: a_ : str = mock.Mock() a_ : Optional[Any] = 500 a_ : str = {} a_ : Tuple = HTTPError a_ : str = {} # Download this model to make sure it's in the cache. a_ : str = GPTaTokenizerFast.from_pretrained("""gpt2""" ) # Under the mock environment we get a 500 error when trying to reach the tokenizer. with mock.patch("""requests.Session.request""" , return_value=_lowercase ) as mock_head: a_ : Union[str, Any] = GPTaTokenizerFast.from_pretrained("""gpt2""" ) # This check we did call the fake head request mock_head.assert_called() def UpperCamelCase__ ( self ) -> Any: try: a_ : Dict = tempfile.mktemp() with open(_lowercase , """wb""" ) as f: http_get("""https://huggingface.co/albert-base-v1/resolve/main/spiece.model""" , _lowercase ) a_ : List[str] = AlbertTokenizer.from_pretrained(_lowercase ) finally: os.remove(_lowercase ) # Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in # the current folder and have the right name. if os.path.isfile("""tokenizer.json""" ): # We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it. return try: with open("""tokenizer.json""" , """wb""" ) as f: http_get("""https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json""" , _lowercase ) a_ : List[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ) # The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000 self.assertEqual(tokenizer.vocab_size , 1_000 ) # Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file. 
finally: os.remove("""tokenizer.json""" ) def UpperCamelCase__ ( self ) -> Any: a_ : int = AlbertTokenizer.from_pretrained("""https://huggingface.co/albert-base-v1/resolve/main/spiece.model""" ) @is_staging_test class A__(unittest.TestCase ): """simple docstring""" _A : int = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou'''] @classmethod def UpperCamelCase__ ( cls ) -> Optional[int]: a_ : List[str] = TOKEN HfFolder.save_token(_lowercase ) @classmethod def UpperCamelCase__ ( cls ) -> List[str]: try: delete_repo(token=cls._token , repo_id="""test-tokenizer""" ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id="""valid_org/test-tokenizer-org""" ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id="""test-dynamic-tokenizer""" ) except HTTPError: pass def UpperCamelCase__ ( self ) -> List[str]: with tempfile.TemporaryDirectory() as tmp_dir: a_ : str = os.path.join(_lowercase , """vocab.txt""" ) with open(_lowercase , """w""" , encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) ) a_ : List[Any] = BertTokenizer(_lowercase ) tokenizer.push_to_hub("""test-tokenizer""" , use_auth_token=self._token ) a_ : Optional[Any] = BertTokenizer.from_pretrained(F'''{USER}/test-tokenizer''' ) self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab ) # Reset repo delete_repo(token=self._token , repo_id="""test-tokenizer""" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(_lowercase , repo_id="""test-tokenizer""" , push_to_hub=_lowercase , use_auth_token=self._token ) a_ : int = BertTokenizer.from_pretrained(F'''{USER}/test-tokenizer''' ) self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab ) def UpperCamelCase__ ( self ) -> Any: with tempfile.TemporaryDirectory() as tmp_dir: a_ : Union[str, Any] = os.path.join(_lowercase , """vocab.txt""" ) with open(_lowercase , """w""" , encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) ) a_ : Optional[int] = BertTokenizer(_lowercase ) tokenizer.push_to_hub("""valid_org/test-tokenizer-org""" , use_auth_token=self._token ) a_ : List[Any] = BertTokenizer.from_pretrained("""valid_org/test-tokenizer-org""" ) self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab ) # Reset repo delete_repo(token=self._token , repo_id="""valid_org/test-tokenizer-org""" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained( _lowercase , repo_id="""valid_org/test-tokenizer-org""" , push_to_hub=_lowercase , use_auth_token=self._token ) a_ : str = BertTokenizer.from_pretrained("""valid_org/test-tokenizer-org""" ) self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab ) @require_tokenizers def UpperCamelCase__ ( self ) -> str: CustomTokenizer.register_for_auto_class() with tempfile.TemporaryDirectory() as tmp_dir: a_ : List[Any] = os.path.join(_lowercase , """vocab.txt""" ) with open(_lowercase , """w""" , encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) ) a_ : Optional[int] = CustomTokenizer(_lowercase ) # No fast custom tokenizer tokenizer.push_to_hub("""test-dynamic-tokenizer""" , use_auth_token=self._token ) a_ : Dict = AutoTokenizer.from_pretrained(F'''{USER}/test-dynamic-tokenizer''' , trust_remote_code=_lowercase ) # Can't make an isinstance check because the new_model.config is from the CustomTokenizer 
class of a dynamic module self.assertEqual(tokenizer.__class__.__name__ , """CustomTokenizer""" ) # Fast and slow custom tokenizer CustomTokenizerFast.register_for_auto_class() with tempfile.TemporaryDirectory() as tmp_dir: a_ : Optional[Any] = os.path.join(_lowercase , """vocab.txt""" ) with open(_lowercase , """w""" , encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) ) a_ : str = BertTokenizerFast.from_pretrained(_lowercase ) bert_tokenizer.save_pretrained(_lowercase ) a_ : Any = CustomTokenizerFast.from_pretrained(_lowercase ) tokenizer.push_to_hub("""test-dynamic-tokenizer""" , use_auth_token=self._token ) a_ : Union[str, Any] = AutoTokenizer.from_pretrained(F'''{USER}/test-dynamic-tokenizer''' , trust_remote_code=_lowercase ) # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module self.assertEqual(tokenizer.__class__.__name__ , """CustomTokenizerFast""" ) a_ : str = AutoTokenizer.from_pretrained( F'''{USER}/test-dynamic-tokenizer''' , use_fast=_lowercase , trust_remote_code=_lowercase ) # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module self.assertEqual(tokenizer.__class__.__name__ , """CustomTokenizer""" ) class A__(unittest.TestCase ): """simple docstring""" def UpperCamelCase__ ( self ) -> Dict: a_ : Optional[Any] = Trie() trie.add("""Hello 友達""" ) self.assertEqual(trie.data , {"""H""": {"""e""": {"""l""": {"""l""": {"""o""": {""" """: {"""友""": {"""達""": {"""""": 1}}}}}}}}} ) trie.add("""Hello""" ) trie.data self.assertEqual(trie.data , {"""H""": {"""e""": {"""l""": {"""l""": {"""o""": {"""""": 1, """ """: {"""友""": {"""達""": {"""""": 1}}}}}}}}} ) def UpperCamelCase__ ( self ) -> List[str]: a_ : List[str] = Trie() self.assertEqual(trie.split("""[CLS] This is a extra_id_100""" ) , ["""[CLS] This is a extra_id_100"""] ) trie.add("""[CLS]""" ) trie.add("""extra_id_1""" ) trie.add("""extra_id_100""" ) self.assertEqual(trie.split("""[CLS] This is a extra_id_100""" ) , ["""[CLS]""", """ This is a """, """extra_id_100"""] ) def UpperCamelCase__ ( self ) -> Tuple: a_ : Dict = Trie() trie.add("""A""" ) self.assertEqual(trie.split("""ABC""" ) , ["""A""", """BC"""] ) self.assertEqual(trie.split("""BCA""" ) , ["""BC""", """A"""] ) def UpperCamelCase__ ( self ) -> Dict: a_ : List[Any] = Trie() trie.add("""TOKEN]""" ) trie.add("""[SPECIAL_TOKEN]""" ) self.assertEqual(trie.split("""This is something [SPECIAL_TOKEN]""" ) , ["""This is something """, """[SPECIAL_TOKEN]"""] ) def UpperCamelCase__ ( self ) -> List[str]: a_ : str = Trie() trie.add("""A""" ) trie.add("""P""" ) trie.add("""[SPECIAL_TOKEN]""" ) self.assertEqual(trie.split("""This is something [SPECIAL_TOKEN]""" ) , ["""This is something """, """[SPECIAL_TOKEN]"""] ) def UpperCamelCase__ ( self ) -> Union[str, Any]: a_ : Any = Trie() trie.add("""AB""" ) trie.add("""B""" ) trie.add("""C""" ) self.assertEqual(trie.split("""ABC""" ) , ["""AB""", """C"""] ) def UpperCamelCase__ ( self ) -> Any: a_ : List[str] = Trie() trie.add("""ABC""" ) trie.add("""B""" ) trie.add("""CD""" ) self.assertEqual(trie.split("""ABCD""" ) , ["""ABC""", """D"""] ) def UpperCamelCase__ ( self ) -> Optional[int]: a_ : Tuple = Trie() a_ : Optional[int] = trie.cut_text("""ABC""" , [0, 0, 2, 1, 2, 3] ) self.assertEqual(_lowercase , ["""AB""", """C"""] )
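# A minimal sketch of the character-trie the tests above exercise (my own
# illustrative reimplementation of `add`, not transformers' internals): each
# word is spelled out as nested dicts, with the empty-string key marking a
# terminal node.
class _TrieSketch:
    def __init__(self):
        self.data = {}

    def add(self, word: str) -> None:
        node = self.data
        for ch in word:
            node = node.setdefault(ch, {})
        node[""] = 1


if __name__ == "__main__":
    t = _TrieSketch()
    t.add("AB")
    assert t.data == {"A": {"B": {"": 1}}}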
from dataclasses import dataclass
from typing import List, Optional, Union

import numpy as np
import PIL

from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
    fast27_timesteps,
    smart27_timesteps,
    smart50_timesteps,
    smart100_timesteps,
    smart185_timesteps,
    super27_timesteps,
    super40_timesteps,
    super100_timesteps,
)


@dataclass
class IFPipelineOutput(BaseOutput):
    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_detected: Optional[List[bool]]
    watermark_detected: Optional[List[bool]]


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .pipeline_if import IFPipeline
    from .pipeline_if_img2img import IFImg2ImgPipeline
    from .pipeline_if_img2img_superresolution import IFImg2ImgSuperResolutionPipeline
    from .pipeline_if_inpainting import IFInpaintingPipeline
    from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
    from .pipeline_if_superresolution import IFSuperResolutionPipeline
    from .safety_checker import IFSafetyChecker
    from .watermark import IFWatermarker
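# A minimal usage sketch (left as comments; the checkpoint id is the public
# DeepFloyd release, which this __init__ does not itself pin down):
#
#   from diffusers import IFPipeline
#   pipe = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0")
#   image = pipe("a photo of an astronaut riding a horse").images[0]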
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {
    "configuration_vision_text_dual_encoder": ["VisionTextDualEncoderConfig"],
    "processing_vision_text_dual_encoder": ["VisionTextDualEncoderProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vision_text_dual_encoder"] = ["VisionTextDualEncoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_vision_text_dual_encoder"] = ["FlaxVisionTextDualEncoderModel"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vision_text_dual_encoder"] = ["TFVisionTextDualEncoderModel"]


if TYPE_CHECKING:
    from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig
    from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
'''simple docstring''' from scipy.stats import pearsonr import datasets lowercase : Optional[int] = '\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n' lowercase : Optional[Any] = '\nArgs:\n predictions (`list` of `int`): Predicted class labels, as returned by a model.\n references (`list` of `int`): Ground truth labels.\n return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n Example 1-A simple example using only predictions and references.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n\n Example 2-The same as Example 1, but that also returns the `p-value`.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n >>> print(sorted(list(results.keys())))\n [\'p-value\', \'pearsonr\']\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n >>> print(round(results[\'p-value\'], 2))\n 0.15\n' lowercase : str = '\n@article{2020SciPy-NMeth,\nauthor = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Antonio H. 
and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\njournal = {Nature Methods},\nyear = {2020},\nvolume = {17},\npages = {261--272},\nadsurl = {https://rdcu.be/b08Wh},\ndoi = {10.1038/s41592-019-0686-2},\n}\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class A ( datasets.Metric ): def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''float''' ), '''references''': datasets.Value('''float''' ), } ) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'''] , ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False ) -> Union[str, Any]: """simple docstring""" if return_pvalue: A : Union[str, Any] = pearsonr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) return {"pearsonr": results[0], "p-value": results[1]} else: return {"pearsonr": float(pearsonr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )[0] )}
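# A minimal end-to-end sketch of what the metric's compute() does, using the
# same scipy call directly (the values mirror Example 2 in the docstring above):
if __name__ == "__main__":
    from scipy.stats import pearsonr as _pearsonr

    r, p = _pearsonr([10, 9, 2.5, 6, 4], [1, 2, 3, 4, 5])
    print(round(r, 2), round(p, 2))  # expected roughly -0.74 and 0.15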
import argparse import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## _SCREAMING_SNAKE_CASE = 16 _SCREAMING_SNAKE_CASE = 32 def SCREAMING_SNAKE_CASE__ ( __a , __a = 16 ): snake_case_ : int = AutoTokenizer.from_pretrained('bert-base-cased' ) snake_case_ : Optional[Any] = load_dataset('glue' , 'mrpc' ) def tokenize_function(__a ): # max_length=None => use the model max length (it's actually the default) snake_case_ : Tuple = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=snake_case__ , max_length=snake_case__ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): snake_case_ : Optional[int] = datasets.map( snake_case__ , batched=snake_case__ , remove_columns=['idx', 'sentence1', 'sentence2'] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library snake_case_ : Dict = tokenized_datasets.rename_column('label' , 'labels' ) def collate_fn(__a ): # On TPU it's best to pad everything to the same length or training will be very slow. snake_case_ : Tuple = 1_28 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": snake_case_ : List[Any] = 16 elif accelerator.mixed_precision != "no": snake_case_ : Dict = 8 else: snake_case_ : Tuple = None return tokenizer.pad( snake_case__ , padding='longest' , max_length=snake_case__ , pad_to_multiple_of=snake_case__ , return_tensors='pt' , ) # Instantiate dataloaders. 
snake_case_ : Optional[int] = DataLoader( tokenized_datasets['train'] , shuffle=snake_case__ , collate_fn=snake_case__ , batch_size=snake_case__ , drop_last=snake_case__ ) snake_case_ : int = DataLoader( tokenized_datasets['validation'] , shuffle=snake_case__ , collate_fn=snake_case__ , batch_size=snake_case__ , drop_last=(accelerator.mixed_precision == 'fp8') , ) return train_dataloader, eval_dataloader def SCREAMING_SNAKE_CASE__ ( __a , __a ): snake_case_ : Dict = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs snake_case_ : Optional[Any] = config['''lr'''] snake_case_ : Dict = int(config['num_epochs'] ) snake_case_ : List[str] = int(config['seed'] ) snake_case_ : Optional[int] = int(config['batch_size'] ) snake_case_ : Tuple = evaluate.load('glue' , 'mrpc' ) # If the batch size is too big we use gradient accumulation snake_case_ : List[str] = 1 if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU: snake_case_ : str = batch_size // MAX_GPU_BATCH_SIZE snake_case_ : int = MAX_GPU_BATCH_SIZE set_seed(snake_case__ ) snake_case_ : Optional[Any] = get_dataloaders(snake_case__ , snake_case__ ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) snake_case_ : List[str] = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=snake_case__ ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). snake_case_ : int = model.to(accelerator.device ) # Instantiate optimizer snake_case_ : List[Any] = AdamW(params=model.parameters() , lr=snake_case__ ) # Instantiate scheduler snake_case_ : Any = get_linear_schedule_with_warmup( optimizer=snake_case__ , num_warmup_steps=1_00 , num_training_steps=(len(snake_case__ ) * num_epochs) // gradient_accumulation_steps , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. snake_case_ : Tuple = accelerator.prepare( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) # Now we train the model for epoch in range(snake_case__ ): model.train() for step, batch in enumerate(snake_case__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) snake_case_ : Optional[Any] = model(**snake_case__ ) snake_case_ : Optional[Any] = outputs.loss snake_case_ : Dict = loss / gradient_accumulation_steps accelerator.backward(snake_case__ ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(snake_case__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): snake_case_ : str = model(**snake_case__ ) snake_case_ : str = outputs.logits.argmax(dim=-1 ) snake_case_ : List[str] = accelerator.gather_for_metrics((predictions, batch['labels']) ) metric.add_batch( predictions=snake_case__ , references=snake_case__ , ) snake_case_ : Optional[Any] = metric.compute() # Use accelerator.print to print only on the main process. 
accelerator.print(f"""epoch {epoch}:""" , snake_case__ ) def SCREAMING_SNAKE_CASE__ ( ): snake_case_ : str = argparse.ArgumentParser(description='Simple example of training script.' ) parser.add_argument( '--mixed_precision' , type=snake_case__ , default=snake_case__ , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose' 'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.' 'and an Nvidia Ampere GPU.' , ) parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' ) snake_case_ : int = parser.parse_args() snake_case_ : Union[str, Any] = {'''lr''': 2E-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16} training_function(snake_case__ , snake_case__ ) if __name__ == "__main__": main()
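# Typical ways to launch the training script above (standard `accelerate` CLI
# usage; "nlp_example.py" is a hypothetical filename for this file, and the
# flags you need depend on your hardware):
#
#   python nlp_example.py --mixed_precision fp16
#   accelerate launch nlp_example.py --mixed_precision bf16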
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_speech_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {
    "configuration_speech_to_text": ["SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2TextConfig"],
    "processing_speech_to_text": ["Speech2TextProcessor"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_speech_to_text"] = ["Speech2TextTokenizer"]

try:
    if not is_speech_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_speech_to_text"] = ["Speech2TextFeatureExtractor"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_speech_to_text"] = [
        "TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFSpeech2TextForConditionalGeneration",
        "TFSpeech2TextModel",
        "TFSpeech2TextPreTrainedModel",
    ]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speech_to_text"] = [
        "SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Speech2TextForConditionalGeneration",
        "Speech2TextModel",
        "Speech2TextPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
    from .processing_speech_to_text import Speech2TextProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speech_to_text import Speech2TextTokenizer

    try:
        if not is_speech_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeech2TextForConditionalGeneration,
            TFSpeech2TextModel,
            TFSpeech2TextPreTrainedModel,
        )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Speech2TextForConditionalGeneration,
            Speech2TextModel,
            Speech2TextPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _snake_case = logging.get_logger(__name__) _snake_case = { 'facebook/data2vec-vision-base-ft': ( 'https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json' ), } class UpperCamelCase ( __snake_case ): UpperCamelCase : int = '''data2vec-vision''' def __init__( self : Union[str, Any] , UpperCAmelCase__ : Dict=768 , UpperCAmelCase__ : Tuple=12 , UpperCAmelCase__ : Dict=12 , UpperCAmelCase__ : Optional[Any]=3072 , UpperCAmelCase__ : Optional[Any]="gelu" , UpperCAmelCase__ : Union[str, Any]=0.0 , UpperCAmelCase__ : Optional[Any]=0.0 , UpperCAmelCase__ : str=0.0_2 , UpperCAmelCase__ : str=1E-12 , UpperCAmelCase__ : str=224 , UpperCAmelCase__ : Union[str, Any]=16 , UpperCAmelCase__ : Optional[Any]=3 , UpperCAmelCase__ : int=False , UpperCAmelCase__ : str=False , UpperCAmelCase__ : int=False , UpperCAmelCase__ : List[str]=False , UpperCAmelCase__ : Optional[Any]=0.1 , UpperCAmelCase__ : Dict=0.1 , UpperCAmelCase__ : List[str]=True , UpperCAmelCase__ : int=[3, 5, 7, 11] , UpperCAmelCase__ : Tuple=[1, 2, 3, 6] , UpperCAmelCase__ : Tuple=True , UpperCAmelCase__ : Any=0.4 , UpperCAmelCase__ : List[str]=256 , UpperCAmelCase__ : Optional[int]=1 , UpperCAmelCase__ : Optional[Any]=False , UpperCAmelCase__ : Union[str, Any]=255 , **UpperCAmelCase__ : Union[str, Any] , ) -> Optional[int]: super().__init__(**UpperCAmelCase__ ) _a : Tuple = hidden_size _a : Optional[int] = num_hidden_layers _a : Union[str, Any] = num_attention_heads _a : Dict = intermediate_size _a : Any = hidden_act _a : Optional[int] = hidden_dropout_prob _a : Optional[Any] = attention_probs_dropout_prob _a : List[Any] = initializer_range _a : Any = layer_norm_eps _a : Any = image_size _a : Union[str, Any] = patch_size _a : Optional[Any] = num_channels _a : Union[str, Any] = use_mask_token _a : int = use_absolute_position_embeddings _a : List[str] = use_relative_position_bias _a : List[Any] = use_shared_relative_position_bias _a : str = layer_scale_init_value _a : int = drop_path_rate _a : Tuple = use_mean_pooling # decode head attributes (semantic segmentation) _a : Tuple = out_indices _a : Optional[int] = pool_scales # auxiliary head attributes (semantic segmentation) _a : int = use_auxiliary_head _a : List[Any] = auxiliary_loss_weight _a : str = auxiliary_channels _a : Union[str, Any] = auxiliary_num_convs _a : List[Any] = auxiliary_concat_input _a : Dict = semantic_loss_ignore_index class UpperCamelCase ( __snake_case ): UpperCamelCase : int = version.parse('''1.11''' ) @property def _lowercase ( self : int ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) @property def _lowercase ( self : str ) -> float: return 1E-4
'''simple docstring''' import os import sys import unittest lowercase : Dict = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, 'utils')) import get_test_info # noqa: E402 from get_test_info import ( # noqa: E402 get_model_to_test_mapping, get_model_to_tester_mapping, get_test_to_tester_mapping, ) lowercase : Any = os.path.join('tests', 'models', 'bert', 'test_modeling_bert.py') lowercase : Optional[int] = os.path.join('tests', 'models', 'blip', 'test_modeling_blip.py') class A ( unittest.TestCase ): def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" A : Tuple = get_test_to_tester_mapping(SCREAMING_SNAKE_CASE ) A : Any = get_test_to_tester_mapping(SCREAMING_SNAKE_CASE ) A : List[Any] = {'''BertModelTest''': '''BertModelTester'''} A : int = { '''BlipModelTest''': '''BlipModelTester''', '''BlipTextImageModelTest''': '''BlipTextImageModelsModelTester''', '''BlipTextModelTest''': '''BlipTextModelTester''', '''BlipTextRetrievalModelTest''': '''BlipTextRetrievalModelTester''', '''BlipVQAModelTest''': '''BlipVQAModelTester''', '''BlipVisionModelTest''': '''BlipVisionModelTester''', } self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Any: """simple docstring""" A : Tuple = get_model_to_test_mapping(SCREAMING_SNAKE_CASE ) A : Tuple = get_model_to_test_mapping(SCREAMING_SNAKE_CASE ) A : List[str] = { '''BertForMaskedLM''': ['''BertModelTest'''], '''BertForMultipleChoice''': ['''BertModelTest'''], '''BertForNextSentencePrediction''': ['''BertModelTest'''], '''BertForPreTraining''': ['''BertModelTest'''], '''BertForQuestionAnswering''': ['''BertModelTest'''], '''BertForSequenceClassification''': ['''BertModelTest'''], '''BertForTokenClassification''': ['''BertModelTest'''], '''BertLMHeadModel''': ['''BertModelTest'''], '''BertModel''': ['''BertModelTest'''], } A : Union[str, Any] = { '''BlipForConditionalGeneration''': ['''BlipTextImageModelTest'''], '''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTest'''], '''BlipForQuestionAnswering''': ['''BlipVQAModelTest'''], '''BlipModel''': ['''BlipModelTest'''], '''BlipTextModel''': ['''BlipTextModelTest'''], '''BlipVisionModel''': ['''BlipVisionModelTest'''], } self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" A : int = get_model_to_tester_mapping(SCREAMING_SNAKE_CASE ) A : Union[str, Any] = get_model_to_tester_mapping(SCREAMING_SNAKE_CASE ) A : Dict = { '''BertForMaskedLM''': ['''BertModelTester'''], '''BertForMultipleChoice''': ['''BertModelTester'''], '''BertForNextSentencePrediction''': ['''BertModelTester'''], '''BertForPreTraining''': ['''BertModelTester'''], '''BertForQuestionAnswering''': ['''BertModelTester'''], '''BertForSequenceClassification''': ['''BertModelTester'''], '''BertForTokenClassification''': ['''BertModelTester'''], '''BertLMHeadModel''': ['''BertModelTester'''], '''BertModel''': ['''BertModelTester'''], } A : str = { '''BlipForConditionalGeneration''': ['''BlipTextImageModelsModelTester'''], '''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTester'''], '''BlipForQuestionAnswering''': ['''BlipVQAModelTester'''], '''BlipModel''': ['''BlipModelTester'''], '''BlipTextModel''': 
['''BlipTextModelTester'''], '''BlipVisionModel''': ['''BlipVisionModelTester'''], } self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging _a : Optional[Any] = logging.get_logger(__name__) _a : Dict = { 'studio-ousia/luke-base': 'https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json', 'studio-ousia/luke-large': 'https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json', } class __A ( __snake_case ): _UpperCamelCase : Any = "luke" def __init__( self , a__=50267 , a__=500000 , a__=768 , a__=256 , a__=12 , a__=12 , a__=3072 , a__="gelu" , a__=0.1 , a__=0.1 , a__=512 , a__=2 , a__=0.0_2 , a__=1e-12 , a__=True , a__=None , a__=1 , a__=0 , a__=2 , **a__ , ): super().__init__(pad_token_id=a__ , bos_token_id=a__ , eos_token_id=a__ , **a__ ) _lowerCAmelCase : Union[str, Any] = vocab_size _lowerCAmelCase : int = entity_vocab_size _lowerCAmelCase : Optional[int] = hidden_size _lowerCAmelCase : Union[str, Any] = entity_emb_size _lowerCAmelCase : List[str] = num_hidden_layers _lowerCAmelCase : Optional[Any] = num_attention_heads _lowerCAmelCase : List[str] = hidden_act _lowerCAmelCase : Union[str, Any] = intermediate_size _lowerCAmelCase : Tuple = hidden_dropout_prob _lowerCAmelCase : Tuple = attention_probs_dropout_prob _lowerCAmelCase : Tuple = max_position_embeddings _lowerCAmelCase : List[Any] = type_vocab_size _lowerCAmelCase : int = initializer_range _lowerCAmelCase : Optional[Any] = layer_norm_eps _lowerCAmelCase : List[str] = use_entity_aware_attention _lowerCAmelCase : List[str] = classifier_dropout
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow

from ..bert.test_tokenization_bert import BertTokenizationTest


@require_tokenizers
class DistilBertTokenizationTest(BertTokenizationTest):
    tokenizer_class = DistilBertTokenizer
    rust_tokenizer_class = DistilBertTokenizerFast
    test_rust_tokenizer = True

    @slow
    def test_sequence_builders(self):
        tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
from scipy.stats import pearsonr import datasets __UpperCamelCase : Optional[int] = '\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n' __UpperCamelCase : Optional[Any] = '\nArgs:\n predictions (`list` of `int`): Predicted class labels, as returned by a model.\n references (`list` of `int`): Ground truth labels.\n return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n Example 1-A simple example using only predictions and references.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n\n Example 2-The same as Example 1, but that also returns the `p-value`.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n >>> print(sorted(list(results.keys())))\n [\'p-value\', \'pearsonr\']\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n >>> print(round(results[\'p-value\'], 2))\n 0.15\n' __UpperCamelCase : str = '\n@article{2020SciPy-NMeth,\nauthor = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Antonio H. 
and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\njournal = {Nature Methods},\nyear = {2020},\nvolume = {17},\npages = {261--272},\nadsurl = {https://rdcu.be/b08Wh},\ndoi = {10.1038/s41592-019-0686-2},\n}\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION) class lowercase__ ( datasets.Metric): def __A ( self : int ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''float''' ), '''references''': datasets.Value('''float''' ), } ) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'''] , ) def __A ( self : str , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Union[str, Any]=False ): '''simple docstring''' if return_pvalue: SCREAMING_SNAKE_CASE : Union[str, Any] = pearsonr(UpperCamelCase__ , UpperCamelCase__ ) return {"pearsonr": results[0], "p-value": results[1]} else: return {"pearsonr": float(pearsonr(UpperCamelCase__ , UpperCamelCase__ )[0] )}
'''simple docstring''' from typing import List, Optional, Union import numpy as np import torch import torchaudio.compliance.kaldi as ta_kaldi from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging lowercase : Optional[int] = logging.get_logger(__name__) class A ( __snake_case ): __magic_name__ = ['''input_features''', '''attention_mask'''] def __init__( self , SCREAMING_SNAKE_CASE=80 , SCREAMING_SNAKE_CASE=16000 , SCREAMING_SNAKE_CASE=80 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , **SCREAMING_SNAKE_CASE , ) -> int: """simple docstring""" super().__init__(feature_size=SCREAMING_SNAKE_CASE , sampling_rate=SCREAMING_SNAKE_CASE , padding_value=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) A : Optional[int] = num_mel_bins A : Tuple = do_ceptral_normalize A : Dict = normalize_means A : List[Any] = normalize_vars A : List[str] = True def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , ) -> np.ndarray: """simple docstring""" A : List[Any] = waveform * (2**15) # Kaldi compliance: 16-bit signed integers A : Any = torch.from_numpy(SCREAMING_SNAKE_CASE ).unsqueeze(0 ) A : Any = ta_kaldi.fbank(SCREAMING_SNAKE_CASE , num_mel_bins=self.num_mel_bins , sample_frequency=self.sampling_rate ) return features.numpy() @staticmethod def __lowerCAmelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = 0.0 , ) -> np.ndarray: """simple docstring""" if normalize_means: A : Dict = x[:input_length].mean(axis=0 ) A : Optional[Any] = np.subtract(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) if normalize_vars: A : str = x[:input_length].std(axis=0 ) A : int = np.divide(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) if input_length < x.shape[0]: A : List[str] = padding_value # make sure array is in float32 A : Tuple = x.astype(np.floataa ) return x def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None ) -> List[np.ndarray]: """simple docstring""" A : List[Any] = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features] return [ self.utterance_cmvn(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , self.normalize_means , self.normalize_vars , self.padding_value ) for x, n in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ] def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ) -> BatchFeature: """simple docstring""" if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F'The model corresponding to this feature extractor: {self} was trained using a sampling rate of' F' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with' F' {self.sampling_rate} and not {sampling_rate}.' ) else: logger.warning( '''It is strongly recommended to pass the `sampling_rate` argument to this function. 
''' '''Failing to do so can result in silent errors that might be hard to debug.''' ) A : List[Any] = isinstance(SCREAMING_SNAKE_CASE , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(F'Only mono-channel audio is supported for input to {self}' ) A : Tuple = is_batched_numpy or ( isinstance(SCREAMING_SNAKE_CASE , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: A : Union[str, Any] = [np.asarray(SCREAMING_SNAKE_CASE , dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(SCREAMING_SNAKE_CASE , np.ndarray ): A : Union[str, Any] = np.asarray(SCREAMING_SNAKE_CASE , dtype=np.floataa ) elif isinstance(SCREAMING_SNAKE_CASE , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): A : Optional[int] = raw_speech.astype(np.floataa ) # always return batch if not is_batched: A : Any = [raw_speech] # extract fbank features A : List[str] = [self._extract_fbank_features(SCREAMING_SNAKE_CASE ) for waveform in raw_speech] # convert into correct format for padding A : str = BatchFeature({'''input_features''': features} ) A : Union[str, Any] = self.pad( SCREAMING_SNAKE_CASE , padding=SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , truncation=SCREAMING_SNAKE_CASE , pad_to_multiple_of=SCREAMING_SNAKE_CASE , return_attention_mask=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , ) # make sure list is in array format A : List[str] = padded_inputs.get('''input_features''' ) if isinstance(input_features[0] , SCREAMING_SNAKE_CASE ): A : str = [np.asarray(SCREAMING_SNAKE_CASE , dtype=np.floataa ) for feature in input_features] A : Union[str, Any] = padded_inputs.get('''attention_mask''' ) if attention_mask is not None: A : Union[str, Any] = [np.asarray(SCREAMING_SNAKE_CASE , dtype=np.intaa ) for array in attention_mask] # Utterance-level cepstral mean and variance normalization if self.do_ceptral_normalize: A : Dict = ( np.array(SCREAMING_SNAKE_CASE , dtype=np.intaa ) if self._get_padding_strategies(SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE ) is not PaddingStrategy.DO_NOT_PAD else None ) A : List[Any] = self.normalize( padded_inputs['''input_features'''] , attention_mask=SCREAMING_SNAKE_CASE ) if return_tensors is not None: A : int = padded_inputs.convert_to_tensors(SCREAMING_SNAKE_CASE ) return padded_inputs
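# A minimal usage sketch (left as comments since this module only runs inside
# the transformers package; "Speech2TextFeatureExtractor" is my assumed name
# for the class this file defines, and the silence stands in for real audio):
#
#   import numpy as np
#   extractor = Speech2TextFeatureExtractor()
#   audio = np.zeros(16000, dtype=np.float32)  # one second of 16 kHz silence
#   inputs = extractor(audio, sampling_rate=16000, return_tensors="np")
#   print(inputs["input_features"].shape)  # (batch, frames, num_mel_bins)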
'''simple docstring''' from __future__ import annotations import copy import tempfile import unittest from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available from transformers.testing_utils import ( DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, RequestCounter, require_tensorflow_probability, require_tf, slow, ) from ..bert.test_modeling_bert import BertModelTester if is_tf_available(): from transformers import ( TFAutoModel, TFAutoModelForCausalLM, TFAutoModelForMaskedLM, TFAutoModelForPreTraining, TFAutoModelForQuestionAnswering, TFAutoModelForSeqaSeqLM, TFAutoModelForSequenceClassification, TFAutoModelForTableQuestionAnswering, TFAutoModelForTokenClassification, TFAutoModelWithLMHead, TFBertForMaskedLM, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFBertModel, TFFunnelBaseModel, TFFunnelModel, TFGPTaLMHeadModel, TFRobertaForMaskedLM, TFTaForConditionalGeneration, TFTapasForQuestionAnswering, ) from transformers.models.auto.modeling_tf_auto import ( TF_MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_PRETRAINING_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, TF_MODEL_MAPPING, ) from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST class lowerCAmelCase__ ( __snake_case ): lowerCAmelCase_ = '''new-model''' if is_tf_available(): class lowerCAmelCase__ ( __snake_case ): lowerCAmelCase_ = NewModelConfig @require_tf class lowerCAmelCase__ ( unittest.TestCase ): @slow def _snake_case ( self ): """simple docstring""" lowercase_ : Dict = '''bert-base-cased''' lowercase_ : Union[str, Any] = AutoConfig.from_pretrained(__SCREAMING_SNAKE_CASE ) self.assertIsNotNone(__SCREAMING_SNAKE_CASE ) self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) lowercase_ : Any = TFAutoModel.from_pretrained(__SCREAMING_SNAKE_CASE ) self.assertIsNotNone(__SCREAMING_SNAKE_CASE ) self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) @slow def _snake_case ( self ): """simple docstring""" lowercase_ : Optional[int] = '''bert-base-cased''' lowercase_ : Any = AutoConfig.from_pretrained(__SCREAMING_SNAKE_CASE ) self.assertIsNotNone(__SCREAMING_SNAKE_CASE ) self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) lowercase_ : Optional[int] = TFAutoModelForPreTraining.from_pretrained(__SCREAMING_SNAKE_CASE ) self.assertIsNotNone(__SCREAMING_SNAKE_CASE ) self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) @slow def _snake_case ( self ): """simple docstring""" for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase_ : List[Any] = AutoConfig.from_pretrained(__SCREAMING_SNAKE_CASE ) self.assertIsNotNone(__SCREAMING_SNAKE_CASE ) self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) lowercase_ : Tuple = TFAutoModelForCausalLM.from_pretrained(__SCREAMING_SNAKE_CASE ) lowercase_ : Dict = TFAutoModelForCausalLM.from_pretrained(__SCREAMING_SNAKE_CASE , output_loading_info=__SCREAMING_SNAKE_CASE ) self.assertIsNotNone(__SCREAMING_SNAKE_CASE ) self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) @slow 
    def test_lmhead_model_from_pretrained(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelWithLMHead.from_pretrained(model_name)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

    @slow
    def test_model_for_masked_lm(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForMaskedLM.from_pretrained(model_name)
            model, loading_info = TFAutoModelForMaskedLM.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

    @slow
    def test_model_for_encoder_decoder_lm(self):
        for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, T5Config)

            model = TFAutoModelForSeq2SeqLM.from_pretrained(model_name)
            model, loading_info = TFAutoModelForSeq2SeqLM.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFT5ForConditionalGeneration)

    @slow
    def test_sequence_classification_model_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForSequenceClassification.from_pretrained(model_name)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForSequenceClassification)

    @slow
    def test_question_answering_model_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForQuestionAnswering.from_pretrained(model_name)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForQuestionAnswering)

    @slow
    @require_tensorflow_probability
    def test_table_question_answering_model_from_pretrained(self):
        for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, TapasConfig)

            model = TFAutoModelForTableQuestionAnswering.from_pretrained(model_name)
            model, loading_info = TFAutoModelForTableQuestionAnswering.from_pretrained(
                model_name, output_loading_info=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFTapasForQuestionAnswering)

    def test_from_pretrained_identifier(self):
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(model, TFBertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

    def test_from_identifier_from_model_type(self):
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(model, TFRobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

    def test_from_pretrained_with_tuple_values(self):
        # For the auto model mapping, FunnelConfig has two models: FunnelModel and FunnelBaseModel
        model = TFAutoModel.from_pretrained("sgugger/funnel-random-tiny")
        self.assertIsInstance(model, TFFunnelModel)

        config = copy.deepcopy(model.config)
        config.architectures = ["FunnelBaseModel"]
        model = TFAutoModel.from_config(config)
        self.assertIsInstance(model, TFFunnelBaseModel)

        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir)
            model = TFAutoModel.from_pretrained(tmp_dir)
            self.assertIsInstance(model, TFFunnelBaseModel)

    def test_new_model_registration(self):
        try:
            AutoConfig.register("new-model", NewModelConfig)

            auto_classes = [
                TFAutoModel,
                TFAutoModelForCausalLM,
                TFAutoModelForMaskedLM,
                TFAutoModelForPreTraining,
                TFAutoModelForQuestionAnswering,
                TFAutoModelForSequenceClassification,
                TFAutoModelForTokenClassification,
            ]

            for auto_class in auto_classes:
                with self.subTest(auto_class.__name__):
                    # Wrong config class will raise an error
                    with self.assertRaises(ValueError):
                        auto_class.register(BertConfig, NewModel)
                    auto_class.register(NewModelConfig, NewModel)
                    # Trying to register something existing in the Transformers library will raise an error
                    with self.assertRaises(ValueError):
                        auto_class.register(BertConfig, TFBertModel)

                    # Now that the config is registered, it can be used as any other config with the auto-API
                    tiny_config = BertModelTester(self).get_config()
                    config = NewModelConfig(**tiny_config.to_dict())
                    model = auto_class.from_config(config)
                    self.assertIsInstance(model, NewModel)

                    with tempfile.TemporaryDirectory() as tmp_dir:
                        model.save_pretrained(tmp_dir)
                        new_model = auto_class.from_pretrained(tmp_dir)
                        self.assertIsInstance(new_model, NewModel)
        finally:
            if "new-model" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["new-model"]
            for mapping in (
                TF_MODEL_MAPPING,
                TF_MODEL_FOR_PRETRAINING_MAPPING,
                TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
                TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
                TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
                TF_MODEL_FOR_CAUSAL_LM_MAPPING,
                TF_MODEL_FOR_MASKED_LM_MAPPING,
            ):
                if NewModelConfig in mapping._extra_content:
                    del mapping._extra_content[NewModelConfig]

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = TFAutoModel.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = TFAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_model_file_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin",
        ):
            _ = TFAutoModel.from_pretrained("hf-internal-testing/config-no-model")

    def test_model_from_pt_suggestion(self):
        with self.assertRaisesRegex(EnvironmentError, "Use `from_pt=True` to load this model"):
            _ = TFAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only")

    def test_cached_model_has_minimum_calls_to_head(self):
        # Make sure we have cached the model.
        _ = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert")
        with RequestCounter() as counter:
            _ = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert")
        self.assertEqual(counter.get_request_count, 0)
        self.assertEqual(counter.head_request_count, 1)
        self.assertEqual(counter.other_request_count, 0)

        # With a sharded checkpoint
        _ = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded")
        with RequestCounter() as counter:
            _ = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded")
        self.assertEqual(counter.get_request_count, 0)
        self.assertEqual(counter.head_request_count, 1)
        self.assertEqual(counter.other_request_count, 0)
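# Sidebar: a minimal sketch of how the registration API exercised by the tests above is
# used in user code. `MyConfig` and `TFMyModel` are hypothetical names, not part of the
# test suite; any PretrainedConfig subclass with a unique `model_type`, paired with a
# TFPreTrainedModel subclass, follows the same pattern.
from transformers import AutoConfig, PretrainedConfig, TFAutoModel


class MyConfig(PretrainedConfig):
    model_type = "my-model"  # must not collide with an existing CONFIG_MAPPING entry


AutoConfig.register("my-model", MyConfig)
# TFAutoModel.register(MyConfig, TFMyModel)  # uncomment once TFMyModel is defined
# After registration, TFAutoModel.from_config(MyConfig()) resolves to TFMyModel.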
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile

from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError

import transformers
from transformers import (
    CONFIG_MAPPING,
    FEATURE_EXTRACTOR_MAPPING,
    PROCESSOR_MAPPING,
    TOKENIZER_MAPPING,
    AutoConfig,
    AutoFeatureExtractor,
    AutoProcessor,
    AutoTokenizer,
    BertTokenizer,
    ProcessorMixin,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available


sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))

from test_module.custom_configuration import CustomConfig  # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor  # noqa E402
from test_module.custom_processing import CustomProcessor  # noqa E402
from test_module.custom_tokenization import CustomTokenizer  # noqa E402


SAMPLE_PROCESSOR_CONFIG = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
SAMPLE_VOCAB = get_tests_dir("fixtures/vocab.json")
SAMPLE_PROCESSOR_CONFIG_DIR = get_tests_dir("fixtures")


class AutoFeatureExtractorTest(unittest.TestCase):
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]

    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_processor_from_model_shortcut(self):
        processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h")
        self.assertIsInstance(processor, Wav2Vec2Processor)

    def test_processor_from_local_directory_from_repo(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config()
            processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h")

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            processor.save_pretrained(tmpdirname)

            processor = AutoProcessor.from_pretrained(tmpdirname)

        self.assertIsInstance(processor, Wav2Vec2Processor)

    def test_processor_from_local_directory_from_extractor_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            # copy relevant files
            copyfile(SAMPLE_PROCESSOR_CONFIG, os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME))
            copyfile(SAMPLE_VOCAB, os.path.join(tmpdirname, "vocab.json"))

            processor = AutoProcessor.from_pretrained(tmpdirname)

        self.assertIsInstance(processor, Wav2Vec2Processor)

    def test_processor_from_feat_extr_processor_class(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            feature_extractor = Wav2Vec2FeatureExtractor()
            tokenizer = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h")

            processor = Wav2Vec2Processor(feature_extractor, tokenizer)

            # save in new folder
            processor.save_pretrained(tmpdirname)

            # drop `processor_class` in tokenizer config
            with open(os.path.join(tmpdirname, TOKENIZER_CONFIG_FILE), "r") as f:
                config_dict = json.load(f)
            config_dict.pop("processor_class")

            with open(os.path.join(tmpdirname, TOKENIZER_CONFIG_FILE), "w") as f:
                f.write(json.dumps(config_dict))

            processor = AutoProcessor.from_pretrained(tmpdirname)

        self.assertIsInstance(processor, Wav2Vec2Processor)

    def test_processor_from_tokenizer_processor_class(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            feature_extractor = Wav2Vec2FeatureExtractor()
            tokenizer = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h")

            processor = Wav2Vec2Processor(feature_extractor, tokenizer)

            # save in new folder
            processor.save_pretrained(tmpdirname)

            # drop `processor_class` in feature extractor config
            with open(os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME), "r") as f:
                config_dict = json.load(f)
            config_dict.pop("processor_class")

            with open(os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME), "w") as f:
                f.write(json.dumps(config_dict))

            processor = AutoProcessor.from_pretrained(tmpdirname)

        self.assertIsInstance(processor, Wav2Vec2Processor)

    def test_processor_from_local_directory_from_model_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config(processor_class="Wav2Vec2Processor")
            model_config.save_pretrained(tmpdirname)
            # copy relevant files
            copyfile(SAMPLE_VOCAB, os.path.join(tmpdirname, "vocab.json"))
            # create empty sample processor
            with open(os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME), "w") as f:
                f.write("{}")

            processor = AutoProcessor.from_pretrained(tmpdirname)

        self.assertIsInstance(processor, Wav2Vec2Processor)

    def test_from_pretrained_dynamic_processor(self):
        # If remote code is not trusted, loading this config raises.
        with self.assertRaises(ValueError):
            processor = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            processor = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor", trust_remote_code=False
            )

        processor = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor", trust_remote_code=True)
        self.assertTrue(processor.special_attribute_present)
        self.assertEqual(processor.__class__.__name__, "NewProcessor")

        feature_extractor = processor.feature_extractor
        self.assertTrue(feature_extractor.special_attribute_present)
        self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")

        tokenizer = processor.tokenizer
        self.assertTrue(tokenizer.special_attribute_present)
        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")

            # Test we can also load the slow version
            new_processor = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor", trust_remote_code=True, use_fast=False
            )
            new_tokenizer = new_processor.tokenizer
            self.assertTrue(new_tokenizer.special_attribute_present)
            self.assertEqual(new_tokenizer.__class__.__name__, "NewTokenizer")
        else:
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")

    def test_new_processor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
            AutoProcessor.register(CustomConfig, CustomProcessor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoProcessor.register(Wav2Vec2Config, Wav2Vec2Processor)

            # Now that the config is registered, it can be used as any other config with the auto-API
            feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)

            with tempfile.TemporaryDirectory() as tmp_dir:
                vocab_file = os.path.join(tmp_dir, "vocab.txt")
                with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                    vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
                tokenizer = CustomTokenizer(vocab_file)

            processor = CustomProcessor(feature_extractor, tokenizer)

            with tempfile.TemporaryDirectory() as tmp_dir:
                processor.save_pretrained(tmp_dir)
                new_processor = AutoProcessor.from_pretrained(tmp_dir)
                self.assertIsInstance(new_processor, CustomProcessor)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
            if CustomConfig in PROCESSOR_MAPPING._extra_content:
                del PROCESSOR_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_processor_conflict(self):
        class NewFeatureExtractor(Wav2Vec2FeatureExtractor):
            special_attribute_present = False

        class NewTokenizer(BertTokenizer):
            special_attribute_present = False

        class NewProcessor(ProcessorMixin):
            feature_extractor_class = "AutoFeatureExtractor"
            tokenizer_class = "AutoTokenizer"
            special_attribute_present = False

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, NewFeatureExtractor)
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=NewTokenizer)
            AutoProcessor.register(CustomConfig, NewProcessor)
            # If remote code is not set, the default is to use local classes.
            processor = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor")
            self.assertEqual(processor.__class__.__name__, "NewProcessor")
            self.assertFalse(processor.special_attribute_present)
            self.assertFalse(processor.feature_extractor.special_attribute_present)
            self.assertFalse(processor.tokenizer.special_attribute_present)

            # If remote code is disabled, we load the local ones.
            processor = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor", trust_remote_code=False
            )
            self.assertEqual(processor.__class__.__name__, "NewProcessor")
            self.assertFalse(processor.special_attribute_present)
            self.assertFalse(processor.feature_extractor.special_attribute_present)
            self.assertFalse(processor.tokenizer.special_attribute_present)

            # If remote is enabled, we load from the Hub.
            processor = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor", trust_remote_code=True
            )
            self.assertEqual(processor.__class__.__name__, "NewProcessor")
            self.assertTrue(processor.special_attribute_present)
            self.assertTrue(processor.feature_extractor.special_attribute_present)
            self.assertTrue(processor.tokenizer.special_attribute_present)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
            if CustomConfig in PROCESSOR_MAPPING._extra_content:
                del PROCESSOR_MAPPING._extra_content[CustomConfig]

    def test_auto_processor_creates_tokenizer(self):
        processor = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-bert")
        self.assertEqual(processor.__class__.__name__, "BertTokenizerFast")

    def test_auto_processor_creates_image_processor(self):
        processor = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-convnext")
        self.assertEqual(processor.__class__.__name__, "ConvNextImageProcessor")


@is_staging_test
class ProcessorPushToHubTester(unittest.TestCase):
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]

    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-processor")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-processor-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-processor")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        processor = Wav2Vec2Processor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)

        with tempfile.TemporaryDirectory() as tmp_dir:
            processor.save_pretrained(
                os.path.join(tmp_dir, "test-processor"), push_to_hub=True, use_auth_token=self._token
            )

            new_processor = Wav2Vec2Processor.from_pretrained(f"{USER}/test-processor")
            for k, v in processor.feature_extractor.__dict__.items():
                self.assertEqual(v, getattr(new_processor.feature_extractor, k))
            self.assertDictEqual(new_processor.tokenizer.get_vocab(), processor.tokenizer.get_vocab())

    def test_push_to_hub_in_organization(self):
        processor = Wav2Vec2Processor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)

        with tempfile.TemporaryDirectory() as tmp_dir:
            processor.save_pretrained(
                os.path.join(tmp_dir, "test-processor-org"),
                push_to_hub=True,
                use_auth_token=self._token,
                organization="valid_org",
            )

            new_processor = Wav2Vec2Processor.from_pretrained("valid_org/test-processor-org")
            for k, v in processor.feature_extractor.__dict__.items():
                self.assertEqual(v, getattr(new_processor.feature_extractor, k))
            self.assertDictEqual(new_processor.tokenizer.get_vocab(), processor.tokenizer.get_vocab())

    def test_push_to_hub_dynamic_processor(self):
        CustomFeatureExtractor.register_for_auto_class()
        CustomTokenizer.register_for_auto_class()
        CustomProcessor.register_for_auto_class()

        feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)

        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = CustomTokenizer(vocab_file)

        processor = CustomProcessor(feature_extractor, tokenizer)

        with tempfile.TemporaryDirectory() as tmp_dir:
            create_repo(f"{USER}/test-dynamic-processor", token=self._token)
            repo = Repository(tmp_dir, clone_from=f"{USER}/test-dynamic-processor", token=self._token)
            processor.save_pretrained(tmp_dir)

            # This has added the proper auto_map field to the feature extractor config
            self.assertDictEqual(
                processor.feature_extractor.auto_map,
                {
                    "AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor",
                    "AutoProcessor": "custom_processing.CustomProcessor",
                },
            )

            # This has added the proper auto_map field to the tokenizer config
            with open(os.path.join(tmp_dir, "tokenizer_config.json")) as f:
                tokenizer_config = json.load(f)
            self.assertDictEqual(
                tokenizer_config["auto_map"],
                {
                    "AutoTokenizer": ["custom_tokenization.CustomTokenizer", None],
                    "AutoProcessor": "custom_processing.CustomProcessor",
                },
            )

            # The code has been copied from fixtures
            self.assertTrue(os.path.isfile(os.path.join(tmp_dir, "custom_feature_extraction.py")))
            self.assertTrue(os.path.isfile(os.path.join(tmp_dir, "custom_tokenization.py")))
            self.assertTrue(os.path.isfile(os.path.join(tmp_dir, "custom_processing.py")))

            repo.push_to_hub()

        new_processor = AutoProcessor.from_pretrained(f"{USER}/test-dynamic-processor", trust_remote_code=True)
        # Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
        self.assertEqual(new_processor.__class__.__name__, "CustomProcessor")
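# Usage note, outside the test harness: the dynamic-processor repositories above execute
# code fetched from the Hub, so loading them requires an explicit opt-in. A minimal
# sketch, reusing the same test fixture repository:
from transformers import AutoProcessor

processor = AutoProcessor.from_pretrained(
    "hf-internal-testing/test_dynamic_processor", trust_remote_code=True
)  # without trust_remote_code=True this raises, exactly as the tests assert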
import math
from typing import Callable, List, Optional, Union

import numpy as np
import PIL
import torch
from PIL import Image

from transformers import CLIPTextModel, CLIPTokenizer

from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler


def make_transparency_mask(size, overlap_pixels, remove_borders=[]):
    size_x = size[0] - overlap_pixels * 2
    size_y = size[1] - overlap_pixels * 2
    for letter in ["l", "r"]:
        if letter in remove_borders:
            size_x += overlap_pixels
    for letter in ["t", "b"]:
        if letter in remove_borders:
            size_y += overlap_pixels
    mask = np.ones((size_y, size_x), dtype=np.uint8) * 255
    mask = np.pad(mask, mode="linear_ramp", pad_width=overlap_pixels, end_values=0)

    if "l" in remove_borders:
        mask = mask[:, overlap_pixels : mask.shape[1]]
    if "r" in remove_borders:
        mask = mask[:, 0 : mask.shape[1] - overlap_pixels]
    if "t" in remove_borders:
        mask = mask[overlap_pixels : mask.shape[0], :]
    if "b" in remove_borders:
        mask = mask[0 : mask.shape[0] - overlap_pixels, :]
    return mask


def clamp(n, smallest, largest):
    return max(smallest, min(n, largest))


def clamp_rect(rect, min_coords, max_coords):
    return (
        clamp(rect[0], min_coords[0], max_coords[0]),
        clamp(rect[1], min_coords[1], max_coords[1]),
        clamp(rect[2], min_coords[0], max_coords[0]),
        clamp(rect[3], min_coords[1], max_coords[1]),
    )


def add_overlap_rect(rect, overlap, image_size):
    rect = list(rect)
    rect[0] -= overlap
    rect[1] -= overlap
    rect[2] += overlap
    rect[3] += overlap
    rect = clamp_rect(rect, [0, 0], [image_size[0], image_size[1]])
    return rect


def squeeze_tile(tile, original_image, original_slice, slice_x):
    result = Image.new("RGB", (tile.size[0] + original_slice, tile.size[1]))
    result.paste(
        original_image.resize((tile.size[0], tile.size[1]), Image.BICUBIC).crop(
            (slice_x, 0, slice_x + original_slice, tile.size[1])
        ),
        (0, 0),
    )
    result.paste(tile, (original_slice, 0))
    return result


def unsqueeze_tile(tile, original_image_slice):
    crop_rect = (original_image_slice * 4, 0, tile.size[0], tile.size[1])
    tile = tile.crop(crop_rect)
    return tile


def next_divisible(n, d):
    divisor = n % d
    return n - divisor


class StableDiffusionTiledUpscalePipeline(StableDiffusionUpscalePipeline):
    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        low_res_scheduler: DDPMScheduler,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        max_noise_level: int = 350,
    ):
        super().__init__(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            low_res_scheduler=low_res_scheduler,
            scheduler=scheduler,
            max_noise_level=max_noise_level,
        )

    def _process_tile(self, original_image_slice, x, y, tile_size, tile_border, image, final_image, **kwargs):
        torch.manual_seed(0)
        crop_rect = (
            min(image.size[0] - (tile_size + original_image_slice), x * tile_size),
            min(image.size[1] - (tile_size + original_image_slice), y * tile_size),
            min(image.size[0], (x + 1) * tile_size),
            min(image.size[1], (y + 1) * tile_size),
        )
        crop_rect_with_overlap = add_overlap_rect(crop_rect, tile_border, image.size)
        tile = image.crop(crop_rect_with_overlap)
        translated_slice_x = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
        translated_slice_x = translated_slice_x - (original_image_slice / 2)
        translated_slice_x = max(0, translated_slice_x)
        to_input = squeeze_tile(tile, image, original_image_slice, translated_slice_x)
        orig_input_size = to_input.size
        to_input = to_input.resize((tile_size, tile_size), Image.BICUBIC)
        upscaled_tile = super(StableDiffusionTiledUpscalePipeline, self).__call__(image=to_input, **kwargs).images[0]
        upscaled_tile = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4), Image.BICUBIC)
        upscaled_tile = unsqueeze_tile(upscaled_tile, original_image_slice)
        upscaled_tile = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4), Image.BICUBIC)
        remove_borders = []
        if x == 0:
            remove_borders.append("l")
        elif crop_rect[2] == image.size[0]:
            remove_borders.append("r")
        if y == 0:
            remove_borders.append("t")
        elif crop_rect[3] == image.size[1]:
            remove_borders.append("b")
        transparency_mask = Image.fromarray(
            make_transparency_mask(
                (upscaled_tile.size[0], upscaled_tile.size[1]), tile_border * 4, remove_borders=remove_borders
            ),
            mode="L",
        )
        final_image.paste(
            upscaled_tile, (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4), transparency_mask
        )

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        image: Union[PIL.Image.Image, List[PIL.Image.Image]],
        num_inference_steps: int = 75,
        guidance_scale: float = 9.0,
        noise_level: int = 50,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        callback: Optional[Callable] = None,
        callback_steps: int = 1,
        tile_size: int = 128,
        tile_border: int = 32,
        original_image_slice: int = 32,
    ):
        """Upscale the image tile by tile, pasting each 4x-upscaled tile into the result
        with a linear-ramp transparency mask so that neighbouring tiles blend together."""
        final_image = Image.new("RGB", (image.size[0] * 4, image.size[1] * 4))
        tcx = math.ceil(image.size[0] / tile_size)
        tcy = math.ceil(image.size[1] / tile_size)
        total_tile_count = tcx * tcy
        current_count = 0
        for y in range(tcy):
            for x in range(tcx):
                self._process_tile(
                    original_image_slice,
                    x,
                    y,
                    tile_size,
                    tile_border,
                    image,
                    final_image,
                    prompt=prompt,
                    num_inference_steps=num_inference_steps,
                    guidance_scale=guidance_scale,
                    noise_level=noise_level,
                    negative_prompt=negative_prompt,
                    num_images_per_prompt=num_images_per_prompt,
                    eta=eta,
                    generator=generator,
                    latents=latents,
                )
                current_count += 1
                if callback is not None:
                    callback({"progress": current_count / total_tile_count, "image": final_image})
        return final_image


def main():
    # Run a demo
    model_id = "stabilityai/stable-diffusion-x4-upscaler"
    pipe = StableDiffusionTiledUpscalePipeline.from_pretrained(model_id, revision="fp16", torch_dtype=torch.float16)
    pipe = pipe.to("cuda")
    image = Image.open("../../docs/source/imgs/diffusers_library.jpg")

    def callback(obj):
        print(f"progress: {obj['progress']:.4f}")
        obj["image"].save("diffusers_library_progress.jpg")

    final_image = pipe(image=image, prompt="Black font, white background, vector", noise_level=40, callback=callback)
    final_image.save("diffusers_library.jpg")


if __name__ == "__main__":
    main()
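# Sanity-check sketch for the blending mask used by the tiling scheme above (assumes the
# module context, so `np` and `make_transparency_mask` are in scope): the mask is 255 in
# the tile interior and ramps linearly to 0 across the overlap band, so adjacent
# upscaled tiles cross-fade where they overlap.
_mask = make_transparency_mask((64, 64), 8)
assert _mask.shape == (64, 64)  # the linear-ramp padding restores the requested size
assert _mask[0, 0] == 0  # fully transparent at the outer edge
assert _mask[32, 32] == 255  # fully opaque in the interior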
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_rembert import RemBertTokenizer
else:
    RemBertTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
    },
    "tokenizer_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/rembert": 256,
}

SPIECE_UNDERLINE = "▁"


class RemBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RemBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
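# Usage sketch (not part of the module; assumes network access to the google/rembert
# checkpoint):
if __name__ == "__main__":
    tokenizer = RemBertTokenizerFast.from_pretrained("google/rembert")
    encoded = tokenizer("first sequence", "second sequence")
    # Pairs come out as [CLS] A [SEP] B [SEP]; token_type_ids mark segment A with 0 and
    # segment B with 1, per build_inputs_with_special_tokens and
    # create_token_type_ids_from_sequences above.
    print(encoded["token_type_ids"])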
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "BridgeTower/bridgetower-base": "https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json",
    "BridgeTower/bridgetower-base-itm-mlm": (
        "https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json"
    ),
}


class BridgeTowerVisionConfig(PretrainedConfig):
    model_type = "bridgetower_vision_model"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_channels=3,
        patch_size=16,
        image_size=288,
        initializer_factor=1,
        layer_norm_eps=1e-05,
        stop_gradient=False,
        share_layernorm=True,
        remove_last_layer=False,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.stop_gradient = stop_gradient
        self.share_layernorm = share_layernorm
        self.remove_last_layer = remove_last_layer

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from BridgeTowerConfig
        if config_dict.get("model_type") == "bridgetower":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class BridgeTowerTextConfig(PretrainedConfig):
    model_type = "bridgetower_text_model"

    def __init__(
        self,
        vocab_size=50_265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        initializer_factor=1,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-05,
        initializer_range=1,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_factor = initializer_factor
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from BridgeTowerConfig
        if config_dict.get("model_type") == "bridgetower":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class BridgeTowerConfig(PretrainedConfig):
    model_type = "bridgetower"

    def __init__(
        self,
        share_cross_modal_transformer_layers=True,
        hidden_act="gelu",
        hidden_size=768,
        initializer_factor=1,
        layer_norm_eps=1e-05,
        share_link_tower_layers=False,
        link_tower_type="add",
        num_attention_heads=12,
        num_hidden_layers=6,
        tie_word_embeddings=False,
        init_layernorm_from_vision_encoder=False,
        text_config=None,
        vision_config=None,
        **kwargs,
    ):
        # TODO: remove this once the Hub files are updated.
        _ = kwargs.pop("text_config_dict", None)
        _ = kwargs.pop("vision_config_dict", None)

        super().__init__(**kwargs)
        self.share_cross_modal_transformer_layers = share_cross_modal_transformer_layers
        self.hidden_act = hidden_act
        self.hidden_size = hidden_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.share_link_tower_layers = share_link_tower_layers
        self.link_tower_type = link_tower_type
        self.num_attention_heads = num_attention_heads
        self.num_hidden_layers = num_hidden_layers
        self.tie_word_embeddings = tie_word_embeddings
        self.init_layernorm_from_vision_encoder = init_layernorm_from_vision_encoder

        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.")

        self.text_config = BridgeTowerTextConfig(**text_config)
        self.vision_config = BridgeTowerVisionConfig(**vision_config)

    @classmethod
    def from_text_vision_configs(
        cls, text_config: BridgeTowerTextConfig, vision_config: BridgeTowerVisionConfig, **kwargs
    ):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
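# Illustrative sketch (not part of the module): composing the top-level config from the
# two sub-configs via the from_text_vision_configs helper defined above.
if __name__ == "__main__":
    text_config = BridgeTowerTextConfig(vocab_size=50_265)
    vision_config = BridgeTowerVisionConfig(image_size=288)
    config = BridgeTowerConfig.from_text_vision_configs(text_config, vision_config)
    assert config.text_config.vocab_size == 50_265
    assert config.vision_config.image_size == 288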
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


if is_vision_available():
    import PIL


logger = logging.get_logger(__name__)


class ConvNextImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 384}
        size = get_size_dict(size, default_to_square=False)

        self.do_resize = do_resize
        self.size = size
        # Default value set here for backwards compatibility where the value in config is None
        self.crop_pct = crop_pct if crop_pct is not None else 224 / 256
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        crop_pct: float,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"Size dictionary must contain 'shortest_edge' key. Got {size.keys()}")
        shortest_edge = size["shortest_edge"]

        if shortest_edge < 384:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            resize_shortest_edge = int(shortest_edge / crop_pct)
            resize_size = get_resize_output_image_size(image, size=resize_shortest_edge, default_to_square=False)
            image = resize(image=image, size=resize_size, resample=resample, data_format=data_format, **kwargs)
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=image, size=(shortest_edge, shortest_edge), data_format=data_format, **kwargs)
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                image, size=(shortest_edge, shortest_edge), resample=resample, data_format=data_format, **kwargs
            )

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
            raise ValueError("crop_pct must be specified if size < 384.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
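# Illustrative sketch of the two-branch resize policy above (shapes derived from the
# code, not measured): with the default crop_pct of 224/256, shortest_edge=224 first
# resizes the shortest edge to int(224 / (224 / 256)) = 256 and then center-crops to
# 224, while at 384 or larger the image is warped straight to the square target.
if __name__ == "__main__":
    processor = ConvNextImageProcessor(size={"shortest_edge": 224})
    image = np.zeros((3, 300, 400), dtype=np.uint8)  # dummy channels-first image
    inputs = processor(images=image, return_tensors="np")
    print(inputs["pixel_values"].shape)  # expected: (1, 3, 224, 224)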
import os
from argparse import ArgumentParser, Namespace

from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand


if not is_tf_available() and not is_torch_available():
    raise RuntimeError("At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training")

# TF training parameters
USE_XLA = False
USE_AMP = False


def train_command_factory(args: Namespace):
    """
    Factory function used to instantiate the training command from provided command line arguments.

    Returns: TrainCommand
    """
    return TrainCommand(args)


class TrainCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """
        Register this command to argparse so it's available for the transformers-cli.

        Args:
            parser: Root parser to register command-specific arguments.
        """
        train_parser = parser.add_parser("train", help="CLI tool to train a model on a task.")
        train_parser.add_argument(
            "--train_data",
            type=str,
            required=True,
            help="path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.",
        )
        train_parser.add_argument(
            "--column_label", type=int, default=0, help="Column of the dataset csv file with example labels."
        )
        train_parser.add_argument(
            "--column_text", type=int, default=1, help="Column of the dataset csv file with example texts."
        )
        train_parser.add_argument(
            "--column_id", type=int, default=2, help="Column of the dataset csv file with example ids."
        )
        train_parser.add_argument(
            "--skip_first_row", action="store_true", help="Skip the first row of the csv file (headers)."
        )
        train_parser.add_argument("--validation_data", type=str, default="", help="path to validation dataset.")
        train_parser.add_argument(
            "--validation_split",
            type=float,
            default=0.1,
            help="if validation dataset is not provided, fraction of train dataset to use as validation dataset.",
        )
        train_parser.add_argument("--output", type=str, default="./", help="path to saved the trained model.")
        train_parser.add_argument(
            "--task", type=str, default="text_classification", help="Task to train the model on."
        )
        train_parser.add_argument(
            "--model", type=str, default="bert-base-uncased", help="Model's name or path to stored model."
        )
        train_parser.add_argument("--train_batch_size", type=int, default=32, help="Batch size for training.")
        train_parser.add_argument("--valid_batch_size", type=int, default=64, help="Batch size for validation.")
        train_parser.add_argument("--learning_rate", type=float, default=3e-5, help="Learning rate.")
        train_parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon for Adam optimizer.")
        train_parser.set_defaults(func=train_command_factory)

    def __init__(self, args: Namespace):
        self.logger = logging.get_logger("transformers-cli/training")

        self.framework = "tf" if is_tf_available() else "torch"

        os.makedirs(args.output, exist_ok=True)
        self.output = args.output

        self.column_label = args.column_label
        self.column_text = args.column_text
        self.column_id = args.column_id

        self.logger.info(f"Loading {args.task} pipeline for {args.model}")
        if args.task == "text_classification":
            self.pipeline = TextClassificationPipeline.from_pretrained(args.model)
        elif args.task == "token_classification":
            raise NotImplementedError
        elif args.task == "question_answering":
            raise NotImplementedError

        self.logger.info(f"Loading dataset from {args.train_data}")
        self.train_dataset = Processor.create_from_csv(
            args.train_data,
            column_label=args.column_label,
            column_text=args.column_text,
            column_id=args.column_id,
            skip_first_row=args.skip_first_row,
        )
        self.valid_dataset = None
        if args.validation_data:
            self.logger.info(f"Loading validation dataset from {args.validation_data}")
            self.valid_dataset = Processor.create_from_csv(
                args.validation_data,
                column_label=args.column_label,
                column_text=args.column_text,
                column_id=args.column_id,
                skip_first_row=args.skip_first_row,
            )

        self.validation_split = args.validation_split
        self.train_batch_size = args.train_batch_size
        self.valid_batch_size = args.valid_batch_size
        self.learning_rate = args.learning_rate
        self.adam_epsilon = args.adam_epsilon

    def run(self):
        if self.framework == "tf":
            return self.run_tf()
        return self.run_torch()

    def run_torch(self):
        raise NotImplementedError

    def run_tf(self):
        self.pipeline.fit(
            self.train_dataset,
            validation_data=self.valid_dataset,
            validation_split=self.validation_split,
            learning_rate=self.learning_rate,
            adam_epsilon=self.adam_epsilon,
            train_batch_size=self.train_batch_size,
            valid_batch_size=self.valid_batch_size,
        )

        # Save trained pipeline
        self.pipeline.save_pretrained(self.output)
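# Sketch of driving TrainCommand programmatically with the same arguments the subparser
# registers above; file paths are placeholders, not real fixtures.
if __name__ == "__main__":
    args = Namespace(
        train_data="./train.tsv",  # tab-separated labels and sentences
        column_label=0,
        column_text=1,
        column_id=2,
        skip_first_row=False,
        validation_data="",
        validation_split=0.1,
        output="./trained",
        task="text_classification",
        model="bert-base-uncased",
        train_batch_size=32,
        valid_batch_size=64,
        learning_rate=3e-5,
        adam_epsilon=1e-08,
    )
    TrainCommand(args).run()  # dispatches to run_tf() or run_torch() per backend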
import unittest

from transformers import AutoConfig, AutoTokenizer, BertConfig, RobertaConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow


if is_flax_available():
    import jax

    from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
    from transformers.models.bert.modeling_flax_bert import FlaxBertModel
    from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel


@require_flax
class FlaxAutoModelTest(unittest.TestCase):
    @slow
    def test_bert_from_pretrained(self):
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            with self.subTest(model_name):
                config = AutoConfig.from_pretrained(model_name)
                self.assertIsNotNone(config)
                self.assertIsInstance(config, BertConfig)

                model = FlaxAutoModel.from_pretrained(model_name)
                self.assertIsNotNone(model)
                self.assertIsInstance(model, FlaxBertModel)

    @slow
    def test_roberta_from_pretrained(self):
        for model_name in ["roberta-base", "roberta-large"]:
            with self.subTest(model_name):
                config = AutoConfig.from_pretrained(model_name)
                self.assertIsNotNone(config)
                self.assertIsInstance(config, RobertaConfig)

                model = FlaxAutoModel.from_pretrained(model_name)
                self.assertIsNotNone(model)
                self.assertIsInstance(model, FlaxRobertaModel)

    @slow
    def test_bert_jax_jit(self):
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            model = FlaxBertModel.from_pretrained(model_name)
            tokens = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX)

            @jax.jit
            def eval(**kwargs):
                return model(**kwargs)

            eval(**tokens).block_until_ready()

    @slow
    def test_roberta_jax_jit(self):
        for model_name in ["roberta-base", "roberta-large"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            model = FlaxRobertaModel.from_pretrained(model_name)
            tokens = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX)

            @jax.jit
            def eval(**kwargs):
                return model(**kwargs)

            eval(**tokens).block_until_ready()

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = FlaxAutoModel.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = FlaxAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_model_file_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack",
        ):
            _ = FlaxAutoModel.from_pretrained("hf-internal-testing/config-no-model")

    def test_model_from_pt_suggestion(self):
        with self.assertRaisesRegex(EnvironmentError, "Use `from_pt=True` to load this model"):
            _ = FlaxAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only")
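# The jit pattern the *_jax_jit tests exercise, in isolation: the wrapper is traced on
# the first call and later calls with identically shaped inputs reuse the compiled XLA
# program. A sketch, assuming `model` and `tokens` were produced as in the tests above:
#
#     @jax.jit
#     def forward(**inputs):
#         return model(**inputs)  # the Flax model is closed over by the traced function
#
#     outputs = forward(**tokens)  # first call: trace + XLA compilation
#     outputs = forward(**tokens)  # later calls with the same shapes are fast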
import collections
import os
from typing import List, Optional, Tuple

from transformers.utils import is_jieba_available, requires_backends


if is_jieba_available():
    import jieba

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "openbmb/cpm-ant-10b": "https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "openbmb/cpm-ant-10b": 1024,
}


def load_vocab(vocab_file):
    """Loads a vocabulary file into a dictionary."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab


class WordpieceTokenizer(object):
    def __init__(self, vocab, unk_token="<unk>", max_input_chars_per_word=200):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, token):
        chars = list(token)
        if len(chars) > self.max_input_chars_per_word:
            return [self.unk_token]

        start = 0
        sub_tokens = []
        while start < len(chars):
            end = len(chars)
            cur_substr = None
            # Greedy longest-match-first: shrink the window until a vocab entry is found.
            while start < end:
                substr = "".join(chars[start:end])
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                sub_tokens.append(self.unk_token)
                start += 1
            else:
                sub_tokens.append(cur_substr)
                start = end

        return sub_tokens


class CpmAntTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    add_prefix_space = False

    def __init__(
        self,
        vocab_file,
        bod_token="<d>",
        eod_token="</d>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        line_token="</n>",
        space_token="</_>",
        padding_side="left",
        **kwargs,
    ):
        requires_backends(self, ["jieba"])
        super().__init__(
            bod_token=bod_token,
            eod_token=eod_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            unk_token=unk_token,
            line_token=line_token,
            space_token=space_token,
            padding_side=padding_side,
            **kwargs,
        )
        self.bod_token = bod_token
        self.eod_token = eod_token
        self.encoder = load_vocab(vocab_file)
        self.encoder[" "] = self.encoder[space_token]
        self.encoder["\n"] = self.encoder[line_token]

        del self.encoder[space_token]
        del self.encoder[line_token]

        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        self.decoder = {v: k for k, v in self.encoder.items()}

        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.encoder, unk_token=self.unk_token)

    @property
    def bod_token_id(self):
        return self.encoder[self.bod_token]

    @property
    def eod_token_id(self):
        return self.encoder[self.eod_token]

    @property
    def newline_id(self):
        return self.encoder["\n"]

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def _tokenize(self, text):
        """Tokenize a string: jieba word segmentation followed by wordpiece."""
        output_tokens = []
        for x in jieba.cut(text, cut_all=False):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(x))
        return output_tokens

    def _decode(self, token_ids, **kwargs):
        """Decode ids into a string, dropping padding and sequence-control ids."""
        token_ids = [i for i in token_ids if i >= 0]
        token_ids = [
            x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
        ]
        return super()._decode(token_ids, **kwargs)

    def check(self, token):
        return token in self.encoder

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        return "".join(tokens)

    def _convert_token_to_id(self, token):
        """Converts a token (str) into an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) into a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        index = 0
        if " " in self.encoder:
            self.encoder["</_>"] = self.encoder[" "]
            del self.encoder[" "]
        if "\n" in self.encoder:
            self.encoder["</n>"] = self.encoder["\n"]
            del self.encoder["\n"]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in self.encoder.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        return (vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: List[int] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.bos_token_id] + token_ids_0
        return [self.bos_token_id] + token_ids_0 + [self.bos_token_id] + token_ids_1

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1))
        return [1] + ([0] * len(token_ids_0))
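# Usage sketch (not part of the module; requires the `jieba` package and the
# openbmb/cpm-ant-10b vocabulary):
if __name__ == "__main__":
    tokenizer = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")
    # jieba segments the text into words; WordpieceTokenizer then greedily matches the
    # longest vocabulary entry, falling back to <unk> character by character.
    ids = tokenizer.encode("今天天气真好")
    print(tokenizer.decode(ids))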
import argparse
import re

import torch
from CLAP import create_model

from transformers import AutoFeatureExtractor, ClapConfig, ClapModel


KEYS_TO_MODIFY_MAPPING = {
    "text_branch": "text_model",
    "audio_branch": "audio_model.audio_encoder",
    "attn": "attention.self",
    "self.proj": "output.dense",
    "attention.self_mask": "attn_mask",
    "mlp.fc1": "intermediate.dense",
    "mlp.fc2": "output.dense",
    "norm1": "layernorm_before",
    "norm2": "layernorm_after",
    "bn0": "batch_norm",
}

processor = AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused", truncation="rand_trunc")


def init_clap(checkpoint_path, enable_fusion=False):
    model, model_cfg = create_model(
        "HTSAT-tiny",
        "roberta",
        checkpoint_path,
        precision="fp32",
        device="cuda:0" if torch.cuda.is_available() else "cpu",
        enable_fusion=enable_fusion,
        fusion_type="aff_2d" if enable_fusion else None,
    )
    return model, model_cfg


def rename_state_dict(state_dict):
    model_state_dict = {}

    sequential_layers_pattern = r".*sequential.(\d+).*"
    text_projection_pattern = r".*_projection.(\d+).*"

    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(sequential_layers_pattern, key):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern, key).group(1)

            key = key.replace(f"sequential.{sequential_layer}.", f"layers.{int(sequential_layer)//3}.linear.")
        elif re.match(text_projection_pattern, key):
            projecton_layer = int(re.match(text_projection_pattern, key).group(1))

            # Because in CLAP they use `nn.Sequential`...
            transformers_projection_layer = 1 if projecton_layer == 0 else 2

            key = key.replace(f"_projection.{projecton_layer}.", f"_projection.linear{transformers_projection_layer}.")

        # note: the upstream script tested `if "audio" and "qkv" in key`, which is
        # always true for qkv keys; the intended condition is both substrings.
        if "audio" in key and "qkv" in key:
            # split qkv into query, key and value
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0) // 3

            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]

            model_state_dict[key.replace("qkv", "query")] = query_layer
            model_state_dict[key.replace("qkv", "key")] = key_layer
            model_state_dict[key.replace("qkv", "value")] = value_layer
        else:
            model_state_dict[key] = value

    return model_state_dict


def convert_clap_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path, enable_fusion=False):
    clap_model, clap_model_cfg = init_clap(checkpoint_path, enable_fusion=enable_fusion)

    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict)

    transformers_config = ClapConfig()
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config)

    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict, strict=False)

    model.save_pretrained(pytorch_dump_folder_path)
    transformers_config.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument("--enable_fusion", action="store_true", help="Whether to enable fusion or not")
    args = parser.parse_args()

    convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
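
# A sketch of how the conversion script above is typically invoked from the
# shell; the checkpoint path is a placeholder, not a file shipped with CLAP.
#
#     python convert_clap_original_pytorch_to_hf.py \
#         --checkpoint_path /path/to/clap_checkpoint.pt \
#         --pytorch_dump_folder_path ./clap-hf \
#         --enable_fusion
#
# Afterwards the converted weights load through the standard API:
#
#     from transformers import ClapModel
#     model = ClapModel.from_pretrained("./clap-hf")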
import sys


def matrix_chain_order(array):
    """Dynamic-programming solution to matrix chain multiplication.

    Returns the cost matrix and a table of optimal split points.
    """
    n = len(array)
    matrix = [[0 for x in range(n)] for x in range(n)]
    sol = [[0 for x in range(n)] for x in range(n)]

    for chain_length in range(2, n):
        for a in range(1, n - chain_length + 1):
            b = a + chain_length - 1

            matrix[a][b] = sys.maxsize
            for c in range(a, b):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol


def print_optimal_solution(optimal_solution, i, j):
    """Recursively print the optimal parenthesization of matrices A_i .. A_j."""
    if i == j:
        print("A" + str(i), end=" ")
    else:
        print("(", end=" ")
        print_optimal_solution(optimal_solution, i, optimal_solution[i][j])
        print_optimal_solution(optimal_solution, optimal_solution[i][j] + 1, j)
        print(")", end=" ")


def main():
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array)
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix, optimal_solution = matrix_chain_order(array)

    print("No. of Operation required: " + str(matrix[1][n - 1]))
    print_optimal_solution(optimal_solution, 1, n - 1)


if __name__ == "__main__":
    main()
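
# A small worked check of matrix_chain_order above. For dimensions [10, 20, 30]
# there is only one product, A1 (10x20) @ A2 (20x30), costing
# 10 * 20 * 30 = 6000 scalar multiplications, which lands in matrix[1][2].
matrix, sol = matrix_chain_order([10, 20, 30])
assert matrix[1][2] == 6000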
'''simple docstring''' import os from pickle import UnpicklingError from typing import Dict, Tuple import jax import jax.numpy as jnp import numpy as np from flax.serialization import from_bytes from flax.traverse_util import flatten_dict, unflatten_dict import transformers from .utils import logging lowercase : Dict = logging.get_logger(__name__) def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__=False ): '''simple docstring''' try: import torch # noqa: F401 except ImportError: logger.error( '''Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see''' ''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation''' ''' instructions.''' ) raise if not is_sharded: A : Union[str, Any] = os.path.abspath(snake_case__ ) logger.info(F'Loading PyTorch weights from {pt_path}' ) A : Any = torch.load(snake_case__ , map_location='''cpu''' ) logger.info(F'PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.' ) A : List[str] = convert_pytorch_state_dict_to_flax(snake_case__ , snake_case__ ) else: # model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files A : Any = convert_pytorch_sharded_state_dict_to_flax(snake_case__ , snake_case__ ) return flax_state_dict def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , ): '''simple docstring''' def is_key_or_prefix_key_in_dict(snake_case__ ) -> bool: return len(set(snake_case__ ) & {key, (model_prefix,) + key} ) > 0 # layer norm A : Union[str, Any] = pt_tuple_key[:-1] + ('''scale''',) if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(snake_case__ ): return renamed_pt_tuple_key, pt_tensor # batch norm layer mean A : Tuple = pt_tuple_key[:-1] + ('''mean''',) if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(snake_case__ ): return renamed_pt_tuple_key, pt_tensor # batch norm layer var A : Dict = pt_tuple_key[:-1] + ('''var''',) if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(snake_case__ ): return renamed_pt_tuple_key, pt_tensor # embedding A : Any = pt_tuple_key[:-1] + ('''embedding''',) if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(snake_case__ ): return renamed_pt_tuple_key, pt_tensor # conv layer A : Optional[Any] = pt_tuple_key[:-1] + ('''kernel''',) if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(snake_case__ ): A : List[Any] = pt_tensor.transpose(2 , 3 , 1 , 0 ) return renamed_pt_tuple_key, pt_tensor # linear layer A : Optional[int] = pt_tuple_key[:-1] + ('''kernel''',) if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(snake_case__ ): A : str = pt_tensor.T return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm weight A : Dict = pt_tuple_key[:-1] + ('''weight''',) if pt_tuple_key[-1] == "gamma": return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm bias A : List[Any] = pt_tuple_key[:-1] + ('''bias''',) if pt_tuple_key[-1] == "beta": return renamed_pt_tuple_key, pt_tensor # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030 A : Dict = None if pt_tuple_key[-3::2] == ("parametrizations", "original0"): A : List[Any] = pt_tuple_key[-2] + '''_g''' elif pt_tuple_key[-3::2] == ("parametrizations", "original1"): A : List[str] = pt_tuple_key[-2] + '''_v''' if name is not None: A : int = pt_tuple_key[:-3] + (name,) return renamed_pt_tuple_key, pt_tensor return 
pt_tuple_key, pt_tensor def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' A : Dict = {k: v.numpy() for k, v in pt_state_dict.items()} A : int = flax_model.base_model_prefix # use params dict if the model contains batch norm layers if "params" in flax_model.params: A : List[str] = flax_model.params['''params'''] else: A : Dict = flax_model.params A : List[Any] = flatten_dict(snake_case__ ) # add batch_stats keys,values to dict if "batch_stats" in flax_model.params: A : List[str] = flatten_dict(flax_model.params['''batch_stats'''] ) random_flax_state_dict.update(snake_case__ ) A : int = {} A : List[str] = (model_prefix not in flax_model_params) and ( model_prefix in {k.split('''.''' )[0] for k in pt_state_dict.keys()} ) A : int = (model_prefix in flax_model_params) and ( model_prefix not in {k.split('''.''' )[0] for k in pt_state_dict.keys()} ) # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): A : str = tuple(pt_key.split('''.''' ) ) # remove base model prefix if necessary A : Union[str, Any] = pt_tuple_key[0] == model_prefix if load_model_with_head_into_base_model and has_base_model_prefix: A : Any = pt_tuple_key[1:] # Correctly rename weight parameters A, A : Dict = rename_key_and_reshape_tensor( snake_case__ , snake_case__ , snake_case__ , snake_case__ ) # add model prefix if necessary A : Any = (model_prefix,) + flax_key in random_flax_state_dict if load_base_model_into_model_with_head and require_base_model_prefix: A : int = (model_prefix,) + flax_key if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( F'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape ' F'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' 
) # add batch stats if the model contains batchnorm layers if "batch_stats" in flax_model.params: if "mean" in flax_key[-1] or "var" in flax_key[-1]: A : Tuple = jnp.asarray(snake_case__ ) continue # remove num_batches_tracked key if "num_batches_tracked" in flax_key[-1]: flax_state_dict.pop(snake_case__ , snake_case__ ) continue # also add unexpected weight so that warning is thrown A : List[str] = jnp.asarray(snake_case__ ) else: # also add unexpected weight so that warning is thrown A : Optional[Any] = jnp.asarray(snake_case__ ) return unflatten_dict(snake_case__ ) def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' import torch # Load the index A : Union[str, Any] = {} for shard_file in shard_filenames: # load using msgpack utils A : List[str] = torch.load(snake_case__ ) A : int = {k: v.numpy() for k, v in pt_state_dict.items()} A : Tuple = flax_model.base_model_prefix # use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict if "batch_stats" in flax_model.params: A : Optional[int] = flax_model.params['''params'''] A : List[Any] = flatten_dict(snake_case__ ) random_flax_state_dict.update(flatten_dict(flax_model.params['''batch_stats'''] ) ) else: A : Dict = flax_model.params A : Tuple = flatten_dict(snake_case__ ) A : List[str] = (model_prefix not in flax_model_params) and ( model_prefix in {k.split('''.''' )[0] for k in pt_state_dict.keys()} ) A : List[str] = (model_prefix in flax_model_params) and ( model_prefix not in {k.split('''.''' )[0] for k in pt_state_dict.keys()} ) # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): A : int = tuple(pt_key.split('''.''' ) ) # remove base model prefix if necessary A : List[str] = pt_tuple_key[0] == model_prefix if load_model_with_head_into_base_model and has_base_model_prefix: A : Union[str, Any] = pt_tuple_key[1:] # Correctly rename weight parameters A, A : Any = rename_key_and_reshape_tensor( snake_case__ , snake_case__ , snake_case__ , snake_case__ ) # add model prefix if necessary A : int = (model_prefix,) + flax_key in random_flax_state_dict if load_base_model_into_model_with_head and require_base_model_prefix: A : int = (model_prefix,) + flax_key if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( F'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape ' F'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' 
) # add batch stats if the model contains batchnorm layers if "batch_stats" in flax_model.params: if "mean" in flax_key[-1]: A : Optional[int] = jnp.asarray(snake_case__ ) continue if "var" in flax_key[-1]: A : Optional[int] = jnp.asarray(snake_case__ ) continue # remove num_batches_tracked key if "num_batches_tracked" in flax_key[-1]: flax_state_dict.pop(snake_case__ , snake_case__ ) continue # also add unexpected weight so that warning is thrown A : Optional[Any] = jnp.asarray(snake_case__ ) else: # also add unexpected weight so that warning is thrown A : Optional[Any] = jnp.asarray(snake_case__ ) return unflatten_dict(snake_case__ ) def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' A : Dict = os.path.abspath(snake_case__ ) logger.info(F'Loading Flax weights from {flax_checkpoint_path}' ) # import correct flax class A : List[str] = getattr(snake_case__ , '''Flax''' + model.__class__.__name__ ) # load flax weight dict with open(snake_case__ , '''rb''' ) as state_f: try: A : int = from_bytes(snake_case__ , state_f.read() ) except UnpicklingError: raise EnvironmentError(F'Unable to convert {flax_checkpoint_path} to Flax deserializable object. ' ) return load_flax_weights_in_pytorch_model(snake_case__ , snake_case__ ) def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' try: import torch # noqa: F401 except ImportError: logger.error( '''Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see''' ''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation''' ''' instructions.''' ) raise # check if we have bf16 weights A : List[str] = flatten_dict(jax.tree_util.tree_map(lambda snake_case__ : x.dtype == jnp.bfloataa , snake_case__ ) ).values() if any(snake_case__ ): # convert all weights to fp32 if the are bf16 since torch.from_numpy can-not handle bf16 # and bf16 is not fully supported in PT yet. logger.warning( '''Found ``bfloat16`` weights in Flax model. 
Casting all ``bfloat16`` weights to ``float32`` ''' '''before loading those in PyTorch model.''' ) A : Optional[Any] = jax.tree_util.tree_map( lambda snake_case__ : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , snake_case__ ) A : Union[str, Any] = flatten_dict(snake_case__ ) A : List[Any] = pt_model.state_dict() A : Union[str, Any] = (pt_model.base_model_prefix in flax_state) and ( pt_model.base_model_prefix not in {k.split('''.''' )[0] for k in pt_model_dict.keys()} ) A : Tuple = (pt_model.base_model_prefix not in flax_state) and ( pt_model.base_model_prefix in {k.split('''.''' )[0] for k in pt_model_dict.keys()} ) # keep track of unexpected & missing keys A : int = [] A : Any = set(pt_model_dict.keys() ) for flax_key_tuple, flax_tensor in flax_state_dict.items(): A : Union[str, Any] = flax_key_tuple[0] == pt_model.base_model_prefix A : int = '''.'''.join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict # adapt flax_key to prepare for loading from/to base model only if load_model_with_head_into_base_model and has_base_model_prefix: A : List[str] = flax_key_tuple[1:] elif load_base_model_into_model_with_head and require_base_model_prefix: A : Optional[Any] = (pt_model.base_model_prefix,) + flax_key_tuple # rename flax weights to PyTorch format if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(snake_case__ ) not in pt_model_dict: # conv layer A : Optional[int] = flax_key_tuple[:-1] + ('''weight''',) A : Optional[int] = jnp.transpose(snake_case__ , (3, 2, 0, 1) ) elif flax_key_tuple[-1] == "kernel" and ".".join(snake_case__ ) not in pt_model_dict: # linear layer A : Tuple = flax_key_tuple[:-1] + ('''weight''',) A : Tuple = flax_tensor.T elif flax_key_tuple[-1] in ["scale", "embedding"]: A : Optional[int] = flax_key_tuple[:-1] + ('''weight''',) # adding batch stats from flax batch norm to pt elif "mean" in flax_key_tuple[-1]: A : Tuple = flax_key_tuple[:-1] + ('''running_mean''',) elif "var" in flax_key_tuple[-1]: A : Tuple = flax_key_tuple[:-1] + ('''running_var''',) if "batch_stats" in flax_state: A : List[Any] = '''.'''.join(flax_key_tuple[1:] ) # Remove the params/batch_stats header else: A : Union[str, Any] = '''.'''.join(snake_case__ ) # We also need to look at `pt_model_dict` and see if there are keys requiring further transformation. A : int = {} # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030 for key in pt_model_dict: A : Optional[int] = key.split('''.''' ) A : Dict = None if key_components[-3::2] == ["parametrizations", "original0"]: A : List[str] = key_components[-2] + '''_g''' elif key_components[-3::2] == ["parametrizations", "original1"]: A : List[Any] = key_components[-2] + '''_v''' if name is not None: A : str = key_components[:-3] + [name] A : Optional[Any] = '''.'''.join(snake_case__ ) A : Optional[Any] = key if flax_key in special_pt_names: A : Optional[Any] = special_pt_names[flax_key] if flax_key in pt_model_dict: if flax_tensor.shape != pt_model_dict[flax_key].shape: raise ValueError( F'Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected ' F'to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.' 
) else: # add weight to pytorch dict A : Dict = np.asarray(snake_case__ ) if not isinstance(snake_case__ , np.ndarray ) else flax_tensor A : Dict = torch.from_numpy(snake_case__ ) # remove from missing keys missing_keys.remove(snake_case__ ) else: # weight is not expected by PyTorch model unexpected_keys.append(snake_case__ ) pt_model.load_state_dict(snake_case__ ) # re-transform missing_keys to list A : List[Any] = list(snake_case__ ) if len(snake_case__ ) > 0: logger.warning( '''Some weights of the Flax model were not used when initializing the PyTorch model''' F' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing' F' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture' ''' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This''' F' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect' ''' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a''' ''' FlaxBertForSequenceClassification model).''' ) else: logger.warning(F'All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n' ) if len(snake_case__ ) > 0: logger.warning( F'Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly' F' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to' ''' use it for predictions and inference.''' ) else: logger.warning( F'All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n' '''If your task is similar to the task the model of the checkpoint was trained on, ''' F'you can already use {pt_model.__class__.__name__} for predictions without further training.' ) return pt_model
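
# The core convention the renaming helpers above encode, shown in isolation:
# PyTorch stores a Linear weight as (out_features, in_features), while Flax
# stores the equivalent `kernel` as (in_features, out_features), so a plain
# transpose converts between the two. A minimal sketch, assuming only numpy.
import numpy as np

pt_linear_weight = np.random.randn(4, 3)   # shape of torch.nn.Linear(3, 4).weight
flax_kernel = pt_linear_weight.T           # what rename_key_and_reshape_tensor does
assert flax_kernel.shape == (3, 4)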
from .constants import (
    MODEL_NAME,
    OPTIMIZER_NAME,
    RNG_STATE_NAME,
    SAFE_WEIGHTS_INDEX_NAME,
    SAFE_WEIGHTS_NAME,
    SCALER_NAME,
    SCHEDULER_NAME,
    TORCH_LAUNCH_PARAMS,
    WEIGHTS_INDEX_NAME,
    WEIGHTS_NAME,
)
from .dataclasses import (
    BnbQuantizationConfig,
    ComputeEnvironment,
    CustomDtype,
    DeepSpeedPlugin,
    DistributedDataParallelKwargs,
    DistributedType,
    DynamoBackend,
    FP8RecipeKwargs,
    FullyShardedDataParallelPlugin,
    GradientAccumulationPlugin,
    GradScalerKwargs,
    InitProcessGroupKwargs,
    KwargsHandler,
    LoggerType,
    MegatronLMPlugin,
    PrecisionType,
    ProjectConfiguration,
    RNGType,
    SageMakerDistributedType,
    TensorInformation,
    TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
    get_ccl_version,
    is_4bit_bnb_available,
    is_8bit_bnb_available,
    is_aim_available,
    is_bf16_available,
    is_bnb_available,
    is_boto3_available,
    is_ccl_available,
    is_comet_ml_available,
    is_datasets_available,
    is_deepspeed_available,
    is_fp8_available,
    is_ipex_available,
    is_megatron_lm_available,
    is_mlflow_available,
    is_mps_available,
    is_npu_available,
    is_rich_available,
    is_safetensors_available,
    is_sagemaker_available,
    is_tensorboard_available,
    is_tpu_available,
    is_transformers_available,
    is_wandb_available,
    is_xpu_available,
)
from .modeling import (
    check_device_map,
    check_tied_parameters_in_config,
    check_tied_parameters_on_same_device,
    compute_module_sizes,
    convert_file_size_to_int,
    dtype_byte_size,
    find_tied_parameters,
    get_balanced_memory,
    get_max_layer_size,
    get_max_memory,
    get_mixed_precision_context_manager,
    id_tensor_storage,
    infer_auto_device_map,
    load_checkpoint_in_model,
    load_offloaded_weights,
    load_state_dict,
    named_module_tensors,
    retie_parameters,
    set_module_tensor_to_device,
    shard_checkpoint,
)
from .offload import (
    OffloadedWeightsLoader,
    PrefixedDataset,
    extract_submodules_state_dict,
    load_offloaded_weight,
    offload_state_dict,
    offload_weight,
    save_offload_index,
)
from .operations import (
    broadcast,
    broadcast_object_list,
    concatenate,
    convert_outputs_to_fp32,
    convert_to_fp32,
    find_batch_size,
    find_device,
    gather,
    gather_object,
    get_data_structure,
    honor_type,
    initialize_tensors,
    is_namedtuple,
    is_tensor_information,
    is_torch_tensor,
    listify,
    pad_across_processes,
    recursively_apply,
    reduce,
    send_to_device,
    slice_tensors,
)
from .versions import compare_versions, is_torch_version


if is_deepspeed_available():
    from .deepspeed import (
        DeepSpeedEngineWrapper,
        DeepSpeedOptimizerWrapper,
        DeepSpeedSchedulerWrapper,
        DummyOptim,
        DummyScheduler,
        HfDeepSpeedConfig,
    )

from .bnb import has_4bit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
    PrepareForLaunch,
    _filter_args,
    prepare_deepspeed_cmd_env,
    prepare_multi_gpu_env,
    prepare_sagemaker_args_inputs,
    prepare_simple_launcher_cmd_env,
    prepare_tpu,
)
from .megatron_lm import (
    AbstractTrainStep,
    BertTrainStep,
    GPTTrainStep,
    MegatronEngine,
    MegatronLMDummyDataLoader,
    MegatronLMDummyScheduler,
    MegatronLMOptimizerWrapper,
    MegatronLMSchedulerWrapper,
    T5TrainStep,
    avg_losses_across_data_parallel_group,
    gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
    extract_model_from_parallel,
    get_pretty_name,
    is_port_in_use,
    merge_dicts,
    patch_environment,
    save,
    wait_for_everyone,
    write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
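
# A minimal sketch of one utility re-exported above, assuming the public
# `accelerate.utils` API: `find_executable_batch_size` retries the wrapped
# function with a halved batch size whenever it hits a CUDA out-of-memory
# error, so training starts at the largest batch size that actually fits.
from accelerate.utils import find_executable_batch_size


@find_executable_batch_size(starting_batch_size=128)
def train(batch_size):
    # build dataloaders and run the training loop with `batch_size` here
    print(f"training with batch_size={batch_size}")


train()  # the decorator injects batch_size; do not pass it yourself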
import argparse
import gc
import json
import os
import re

import torch
from huggingface_hub import hf_hub_download

from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint


NUM_HIDDEN_LAYERS_MAPPING = {
    "169M": 12,
    "430M": 24,
    "1B5": 24,
    "3B": 32,
    "7B": 32,
    "14B": 40,
}

HIDDEN_SIZE_MAPPING = {
    "169M": 768,
    "430M": 1024,
    "1B5": 2048,
    "3B": 2560,
    "7B": 4096,
    "14B": 5120,
}


def convert_state_dict(state_dict):
    state_dict_keys = list(state_dict.keys())
    for name in state_dict_keys:
        weight = state_dict.pop(name)
        # emb -> embedding
        if name.startswith("emb."):
            name = name.replace("emb.", "embeddings.")
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith("blocks.0.ln0"):
            name = name.replace("blocks.0.ln0", "blocks.0.pre_ln")
        # att -> attention
        name = re.sub(r"blocks\.(\d+)\.att", r"blocks.\1.attention", name)
        # ffn -> feed_forward
        name = re.sub(r"blocks\.(\d+)\.ffn", r"blocks.\1.feed_forward", name)
        # time_mix_k -> time_mix_key
        if name.endswith(".time_mix_k"):
            name = name.replace(".time_mix_k", ".time_mix_key")
        # time_mix_v -> time_mix_value
        if name.endswith(".time_mix_v"):
            name = name.replace(".time_mix_v", ".time_mix_value")
        # time_mix_r -> time_mix_receptance
        if name.endswith(".time_mix_r"):
            name = name.replace(".time_mix_r", ".time_mix_receptance")

        if name != "head.weight":
            name = "rwkv." + name

        state_dict[name] = weight
    return state_dict


def convert_rwkv_checkpoint_to_hf_format(
    repo_id, checkpoint_file, output_dir, size=None, tokenizer_file=None, push_to_hub=False, model_name=None
):
    # 1. If possible, build the tokenizer.
    if tokenizer_file is None:
        print("No `--tokenizer_file` provided, we will use the default tokenizer.")
        vocab_size = 50277
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
    else:
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file)
        vocab_size = len(tokenizer)
    tokenizer.save_pretrained(output_dir)

    # 2. Build the config
    possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys())
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                size = candidate
                break
        if size is None:
            raise ValueError("Could not infer the size, please provide it with the `--size` argument.")
    if size not in possible_sizes:
        raise ValueError(f"`size` should be one of {possible_sizes}, got {size}.")

    config = RwkvConfig(
        vocab_size=vocab_size,
        num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size],
        hidden_size=HIDDEN_SIZE_MAPPING[size],
    )
    config.save_pretrained(output_dir)

    # 3. Download model file then convert state_dict
    model_file = hf_hub_download(repo_id, checkpoint_file)
    state_dict = torch.load(model_file, map_location="cpu")
    state_dict = convert_state_dict(state_dict)

    # 4. Split in shards and save
    shards, index = shard_checkpoint(state_dict)
    for shard_file, shard in shards.items():
        torch.save(shard, os.path.join(output_dir, shard_file))

    if index is not None:
        save_index_file = os.path.join(output_dir, WEIGHTS_INDEX_NAME)
        # Save the index as well
        with open(save_index_file, "w", encoding="utf-8") as f:
            content = json.dumps(index, indent=2, sort_keys=True) + "\n"
            f.write(content)

    # 5. Clean up shards (for some reason the files PyTorch saves take the same
    # space as the whole state_dict)
    print(
        "Cleaning up shards. This may error with an OOM error, if this is the case don't worry you still have converted the model."
    )
    shard_files = list(shards.keys())

    del state_dict
    del shards
    gc.collect()

    for shard_file in shard_files:
        state_dict = torch.load(os.path.join(output_dir, shard_file))
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()}, os.path.join(output_dir, shard_file))

    del state_dict
    gc.collect()

    if push_to_hub:
        if model_name is None:
            raise ValueError("Please provide a `model_name` to push the model to the Hub.")
        model = AutoModelForCausalLM.from_pretrained(output_dir)
        model.push_to_hub(model_name, max_shard_size="2GB")
        tokenizer.push_to_hub(model_name)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--repo_id", default=None, type=str, required=True, help="Repo ID from which to pull the checkpoint."
    )
    parser.add_argument(
        "--checkpoint_file", default=None, type=str, required=True, help="Name of the checkpoint file in the repo."
    )
    parser.add_argument(
        "--output_dir", default=None, type=str, required=True, help="Where to save the converted model."
    )
    parser.add_argument(
        "--tokenizer_file",
        default=None,
        type=str,
        help="Path to the tokenizer file to use (if not provided, only the model is converted).",
    )
    parser.add_argument(
        "--size",
        default=None,
        type=str,
        help="Size of the model. Will be inferred from the `checkpoint_file` if not passed.",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Push to the Hub the converted model.",
    )
    parser.add_argument(
        "--model_name",
        default=None,
        type=str,
        help="Name of the pushed model on the Hub, including the username / organization.",
    )

    args = parser.parse_args()
    convert_rwkv_checkpoint_to_hf_format(
        args.repo_id,
        args.checkpoint_file,
        args.output_dir,
        size=args.size,
        tokenizer_file=args.tokenizer_file,
        push_to_hub=args.push_to_hub,
        model_name=args.model_name,
    )
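
# A sketch of a typical invocation of the script above; the repo and file
# names follow the RWKV-4 "pile" releases on the Hub but are illustrative.
#
#     python convert_rwkv_checkpoint_to_hf.py \
#         --repo_id BlinkDL/rwkv-4-pile-169m \
#         --checkpoint_file RWKV-4-Pile-169M-20220807-8023.pth \
#         --output_dir ./rwkv-169m-hf
#
# The `--size` flag can be omitted here because "169M" appears in the
# checkpoint file name and is picked up by the size-inference loop above.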
import json import sys import tempfile import unittest from pathlib import Path import transformers from transformers import ( CONFIG_MAPPING, IMAGE_PROCESSOR_MAPPING, AutoConfig, AutoImageProcessor, CLIPConfig, CLIPImageProcessor, ) from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils""")) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_image_processing import CustomImageProcessor # noqa E402 class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): def UpperCAmelCase_ ( self : int ) -> List[str]: """simple docstring""" snake_case_ : Optional[int] = 0 def UpperCAmelCase_ ( self : Optional[Any] ) -> Union[str, Any]: """simple docstring""" snake_case_ : int = AutoImageProcessor.from_pretrained('openai/clip-vit-base-patch32' ) self.assertIsInstance(_A , _A ) def UpperCAmelCase_ ( self : Union[str, Any] ) -> List[Any]: """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: snake_case_ : Dict = Path(_A ) / '''preprocessor_config.json''' snake_case_ : Optional[Any] = Path(_A ) / '''config.json''' json.dump( {'image_processor_type': 'CLIPImageProcessor', 'processor_class': 'CLIPProcessor'} , open(_A , 'w' ) , ) json.dump({'model_type': 'clip'} , open(_A , 'w' ) ) snake_case_ : Optional[Any] = AutoImageProcessor.from_pretrained(_A ) self.assertIsInstance(_A , _A ) def UpperCAmelCase_ ( self : Optional[int] ) -> str: """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: snake_case_ : int = Path(_A ) / '''preprocessor_config.json''' snake_case_ : Tuple = Path(_A ) / '''config.json''' json.dump( {'feature_extractor_type': 'CLIPFeatureExtractor', 'processor_class': 'CLIPProcessor'} , open(_A , 'w' ) , ) json.dump({'model_type': 'clip'} , open(_A , 'w' ) ) snake_case_ : List[str] = AutoImageProcessor.from_pretrained(_A ) self.assertIsInstance(_A , _A ) def UpperCAmelCase_ ( self : Union[str, Any] ) -> List[str]: """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: snake_case_ : List[Any] = CLIPConfig() # Create a dummy config file with image_proceesor_type snake_case_ : Any = Path(_A ) / '''preprocessor_config.json''' snake_case_ : List[Any] = Path(_A ) / '''config.json''' json.dump( {'image_processor_type': 'CLIPImageProcessor', 'processor_class': 'CLIPProcessor'} , open(_A , 'w' ) , ) json.dump({'model_type': 'clip'} , open(_A , 'w' ) ) # remove image_processor_type to make sure config.json alone is enough to load image processor locally snake_case_ : List[str] = AutoImageProcessor.from_pretrained(_A ).to_dict() config_dict.pop('image_processor_type' ) snake_case_ : List[str] = CLIPImageProcessor(**_A ) # save in new folder model_config.save_pretrained(_A ) config.save_pretrained(_A ) snake_case_ : Optional[int] = AutoImageProcessor.from_pretrained(_A ) # make sure private variable is not incorrectly saved snake_case_ : int = json.loads(config.to_json_string() ) self.assertTrue('_processor_class' not in dict_as_saved ) self.assertIsInstance(_A , _A ) def UpperCAmelCase_ ( self : List[Any] ) -> Tuple: """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: snake_case_ : Optional[Any] = Path(_A ) / '''preprocessor_config.json''' json.dump( {'image_processor_type': 'CLIPImageProcessor', 'processor_class': 'CLIPProcessor'} , open(_A , 'w' ) , ) snake_case_ : Tuple = AutoImageProcessor.from_pretrained(_A ) self.assertIsInstance(_A , _A ) def UpperCAmelCase_ ( self : Any ) -> Tuple: """simple docstring""" with 
self.assertRaisesRegex( _A , 'clip-base is not a local folder and is not a valid model identifier' ): snake_case_ : Union[str, Any] = AutoImageProcessor.from_pretrained('clip-base' ) def UpperCAmelCase_ ( self : str ) -> Any: """simple docstring""" with self.assertRaisesRegex( _A , R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ): snake_case_ : str = AutoImageProcessor.from_pretrained(_A , revision='aaaaaa' ) def UpperCAmelCase_ ( self : List[str] ) -> str: """simple docstring""" with self.assertRaisesRegex( _A , 'hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.' , ): snake_case_ : Tuple = AutoImageProcessor.from_pretrained('hf-internal-testing/config-no-model' ) def UpperCAmelCase_ ( self : List[str] ) -> Optional[Any]: """simple docstring""" with self.assertRaises(_A ): snake_case_ : Dict = AutoImageProcessor.from_pretrained('hf-internal-testing/test_dynamic_image_processor' ) # If remote code is disabled, we can't load this config. with self.assertRaises(_A ): snake_case_ : Union[str, Any] = AutoImageProcessor.from_pretrained( 'hf-internal-testing/test_dynamic_image_processor' , trust_remote_code=_A ) snake_case_ : List[Any] = AutoImageProcessor.from_pretrained( 'hf-internal-testing/test_dynamic_image_processor' , trust_remote_code=_A ) self.assertEqual(image_processor.__class__.__name__ , 'NewImageProcessor' ) # Test image processor can be reloaded. with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained(_A ) snake_case_ : Union[str, Any] = AutoImageProcessor.from_pretrained(_A , trust_remote_code=_A ) self.assertEqual(reloaded_image_processor.__class__.__name__ , 'NewImageProcessor' ) def UpperCAmelCase_ ( self : List[str] ) -> Tuple: """simple docstring""" try: AutoConfig.register('custom' , _A ) AutoImageProcessor.register(_A , _A ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(_A ): AutoImageProcessor.register(_A , _A ) with tempfile.TemporaryDirectory() as tmpdirname: snake_case_ : Any = Path(_A ) / '''preprocessor_config.json''' snake_case_ : Any = Path(_A ) / '''config.json''' json.dump( {'feature_extractor_type': 'CLIPFeatureExtractor', 'processor_class': 'CLIPProcessor'} , open(_A , 'w' ) , ) json.dump({'model_type': 'clip'} , open(_A , 'w' ) ) snake_case_ : Any = CustomImageProcessor.from_pretrained(_A ) # Now that the config is registered, it can be used as any other config with the auto-API with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained(_A ) snake_case_ : List[str] = AutoImageProcessor.from_pretrained(_A ) self.assertIsInstance(_A , _A ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content: del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig] def UpperCAmelCase_ ( self : Optional[Any] ) -> Any: """simple docstring""" class SCREAMING_SNAKE_CASE_ ( __snake_case ): __magic_name__: Optional[int] = True try: AutoConfig.register('custom' , _A ) AutoImageProcessor.register(_A , _A ) # If remote code is not set, the default is to use local snake_case_ : List[Any] = AutoImageProcessor.from_pretrained('hf-internal-testing/test_dynamic_image_processor' ) self.assertEqual(image_processor.__class__.__name__ , 'NewImageProcessor' ) self.assertTrue(image_processor.is_local ) # If remote code is disabled, we load the local one. 
snake_case_ : Union[str, Any] = AutoImageProcessor.from_pretrained( 'hf-internal-testing/test_dynamic_image_processor' , trust_remote_code=_A ) self.assertEqual(image_processor.__class__.__name__ , 'NewImageProcessor' ) self.assertTrue(image_processor.is_local ) # If remote is enabled, we load from the Hub snake_case_ : int = AutoImageProcessor.from_pretrained( 'hf-internal-testing/test_dynamic_image_processor' , trust_remote_code=_A ) self.assertEqual(image_processor.__class__.__name__ , 'NewImageProcessor' ) self.assertTrue(not hasattr(_A , 'is_local' ) ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content: del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
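
# A compact sketch of the registration pattern the tests above exercise,
# assuming the same `CustomConfig`/`CustomImageProcessor` pair imported from
# the test modules at the top of the file: once a config class is registered,
# `AutoImageProcessor` can resolve the matching processor from it.
from transformers import AutoConfig, AutoImageProcessor

AutoConfig.register("custom", CustomConfig)
AutoImageProcessor.register(CustomConfig, CustomImageProcessor)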
from typing import Union

from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class VisualQuestionAnsweringPipeline(Pipeline):
    """Visual question answering pipeline: answers open-ended questions about images."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING)

    def _sanitize_parameters(self, padding=None, truncation=None, top_k=None, **kwargs):
        preprocess_params, postprocess_params = {}, {}
        if padding is not None:
            preprocess_params["padding"] = padding
        if truncation is not None:
            preprocess_params["truncation"] = truncation
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, image: Union["Image.Image", str], question: str = None, **kwargs):
        if isinstance(image, (Image.Image, str)) and isinstance(question, str):
            inputs = {"image": image, "question": question}
        else:
            # Also supports dicts, lists of dicts, generators and datasets.
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def preprocess(self, inputs, padding=False, truncation=False):
        image = load_image(inputs["image"])
        model_inputs = self.tokenizer(
            inputs["question"], return_tensors=self.framework, padding=padding, truncation=truncation
        )
        image_features = self.image_processor(images=image, return_tensors=self.framework)
        model_inputs.update(image_features)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.sigmoid()[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
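
# A minimal sketch of driving the pipeline above through the factory function,
# assuming a VQA checkpoint such as `dandelin/vilt-b32-finetuned-vqa`.
from transformers import pipeline

vqa = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
preds = vqa(
    image="http://images.cocodataset.org/val2017/000000039769.jpg",
    question="How many cats are there?",
)
print(preds[0]["answer"], preds[0]["score"])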
"""simple docstring""" import argparse import json import logging import os import sys from unittest.mock import patch from transformers.testing_utils import TestCasePlus, get_gpu_count, slow _snake_case = [ os.path.join(os.path.dirname(__file__), dirname) for dirname in [ 'text-classification', 'language-modeling', 'summarization', 'token-classification', 'question-answering', ] ] sys.path.extend(SRC_DIRS) if SRC_DIRS is not None: import run_clm_flax import run_flax_glue import run_flax_ner import run_mlm_flax import run_qa import run_summarization_flax import run_ta_mlm_flax logging.basicConfig(level=logging.DEBUG) _snake_case = logging.getLogger() def lowerCAmelCase__ ( ): '''simple docstring''' _a : Tuple = argparse.ArgumentParser() parser.add_argument("""-f""" ) _a : str = parser.parse_args() return args.f def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__="eval" ): '''simple docstring''' _a : str = os.path.join(snake_case__ , F"""{split}_results.json""" ) if os.path.exists(snake_case__ ): with open(snake_case__ , """r""" ) as f: return json.load(snake_case__ ) raise ValueError(F"""can\'t find {path}""" ) _snake_case = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class UpperCamelCase ( __snake_case ): def _lowercase ( self : List[str] ) -> Optional[Any]: _a : List[Any] = self.get_auto_remove_tmp_dir() _a : int = f"""\n run_glue.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --eval_steps=2\n --warmup_steps=2\n --seed=42\n --max_seq_length=128\n """.split() with patch.object(UpperCAmelCase__ , """argv""" , UpperCAmelCase__ ): run_flax_glue.main() _a : Any = get_results(UpperCAmelCase__ ) self.assertGreaterEqual(result["""eval_accuracy"""] , 0.7_5 ) @slow def _lowercase ( self : int ) -> Union[str, Any]: _a : str = self.get_auto_remove_tmp_dir() _a : Dict = f"""\n run_clm_flax.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --block_size 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n """.split() with patch.object(UpperCAmelCase__ , """argv""" , UpperCAmelCase__ ): run_clm_flax.main() _a : Optional[Any] = get_results(UpperCAmelCase__ ) self.assertLess(result["""eval_perplexity"""] , 100 ) @slow def _lowercase ( self : Dict ) -> Any: _a : int = self.get_auto_remove_tmp_dir() _a : Optional[Any] = f"""\n run_summarization.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --test_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=8\n --do_train\n --do_eval\n --do_predict\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --predict_with_generate\n """.split() with patch.object(UpperCAmelCase__ , """argv""" , UpperCAmelCase__ ): run_summarization_flax.main() _a : Any = get_results(UpperCAmelCase__ , split="""test""" ) self.assertGreaterEqual(result["""test_rouge1"""] , 10 ) self.assertGreaterEqual(result["""test_rouge2"""] , 2 ) 
self.assertGreaterEqual(result["""test_rougeL"""] , 7 ) self.assertGreaterEqual(result["""test_rougeLsum"""] , 7 ) @slow def _lowercase ( self : str ) -> Union[str, Any]: _a : int = self.get_auto_remove_tmp_dir() _a : Optional[int] = f"""\n run_mlm.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --logging_steps 2 --eval_steps 2\n --do_train\n --do_eval\n --num_train_epochs=1\n """.split() with patch.object(UpperCAmelCase__ , """argv""" , UpperCAmelCase__ ): run_mlm_flax.main() _a : str = get_results(UpperCAmelCase__ ) self.assertLess(result["""eval_perplexity"""] , 42 ) @slow def _lowercase ( self : Optional[int] ) -> Optional[Any]: _a : Optional[Any] = self.get_auto_remove_tmp_dir() _a : List[Any] = f"""\n run_t5_mlm_flax.py\n --model_name_or_path t5-small\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n """.split() with patch.object(UpperCAmelCase__ , """argv""" , UpperCAmelCase__ ): run_ta_mlm_flax.main() _a : Optional[Any] = get_results(UpperCAmelCase__ ) self.assertGreaterEqual(result["""eval_accuracy"""] , 0.4_2 ) @slow def _lowercase ( self : str ) -> str: _a : List[str] = 7 if get_gpu_count() > 1 else 2 _a : Optional[int] = self.get_auto_remove_tmp_dir() _a : Tuple = f"""\n run_flax_ner.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --do_train\n --do_eval\n --warmup_steps=2\n --learning_rate=2e-4\n --logging_steps 2 --eval_steps 2\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n """.split() with patch.object(UpperCAmelCase__ , """argv""" , UpperCAmelCase__ ): run_flax_ner.main() _a : str = get_results(UpperCAmelCase__ ) self.assertGreaterEqual(result["""eval_accuracy"""] , 0.7_5 ) self.assertGreaterEqual(result["""eval_f1"""] , 0.3 ) @slow def _lowercase ( self : int ) -> Dict: _a : Any = self.get_auto_remove_tmp_dir() _a : List[str] = f"""\n run_qa.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=2\n --do_train\n --do_eval\n --logging_steps 2 --eval_steps 2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n """.split() with patch.object(UpperCAmelCase__ , """argv""" , UpperCAmelCase__ ): run_qa.main() _a : List[Any] = get_results(UpperCAmelCase__ ) self.assertGreaterEqual(result["""eval_f1"""] , 30 ) self.assertGreaterEqual(result["""eval_exact"""] , 30 )
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/config.json",
    "bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/config.json",
    "bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/config.json",
    "bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/config.json",
    "bert-base-multilingual-uncased": "https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json",
    "bert-base-multilingual-cased": "https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json",
    "bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/config.json",
    "bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/config.json",
    "bert-large-uncased-whole-word-masking": (
        "https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"
    ),
    "bert-large-cased-whole-word-masking": (
        "https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"
    ),
    "bert-large-uncased-whole-word-masking-finetuned-squad": (
        "https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"
    ),
    "bert-large-cased-whole-word-masking-finetuned-squad": (
        "https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"
    ),
    "bert-base-cased-finetuned-mrpc": "https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json",
    "bert-base-german-dbmdz-cased": "https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json",
    "bert-base-german-dbmdz-uncased": "https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json",
    "cl-tohoku/bert-base-japanese": "https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json",
    "cl-tohoku/bert-base-japanese-whole-word-masking": (
        "https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"
    ),
    "cl-tohoku/bert-base-japanese-char": (
        "https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"
    ),
    "cl-tohoku/bert-base-japanese-char-whole-word-masking": (
        "https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"
    ),
    "TurkuNLP/bert-base-finnish-cased-v1": (
        "https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"
    ),
    "TurkuNLP/bert-base-finnish-uncased-v1": (
        "https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"
    ),
    "wietsedv/bert-base-dutch-cased": "https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json",
    # See all BERT models at https://huggingface.co/models?filter=bert
}


class BertConfig(PretrainedConfig):
    model_type = "bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class BertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
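
# A short sketch showing the two classes above in use: building a config with
# non-default sizes, then inspecting the dynamic ONNX input axes.
config = BertConfig(hidden_size=384, num_hidden_layers=6, num_attention_heads=6)
print(config.intermediate_size)   # 3072, the untouched default

onnx_config = BertOnnxConfig(config)
print(onnx_config.inputs)         # input_ids / attention_mask / token_type_ids,
                                  # each mapped to {0: "batch", 1: "sequence"}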
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available _a : Any = { 'configuration_squeezebert': [ 'SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'SqueezeBertConfig', 'SqueezeBertOnnxConfig', ], 'tokenization_squeezebert': ['SqueezeBertTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a : Any = ['SqueezeBertTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a : Optional[Any] = [ 'SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'SqueezeBertForMaskedLM', 'SqueezeBertForMultipleChoice', 'SqueezeBertForQuestionAnswering', 'SqueezeBertForSequenceClassification', 'SqueezeBertForTokenClassification', 'SqueezeBertModel', 'SqueezeBertModule', 'SqueezeBertPreTrainedModel', ] if TYPE_CHECKING: from .configuration_squeezebert import ( SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, SqueezeBertConfig, SqueezeBertOnnxConfig, ) from .tokenization_squeezebert import SqueezeBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_squeezebert import ( SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST, SqueezeBertForMaskedLM, SqueezeBertForMultipleChoice, SqueezeBertForQuestionAnswering, SqueezeBertForSequenceClassification, SqueezeBertForTokenClassification, SqueezeBertModel, SqueezeBertModule, SqueezeBertPreTrainedModel, ) else: import sys _a : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
import requests
from bs4 import BeautifulSoup


def get_citation(base_url: str, params: dict) -> str:
    """Return the citation-count link text for a Google Scholar lookup."""
    soup = BeautifulSoup(requests.get(base_url, params=params).content, "html.parser")
    div = soup.find("div", attrs={"class": "gs_ri"})
    anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a")
    return anchors[2].get_text()


if __name__ == "__main__":
    params = {
        "title": (
            "Precisely geometry controlled microsupercapacitors for ultrahigh areal "
            "capacitance, volumetric capacitance, and energy density"
        ),
        "journal": "Chem. Mater.",
        "volume": 30,
        "pages": "3979-3990",
        "year": 2018,
        "hl": "en",
    }
    print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
from __future__ import annotations

from typing import Any


class ContainsLoopError(Exception):
    pass


class Node:
    def __init__(self, data: Any) -> None:
        self.data: Any = data
        self.next_node: Node | None = None

    def __iter__(self):
        # Walk the list, remembering visited nodes so a cycle raises instead of
        # iterating forever.
        node = self
        visited = []
        while node:
            if node in visited:
                raise ContainsLoopError
            visited.append(node)
            yield node.data
            node = node.next_node

    @property
    def has_loop(self) -> bool:
        try:
            list(self)
            return False
        except ContainsLoopError:
            return True


if __name__ == "__main__":
    root_node = Node(1)
    root_node.next_node = Node(2)
    root_node.next_node.next_node = Node(3)
    root_node.next_node.next_node.next_node = Node(4)
    print(root_node.has_loop)  # False

    root_node.next_node.next_node.next_node = root_node.next_node
    print(root_node.has_loop)  # True

    root_node = Node(5)
    root_node.next_node = Node(6)
    root_node.next_node.next_node = Node(5)
    root_node.next_node.next_node.next_node = Node(6)
    print(root_node.has_loop)  # False

    root_node = Node(1)
    print(root_node.has_loop)  # False
class FlowNetwork:
    def __init__(self, graph, sources, sinks):
        self.source_index = None
        self.sink_index = None
        self.graph = graph

        self._normalize_graph(sources, sinks)
        self.vertices_count = len(graph)
        self.maximum_flow_algorithm = None

    # make only one source and one sink
    def _normalize_graph(self, sources, sinks):
        if isinstance(sources, int):
            sources = [sources]
        if isinstance(sinks, int):
            sinks = [sinks]

        if len(sources) == 0 or len(sinks) == 0:
            return

        self.source_index = sources[0]
        self.sink_index = sinks[0]

        # make fake vertex if there are more
        # than one source or sink
        if len(sources) > 1 or len(sinks) > 1:
            max_input_flow = 0
            for i in sources:
                max_input_flow += sum(self.graph[i])

            size = len(self.graph) + 1
            for room in self.graph:
                room.insert(0, 0)
            self.graph.insert(0, [0] * size)
            for i in sources:
                self.graph[0][i + 1] = max_input_flow
            self.source_index = 0

            size = len(self.graph) + 1
            for room in self.graph:
                room.append(0)
            self.graph.append([0] * size)
            for i in sinks:
                self.graph[i + 1][size - 1] = max_input_flow
            self.sink_index = size - 1

    def find_maximum_flow(self):
        if self.maximum_flow_algorithm is None:
            raise Exception("You need to set maximum flow algorithm before.")
        if self.source_index is None or self.sink_index is None:
            return 0

        self.maximum_flow_algorithm.execute()
        return self.maximum_flow_algorithm.get_maximum_flow()

    def set_maximum_flow_algorithm(self, algorithm):
        self.maximum_flow_algorithm = algorithm(self)


class FlowNetworkAlgorithmExecutor:
    def __init__(self, flow_network):
        self.flow_network = flow_network
        self.vertices_count = flow_network.vertices_count
        self.source_index = flow_network.source_index
        self.sink_index = flow_network.sink_index
        # it's just a reference, so you shouldn't change
        # it in your algorithms, use deep copy before doing that
        self.graph = flow_network.graph
        self.executed = False

    def execute(self):
        if not self.executed:
            self._algorithm()
            self.executed = True

    # You should override this method in a subclass
    def _algorithm(self):
        pass


class MaximumFlowAlgorithmExecutor(FlowNetworkAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)
        # use this to save your result
        self.maximum_flow = -1

    def get_maximum_flow(self):
        if not self.executed:
            raise Exception("You should execute algorithm before using its result!")
        return self.maximum_flow


class PushRelabelExecutor(MaximumFlowAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)

        self.preflow = [[0] * self.vertices_count for i in range(self.vertices_count)]
        self.heights = [0] * self.vertices_count
        self.excesses = [0] * self.vertices_count

    def _algorithm(self):
        self.heights[self.source_index] = self.vertices_count

        # push some substance to graph
        for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index]):
            self.preflow[self.source_index][nextvertex_index] += bandwidth
            self.preflow[nextvertex_index][self.source_index] -= bandwidth
            self.excesses[nextvertex_index] += bandwidth

        # Relabel-to-front selection rule
        vertices_list = [
            i
            for i in range(self.vertices_count)
            if i != self.source_index and i != self.sink_index
        ]

        # move through list
        i = 0
        while i < len(vertices_list):
            vertex_index = vertices_list[i]
            previous_height = self.heights[vertex_index]
            self.process_vertex(vertex_index)
            if self.heights[vertex_index] > previous_height:
                # if it was relabeled, swap elements
                # and start from 0 index
                vertices_list.insert(0, vertices_list.pop(i))
                i = 0
            else:
                i += 1

        self.maximum_flow = sum(self.preflow[self.source_index])

    def process_vertex(self, vertex_index):
        while self.excesses[vertex_index] > 0:
            for neighbour_index in range(self.vertices_count):
                # if it's neighbour and current vertex is higher
                if (
                    self.graph[vertex_index][neighbour_index]
                    - self.preflow[vertex_index][neighbour_index]
                    > 0
                    and self.heights[vertex_index] > self.heights[neighbour_index]
                ):
                    self.push(vertex_index, neighbour_index)

            self.relabel(vertex_index)

    def push(self, from_index, to_index):
        preflow_delta = min(
            self.excesses[from_index],
            self.graph[from_index][to_index] - self.preflow[from_index][to_index],
        )
        self.preflow[from_index][to_index] += preflow_delta
        self.preflow[to_index][from_index] -= preflow_delta
        self.excesses[from_index] -= preflow_delta
        self.excesses[to_index] += preflow_delta

    def relabel(self, vertex_index):
        min_height = None
        for to_index in range(self.vertices_count):
            if (
                self.graph[vertex_index][to_index]
                - self.preflow[vertex_index][to_index]
                > 0
            ) and (min_height is None or self.heights[to_index] < min_height):
                min_height = self.heights[to_index]

        if min_height is not None:
            self.heights[vertex_index] = min_height + 1


if __name__ == "__main__":
    entrances = [0]
    exits = [3]
    # graph = [
    #     [0, 0, 4, 6, 0, 0],
    #     [0, 0, 5, 2, 0, 0],
    #     [0, 0, 0, 0, 4, 4],
    #     [0, 0, 0, 0, 6, 6],
    #     [0, 0, 0, 0, 0, 0],
    #     [0, 0, 0, 0, 0, 0],
    # ]
    graph = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]

    # prepare our network
    flow_network = FlowNetwork(graph, entrances, exits)
    # set algorithm
    flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
    # and calculate
    maximum_flow = flow_network.find_maximum_flow()
    print(f"maximum flow is {maximum_flow}")
3
0
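A short, hedged usage sketch for the push-relabel sample above (not part of the original record): it assumes FlowNetwork and PushRelabelExecutor from that sample are in scope, and exercises the multi-source normalization path that the inline demo does not cover.

# Two sources (0 and 1) and one sink (3); _normalize_graph adds fake
# super-source/super-sink vertices internally when there is more than one.
graph = [
    [0, 0, 4, 0],
    [0, 0, 5, 0],
    [0, 0, 0, 6],
    [0, 0, 0, 0],
]
network = FlowNetwork(graph, [0, 1], [3])
network.set_maximum_flow_algorithm(PushRelabelExecutor)
print(network.find_maximum_flow())  # expected 6: the min cut is the single edge 2 -> 3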
def _print_dist(dist, v):
    print("\nThe shortest path matrix using Floyd Warshall algorithm\n")
    for i in range(v):
        for j in range(v):
            if dist[i][j] != float("inf"):
                print(int(dist[i][j]), end="\t")
            else:
                print("INF", end="\t")
        print()


def floyd_warshall(graph, v):
    """
    :param graph: 2D array of edge weights, with float("inf") for missing edges
    :param v: number of vertices
    :return: shortest distance between all vertex pairs, and v
    """
    dist = [[float("inf") for _ in range(v)] for _ in range(v)]

    for i in range(v):
        for j in range(v):
            dist[i][j] = graph[i][j]

    # check vertex k against all other vertices (i, j)
    for k in range(v):
        # looping through rows of graph array
        for i in range(v):
            # looping through columns of graph array
            for j in range(v):
                if (
                    dist[i][k] != float("inf")
                    and dist[k][j] != float("inf")
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]

    _print_dist(dist, v)
    return dist, v


if __name__ == "__main__":
    v = int(input("Enter number of vertices: "))
    e = int(input("Enter number of edges: "))

    graph = [[float("inf") for i in range(v)] for j in range(v)]

    for i in range(v):
        graph[i][i] = 0.0

    # src and dst are indices that must be within the array size graph[e][v]
    # failure to follow this will result in an error
    for i in range(e):
        print("\nEdge ", i + 1)
        src = int(input("Enter source:"))
        dst = int(input("Enter destination:"))
        weight = float(input("Enter weight:"))
        graph[src][dst] = weight

    floyd_warshall(graph, v)

    # Example Input
    # Enter number of vertices: 3
    # Enter number of edges: 2

    # # generated graph from vertex and edge inputs
    # [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
    # [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]

    # specify source, destination and weight for edge #1
    # Edge 1
    # Enter source:1
    # Enter destination:2
    # Enter weight:2

    # specify source, destination and weight for edge #2
    # Edge 2
    # Enter source:2
    # Enter destination:1
    # Enter weight:1

    # # Expected Output from the vertex, edge and src, dst, weight inputs!!
    # 0    INF  INF
    # INF  0    2
    # INF  1    0
93
def solution(n: int = 10) -> str:
    """Return the last n digits of 28433 * 2**7830457 + 1."""
    if not isinstance(n, int) or n < 0:
        raise ValueError("Invalid input")
    modulus = 10**n
    number = 28433 * (pow(2, 7830457, modulus)) + 1
    return str(number % modulus)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print(f"{solution(10) = }")
3
0
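A non-interactive companion to the Floyd-Warshall sample above, added as a hedged sketch: it assumes floyd_warshall from that sample is in scope and mirrors the commented example run (3 vertices, edges 1->2 with weight 2 and 2->1 with weight 1).

INF = float("inf")
graph = [
    [0.0, INF, INF],
    [INF, 0.0, 2.0],
    [INF, 1.0, 0.0],
]
# prints the matrix:  0 INF INF / INF 0 2 / INF 1 0
dist, _ = floyd_warshall(graph, 3)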
from typing import List, Optional, Union

import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi

from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging


logger = logging.get_logger(__name__)


class Speech2TextFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features", "attention_mask"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16000,
        num_mel_bins=80,
        padding_value=0.0,
        do_ceptral_normalize=True,
        normalize_means=True,
        normalize_vars=True,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.num_mel_bins = num_mel_bins
        self.do_ceptral_normalize = do_ceptral_normalize
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.return_attention_mask = True

    def _extract_fbank_features(self, waveform: np.ndarray) -> np.ndarray:
        # get mel-filter bank features using TorchAudio's Kaldi-compliant fbank
        waveform = waveform * (2**15)  # Kaldi compliance: 16-bit signed integers
        waveform = torch.from_numpy(waveform).unsqueeze(0)
        features = ta_kaldi.fbank(waveform, num_mel_bins=self.num_mel_bins, sample_frequency=self.sampling_rate)
        return features.numpy()

    @staticmethod
    def utterance_cmvn(
        x: np.ndarray,
        input_length: int,
        normalize_means: bool = True,
        normalize_vars: bool = True,
        padding_value: float = 0.0,
    ) -> np.ndarray:
        # utterance-level cepstral mean and variance normalization
        if normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)

        if input_length < x.shape[0]:
            x[input_length:] = padding_value

        # make sure array is in float32
        x = x.astype(np.float32)
        return x

    def normalize(
        self, input_features: List[np.ndarray], attention_mask: Optional[np.ndarray] = None
    ) -> List[np.ndarray]:
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [
            self.utterance_cmvn(x, n, self.normalize_means, self.normalize_vars, self.padding_value)
            for x, n in zip(input_features, lengths)
        ]

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]

        # extract fbank features
        features = [self._extract_fbank_features(waveform) for waveform in raw_speech]

        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )

        # make sure list is in array format
        input_features = padded_inputs.get("input_features")
        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]

        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        # Utterance-level cepstral mean and variance normalization
        if self.do_ceptral_normalize:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"], attention_mask=attention_mask
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
219
from collections import OrderedDict
from typing import Any, Mapping, Optional

from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging


logger = logging.get_logger(__name__)

GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-neo-1.3B": "https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json",
    # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}


class GPTNeoConfig(PretrainedConfig):
    model_type = "gpt_neo"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=50257,
        max_position_embeddings=2048,
        hidden_size=2048,
        num_layers=24,
        attention_types=[[["global", "local"], 12]],
        num_heads=16,
        intermediate_size=None,
        window_size=256,
        activation_function="gelu_new",
        resid_dropout=0.0,
        embed_dropout=0.0,
        attention_dropout=0.0,
        classifier_dropout=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.activation_function = activation_function
        self.resid_dropout = resid_dropout
        self.embed_dropout = embed_dropout
        self.attention_dropout = attention_dropout
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        self.attention_types = attention_types
        self.attention_layers = self.expand_attention_types_params(attention_types)

        if len(self.attention_layers) != self.num_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.attention_layers)` == `config.num_layers` "
                f"but is `len(config.attention_layers) = {len(self.attention_layers)}`, "
                f"`config.num_layers = {self.num_layers}`. "
                "`config.attention_layers` is prepared using `config.attention_types`. "
                "Please verify the value of `config.attention_types` argument."
            )

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @staticmethod
    def expand_attention_types_params(attention_types):
        attentions = []
        for item in attention_types:
            for _ in range(item[1]):
                attentions.extend(item[0])
        return attentions


def custom_unfold(input, dimension, size, step):
    """Custom torch.Tensor.unfold implementation to enable the export to ONNX."""
    import torch

    shape = input.size()
    rank = len(shape)
    sizedim = shape[dimension]

    low_indices = torch.arange(0, sizedim, step)
    min_length = torch.div(sizedim - size, step, rounding_mode="floor") + 1
    indices = torch.arange(size) + low_indices[:min_length][:, None]

    s = [slice(None)] * rank
    s[dimension] = indices
    sliced = input[s]

    perm = list(range(0, rank + 1))
    perm.append(perm.pop(dimension + 1))

    return sliced.permute(perm)


def custom_get_block_length_and_num_blocks(seq_length, window_size):
    """Custom helper computing block length and number of blocks, written to be exportable to ONNX."""
    import torch

    candidates = torch.arange(1, window_size)
    remainders = torch.remainder(seq_length, candidates)
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors)
    return largest_divisor, torch.div(seq_length, largest_divisor, rounding_mode="floor")


class GPTNeoOnnxConfig(OnnxConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_attention_heads(self) -> int:
        return self._config.num_heads

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
3
0
import functools
from typing import Any


def word_break(string: str, words: list[str]) -> bool:
    """
    Return True if the string can be segmented into a space-separated
    sequence of one or more of the given words.
    """

    # Validation
    if not isinstance(string, str) or len(string) == 0:
        raise ValueError("the string should be a non-empty string")

    if not isinstance(words, list) or not all(
        isinstance(item, str) and len(item) > 0 for item in words
    ):
        raise ValueError("the words should be a list of non-empty strings")

    # Build trie
    trie: dict[str, Any] = {}
    word_keeper_key = "WORD_KEEPER"

    for word in words:
        trie_node = trie
        for c in word:
            if c not in trie_node:
                trie_node[c] = {}
            trie_node = trie_node[c]

        trie_node[word_keeper_key] = True

    len_string = len(string)

    # Dynamic programming method
    @functools.cache
    def is_breakable(index: int) -> bool:
        if index == len_string:
            return True

        trie_node = trie
        for i in range(index, len_string):
            trie_node = trie_node.get(string[i], None)

            if trie_node is None:
                return False

            if trie_node.get(word_keeper_key, False) and is_breakable(i + 1):
                return True

        return False

    return is_breakable(0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
231
import flax.linen as nn
import jax.numpy as jnp

from .attention_flax import FlaxTransformer2DModel
from .resnet_flax import FlaxDownsample2D, FlaxResnetBlock2D, FlaxUpsample2D


class FlaxCrossAttnDownBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_downsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []

        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=in_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels,
                n_heads=self.num_attention_heads,
                d_head=self.out_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                only_cross_attention=self.only_cross_attention,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)

        self.resnets = resnets
        self.attentions = attentions

        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        output_states = ()

        for resnet, attn in zip(self.resnets, self.attentions):
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            output_states += (hidden_states,)

        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)

        return hidden_states, output_states


class FlaxDownBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    add_downsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []

        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=in_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)
        self.resnets = resnets

        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, deterministic=True):
        output_states = ()

        for resnet in self.resnets:
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            output_states += (hidden_states,)

        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)

        return hidden_states, output_states


class FlaxCrossAttnUpBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_upsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []

        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels,
                n_heads=self.num_attention_heads,
                d_head=self.out_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                only_cross_attention=self.only_cross_attention,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)

        self.resnets = resnets
        self.attentions = attentions

        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, encoder_hidden_states, deterministic=True):
        for resnet, attn in zip(self.resnets, self.attentions):
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)

            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)

        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)

        return hidden_states


class FlaxUpBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    add_upsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []

        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

        self.resnets = resnets

        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, deterministic=True):
        for resnet in self.resnets:
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)

            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)

        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)

        return hidden_states


class FlaxUNetMidBlock2DCrossAttn(nn.Module):
    in_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    use_linear_projection: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        # there is always at least one resnet
        resnets = [
            FlaxResnetBlock2D(
                in_channels=self.in_channels,
                out_channels=self.in_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
        ]

        attentions = []

        for _ in range(self.num_layers):
            attn_block = FlaxTransformer2DModel(
                in_channels=self.in_channels,
                n_heads=self.num_attention_heads,
                d_head=self.in_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)

            res_block = FlaxResnetBlock2D(
                in_channels=self.in_channels,
                out_channels=self.in_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

        self.resnets = resnets
        self.attentions = attentions

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        hidden_states = self.resnets[0](hidden_states, temb)
        for attn, resnet in zip(self.attentions, self.resnets[1:]):
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)

        return hidden_states
3
0
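A brief usage sketch for the word_break sample above (assumes word_break from that sample is in scope); both results can be checked by hand against the trie-based dynamic programming.

print(word_break("applepenapple", ["apple", "pen"]))  # True: "apple pen apple"
print(word_break("catsandog", ["cats", "dog", "sand", "and", "cat"]))  # False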
def and_gate(input_1: int, input_2: int) -> int:
    """Calculate the AND of the two input values."""
    return int((input_1, input_2).count(0) == 0)


def test_and_gate() -> None:
    """Test the and_gate function on all four input combinations."""
    assert and_gate(0, 0) == 0
    assert and_gate(0, 1) == 0
    assert and_gate(1, 0) == 0
    assert and_gate(1, 1) == 1


if __name__ == "__main__":
    test_and_gate()
    print(and_gate(1, 0))
    print(and_gate(0, 0))
    print(and_gate(0, 1))
    print(and_gate(1, 1))
2
import os


def solution():
    """Return the first ten digits of the sum of the numbers in num.txt."""
    file_path = os.path.join(os.path.dirname(__file__), "num.txt")
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]


if __name__ == "__main__":
    print(solution())
3
0
def least_divisible_repunit(divisor: int) -> int:
    """
    Return the least value k such that the repunit R(k) is divisible by divisor,
    or 0 if no repunit is divisible by it (i.e. divisor shares a factor with 10).
    """
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1
    repunit_index = 1
    while repunit:
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index


def solution(limit: int = 1_000_000) -> int:
    """Return the least odd divisor n for which A(n) first exceeds limit."""
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor) <= limit:
        divisor += 2
    return divisor


if __name__ == "__main__":
    print(f"{solution() = }")
338
import pytest

import datasets.config
from datasets.utils.info_utils import is_small_dataset


@pytest.mark.parametrize("dataset_size", [None, 400 * 2**20, 600 * 2**20])
@pytest.mark.parametrize("input_in_memory_max_size", ["default", 0, 100 * 2**20, 900 * 2**20])
def test_is_small_dataset(dataset_size, input_in_memory_max_size, monkeypatch):
    if input_in_memory_max_size != "default":
        monkeypatch.setattr(datasets.config, "IN_MEMORY_MAX_SIZE", input_in_memory_max_size)
    in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE
    if input_in_memory_max_size == "default":
        assert in_memory_max_size == 0
    else:
        assert in_memory_max_size == input_in_memory_max_size
    if dataset_size and in_memory_max_size:
        expected = dataset_size < in_memory_max_size
    else:
        expected = False
    result = is_small_dataset(dataset_size)
    assert result == expected
3
0
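A quick worked check for the repunit sample above (assumes least_divisible_repunit from that sample is in scope): R(6) = 111111 = 3 * 7 * 11 * 13 * 37, so A(7) = 6, and R(5) = 11111 = 41 * 271, so A(41) = 5.

assert least_divisible_repunit(7) == 6
assert least_divisible_repunit(41) == 5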