import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
"xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model",
"xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model",
"xlm-roberta-large-finetuned-conll02-dutch": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll02-spanish": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll03-english": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll03-german": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model"
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"xlm-roberta-base": 512,
"xlm-roberta-large": 512,
"xlm-roberta-large-finetuned-conll02-dutch": 512,
"xlm-roberta-large-finetuned-conll02-spanish": 512,
"xlm-roberta-large-finetuned-conll03-english": 512,
"xlm-roberta-large-finetuned-conll03-german": 512,
}
class XLMRobertaTokenizer(PreTrainedTokenizer):
    """SentencePiece-based tokenizer for XLM-RoBERTa, with fairseq-aligned special token ids."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>",
        unk_token="<unk>", pad_token="<pad>", mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # The mask token behaves like a normal word, i.e. it includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # Mimic fairseq token-to-id alignment for the first 4 tokens
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + self.fairseq_offset
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Add <s> ... </s> around a single sequence, or <s> A </s></s> B </s> around a pair."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Return a mask with 1 at special-token positions and 0 at sequence-token positions."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """XLM-RoBERTa does not use token type ids, so a list of zeros is returned."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + self.fairseq_offset + 1  # Add the <mask> token

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Converts a sequence of tokens (pieces) into a single string."""
        return "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
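
# A minimal usage sketch (assumption: "sentencepiece.bpe.model" is a local
# SentencePiece model compatible with this tokenizer; the path is illustrative):
#
#     tokenizer = XLMRobertaTokenizer("sentencepiece.bpe.model")
#     pieces = tokenizer.tokenize("Hello world")
#     ids = tokenizer.build_inputs_with_special_tokens(
#         [tokenizer._convert_token_to_id(p) for p in pieces]
#     )  # [<s>] + piece ids + [</s>], following the fairseq alignment above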
# ---------------------------------------------------------------------------

from ...configuration_utils import PretrainedConfig
from ...utils import logging

logger = logging.get_logger(__name__)

REALM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'google/realm-cc-news-pretrained-embedder': (
'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-encoder': (
'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-scorer': (
'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-openqa': (
'https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/config.json'
),
'google/realm-orqa-nq-openqa': 'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json',
'google/realm-orqa-nq-reader': 'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json',
'google/realm-orqa-wq-openqa': 'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json',
'google/realm-orqa-wq-reader': 'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json',
# See all REALM models at https://huggingface.co/models?filter=realm
}
class RealmConfig(PretrainedConfig):
    """Configuration for REALM models (embedder, encoder, scorer, reader, and open-QA variants)."""

    model_type = "realm"

    def __init__(
        self,
        vocab_size=30522, hidden_size=768, retriever_proj_size=128, num_hidden_layers=12,
        num_attention_heads=12, num_candidates=8, intermediate_size=3072, hidden_act="gelu_new",
        hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512,
        type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, span_hidden_size=256,
        max_span_width=10, reader_layer_norm_eps=1e-3, reader_beam_size=5, reader_seq_len=320,
        num_block_records=13353718, searcher_beam_size=5000, pad_token_id=1, bos_token_id=0,
        eos_token_id=2, **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        # Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len

        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
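
# A minimal sketch of constructing the config (names as restored above):
#
#     config = RealmConfig(num_candidates=4, reader_beam_size=3)
#     assert config.model_type == "realm"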
# ---------------------------------------------------------------------------

import gc
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    PNDMScheduler,
    StableDiffusionLDM3DPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import nightly, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
enable_full_determinism()
class StableDiffusionLDM3DPipelineFastTests(unittest.TestCase):
    pipeline_class = StableDiffusionLDM3DPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=6, out_channels=6, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        ldm3d_pipe = StableDiffusionLDM3DPipeline(**components)
        ldm3d_pipe = ldm3d_pipe.to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth

        image_slice_rgb = rgb[0, -3:, -3:, -1]
        image_slice_depth = depth[0, -3:, -1]
        assert rgb.shape == (1, 64, 64, 3)
        assert depth.shape == (1, 64, 64)

        expected_slice_rgb = np.array(
            [0.37338176, 0.70247, 0.74203193, 0.51643604, 0.58256793, 0.60932136, 0.4181095, 0.48355877, 0.46535262]
        )
        expected_slice_depth = np.array([103.46727, 85.812004, 87.849236])
        assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb).max() < 1e-2
        assert np.abs(image_slice_depth.flatten() - expected_slice_depth).max() < 1e-2
    def test_stable_diffusion_prompt_embeds(self):
        components = self.get_dummy_components()
        ldm3d_pipe = StableDiffusionLDM3DPipeline(**components)
        ldm3d_pipe = ldm3d_pipe.to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = ldm3d_pipe(**inputs)
        rgb_slice_1, depth_slice_1 = output.rgb, output.depth
        rgb_slice_1 = rgb_slice_1[0, -3:, -3:, -1]
        depth_slice_1 = depth_slice_1[0, -3:, -1]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        text_inputs = ldm3d_pipe.tokenizer(
            prompt,
            padding="max_length",
            max_length=ldm3d_pipe.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        text_input_ids = text_inputs["input_ids"].to(torch_device)
        prompt_embeds = ldm3d_pipe.text_encoder(text_input_ids)[0]
        inputs["prompt_embeds"] = prompt_embeds

        # forward
        output = ldm3d_pipe(**inputs)
        rgb_slice_2, depth_slice_2 = output.rgb, output.depth
        rgb_slice_2 = rgb_slice_2[0, -3:, -3:, -1]
        depth_slice_2 = depth_slice_2[0, -3:, -1]

        assert np.abs(rgb_slice_1.flatten() - rgb_slice_2.flatten()).max() < 1e-4
        assert np.abs(depth_slice_1.flatten() - depth_slice_2.flatten()).max() < 1e-4
    def test_stable_diffusion_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        ldm3d_pipe = StableDiffusionLDM3DPipeline(**components)
        ldm3d_pipe = ldm3d_pipe.to(device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = ldm3d_pipe(**inputs, negative_prompt=negative_prompt)
        rgb, depth = output.rgb, output.depth
        rgb_slice = rgb[0, -3:, -3:, -1]
        depth_slice = depth[0, -3:, -1]

        assert rgb.shape == (1, 64, 64, 3)
        assert depth.shape == (1, 64, 64)

        expected_slice_rgb = np.array(
            [0.37044, 0.71811503, 0.7223251, 0.48603675, 0.5638391, 0.6364948, 0.42833704, 0.4901315, 0.47926217]
        )
        expected_slice_depth = np.array([107.84738, 84.62802, 89.962135])
        assert np.abs(rgb_slice.flatten() - expected_slice_rgb).max() < 1e-2
        assert np.abs(depth_slice.flatten() - expected_slice_depth).max() < 1e-2

@slow
@require_torch_gpu
class StableDiffusionLDM3DPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm3d_stable_diffusion(self):
        ldm3d_pipe = StableDiffusionLDM3DPipeline.from_pretrained("Intel/ldm3d")
        ldm3d_pipe = ldm3d_pipe.to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth
        rgb_slice = rgb[0, -3:, -3:, -1].flatten()
        depth_slice = depth[0, -3:, -1].flatten()

        assert rgb.shape == (1, 512, 512, 3)
        assert depth.shape == (1, 512, 512)

        expected_slice_rgb = np.array(
            [0.53805465, 0.56707305, 0.5486515, 0.57012236, 0.5814511, 0.56253487, 0.54843014, 0.55092263, 0.6459706]
        )
        expected_slice_depth = np.array(
            [0.9263781, 0.6678672, 0.5486515, 0.92202145, 0.67831135, 0.56253487, 0.9241694, 0.7551478, 0.6459706]
        )
        assert np.abs(rgb_slice - expected_slice_rgb).max() < 3e-3
        assert np.abs(depth_slice - expected_slice_depth).max() < 3e-3

@nightly
@require_torch_gpu
class StableDiffusionLDM3DPipelineNightlyTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 50,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm3d(self):
        ldm3d_pipe = StableDiffusionLDM3DPipeline.from_pretrained("Intel/ldm3d").to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth

        expected_rgb_mean = 0.495586
        expected_rgb_std = 0.33795515
        expected_depth_mean = 112.48518
        expected_depth_std = 98.489746
        assert np.abs(expected_rgb_mean - rgb.mean()) < 1e-3
        assert np.abs(expected_rgb_std - rgb.std()) < 1e-3
        assert np.abs(expected_depth_mean - depth.mean()) < 1e-3
        assert np.abs(expected_depth_std - depth.std()) < 1e-3

    def test_ldm3d_v2(self):
        ldm3d_pipe = StableDiffusionLDM3DPipeline.from_pretrained("Intel/ldm3d-4c").to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth

        expected_rgb_mean = 0.4194127
        expected_rgb_std = 0.35375586
        expected_depth_mean = 0.5638502
        expected_depth_std = 0.34686103
        assert rgb.shape == (1, 512, 512, 3)
        assert depth.shape == (1, 512, 512, 1)
        assert np.abs(expected_rgb_mean - rgb.mean()) < 1e-3
        assert np.abs(expected_rgb_std - rgb.std()) < 1e-3
        assert np.abs(expected_depth_mean - depth.mean()) < 1e-3
        assert np.abs(expected_depth_std - depth.std()) < 1e-3
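
# A minimal end-to-end sketch outside the test harness (assumptions: a CUDA device
# is available and the public "Intel/ldm3d" checkpoint referenced above is reachable):
#
#     pipe = StableDiffusionLDM3DPipeline.from_pretrained("Intel/ldm3d").to("cuda")
#     out = pipe("a photograph of an astronaut riding a horse", output_type="numpy")
#     rgb, depth = out.rgb, out.depth  # paired RGB image and depth map batches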
# ---------------------------------------------------------------------------

import warnings

from ...configuration_utils import PretrainedConfig
from ...utils import logging

logger = logging.get_logger(__name__)

MVP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json",
}
class MvpConfig(PretrainedConfig):
    """Configuration class for MVP models."""

    model_type = "mvp"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50267, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096,
        encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16,
        encoder_layerdrop=0.0, decoder_layerdrop=0.0, activation_function="gelu", d_model=1024,
        dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02,
        classifier_dropout=0.0, scale_embedding=False, use_cache=True, pad_token_id=1,
        bos_token_id=0, eos_token_id=2, is_encoder_decoder=True, decoder_start_token_id=2,
        forced_eos_token_id=2, use_prompt=False, prompt_length=100, prompt_mid_dim=800,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )
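
# A minimal sketch (names as restored above; `hidden_size` resolves to `d_model`
# through `attribute_map`):
#
#     config = MvpConfig(use_prompt=True, prompt_length=50)
#     assert config.hidden_size == config.d_model == 1024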
# ---------------------------------------------------------------------------

from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging

logger = logging.get_logger(__name__)

BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/config.json',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/config.json',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/config.json',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/config.json',
'bert-base-multilingual-uncased': 'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json',
'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json',
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/config.json',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/config.json',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json'
),
'bert-base-cased-finetuned-mrpc': 'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json',
'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json',
'bert-base-german-dbmdz-uncased': 'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json',
'cl-tohoku/bert-base-japanese': 'https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json',
'cl-tohoku/bert-base-japanese-whole-word-masking': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json'
),
'cl-tohoku/bert-base-japanese-char': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json'
),
'cl-tohoku/bert-base-japanese-char-whole-word-masking': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json'
),
'wietsedv/bert-base-dutch-cased': 'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json',
# See all BERT models at https://huggingface.co/models?filter=bert
}
class BertConfig(PretrainedConfig):
    """Configuration class for BERT models."""

    model_type = "bert"

    def __init__(
        self,
        vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2,
        initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0,
        position_embedding_type="absolute", use_cache=True, classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout

class BertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
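
# A minimal sketch of inspecting the ONNX export axes (names as restored above;
# the task string is illustrative):
#
#     onnx_config = BertOnnxConfig(BertConfig(), task="sequence-classification")
#     print(onnx_config.inputs)  # dynamic batch/sequence axes per model input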
# ---------------------------------------------------------------------------

import unittest

from transformers import DebertaV2Tokenizer, DebertaV2TokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin

SAMPLE_VOCAB = get_tests_dir("fixtures/spiece.model")
@require_sentencepiece
@require_tokenizers
class DebertaV2TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = DebertaV2Tokenizer
    rust_tokenizer_class = DebertaV2TokenizerFast
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, unk_token="<unk>")
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "[PAD]")
        self.assertEqual(len(vocab_keys), 30001)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 30000)
    def test_do_lower_case(self):
        # fmt: off
        sequence = " \tHeLLo!how \n Are yoU? "
        tokens_target = ["▁hello", "!", "how", "▁are", "▁you", "?"]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
    @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.")
    def test_sentencepiece_tokenize_and_convert_tokens_to_string(self):
        pass

    @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.")
    def test_sentencepiece_tokenize_and_decode(self):
        pass
    def test_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
    def test_do_lower_case_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
    def test_do_lower_case_split_by_punct_false(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=False)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=False)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
    def test_do_lower_case_false_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
    def test_do_lower_case_false_split_by_punct_false(self):
        # fmt: off
        sequence = " \tHeLLo!how \n Are yoU? "
        tokens_target = ["▁", "<unk>", "e", "<unk>", "o", "!", "how", "▁", "<unk>", "re", "▁yo", "<unk>", "?"]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=False)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=False)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
    def test_rust_and_python_full_tokenizers(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_full_tokenizer(self):
        sequence = "This is a test"
        ids_target = [13, 1, 4398, 25, 21, 1289]
        tokens_target = ["▁", "T", "his", "▁is", "▁a", "▁test"]
        back_tokens_target = ["▁", "<unk>", "his", "▁is", "▁a", "▁test"]

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, keep_accents=True)
        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, keep_accents=True)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, ids_target)
        tokens = tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, tokens_target)
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(back_tokens, back_tokens_target)

        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(rust_ids, ids_target)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(rust_tokens, tokens_target)
        rust_back_tokens = rust_tokenizer.convert_ids_to_tokens(rust_ids)
        self.assertListEqual(rust_back_tokens, back_tokens_target)

        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        ids_target = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9]
        tokens_target = ["▁", "I", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", ".", ]
        back_tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
        # fmt: on

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, ids_target)
        tokens = tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, tokens_target)
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(back_tokens, back_tokens_target)

        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(rust_ids, ids_target)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(rust_tokens, tokens_target)
        rust_back_tokens = rust_tokenizer.convert_ids_to_tokens(rust_ids)
        self.assertListEqual(rust_back_tokens, back_tokens_target)
    def test_sequence_builders(self):
        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB)

        text = tokenizer.encode("sequence builders")
        text_2 = tokenizer.encode("multi-sequence build")

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id], encoded_sentence)
        self.assertEqual(
            [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [tokenizer.sep_token_id],
            encoded_pair,
        )
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
lowercase__ = {'''input_ids''': [[1, 39867, 36, 19390, 486, 27, 35052, 81436, 18, 60685, 1225, 7, 35052, 81436, 18, 9367, 16899, 18, 15937, 53, 594, 773, 18, 16287, 30465, 36, 15937, 6, 41139, 38, 36979, 60763, 191, 6, 34132, 99, 6, 50538, 390, 43230, 6, 34132, 2779, 20850, 14, 699, 1072, 1194, 36, 382, 10901, 53, 7, 699, 1072, 2084, 36, 20422, 630, 53, 19, 105, 3049, 1896, 1053, 16899, 1506, 11, 37978, 4243, 7, 1237, 31869, 200, 16566, 654, 6, 35052, 81436, 7, 55630, 13593, 4, 2], [1, 26, 15011, 13, 667, 8, 1053, 18, 23611, 1237, 72356, 12820, 34, 104134, 1209, 35, 13313, 6627, 21, 202, 347, 7, 164, 2399, 11, 46, 4485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1232, 2864, 15785, 14951, 105, 5, 8581, 1250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on

        self.tokenizer_integration_test_util(
            expected_encoding=lowercase__,
            model_name="microsoft/deberta-v2-xlarge",
            revision="ad6e42c1532ddf3a15c39246b63f5559d558b670",
        )
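
# A minimal sketch outside the test harness (assumption: network access to the
# "microsoft/deberta-v2-xlarge" checkpoint referenced above):
#
#     tok = DebertaV2Tokenizer.from_pretrained("microsoft/deberta-v2-xlarge")
#     print(tok.tokenize("I was born in 92000, and this is falsé."))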
# ---------------------------------------------------------------------------
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_efficientnet": [
        "EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "EfficientNetConfig",
        "EfficientNetOnnxConfig",
    ]
}
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_efficientnet"] = ["EfficientNetImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_efficientnet"] = [
        "EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "EfficientNetForImageClassification",
        "EfficientNetModel",
        "EfficientNetPreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_efficientnet import (
        EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
        EfficientNetConfig,
        EfficientNetOnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_efficientnet import EfficientNetImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_efficientnet import (
            EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            EfficientNetForImageClassification,
            EfficientNetModel,
            EfficientNetPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
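
# A sketch of what the lazy module provides at runtime (assumption: this file is
# the package __init__ for transformers' EfficientNet model, as in the upstream layout):
#
#     from transformers.models.efficientnet import EfficientNetConfig  # resolved lazily
#     config = EfficientNetConfig()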
# ---------------------------------------------------------------------------

import argparse

import torch
from torch import nn

from transformers import MBartConfig, MBartForConditionalGeneration


def remove_ignore_keys_(state_dict):
    """Drop fairseq-only keys that have no counterpart in the HF model."""
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
        "decoder.output_projection.weight",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    """Build an lm_head linear layer that shares weights with the embedding matrix."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_mbart_checkpoint_from_disk(
    checkpoint_path, hf_config_path="facebook/mbart-large-en-ro", finetuned=False, mbart_50=False
):
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]

    mbart_config = MBartConfig.from_pretrained(hf_config_path, vocab_size=vocab_size)
    if mbart_50 and finetuned:
        mbart_config.activation_function = "relu"

    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MBartForConditionalGeneration(mbart_config)
    model.model.load_state_dict(state_dict)

    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared)

    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem."
    )
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--hf_config",
        default="facebook/mbart-large-cc25",
        type=str,
        help="Which huggingface architecture to use: mbart-large",
    )
    parser.add_argument("--mbart_50", action="store_true", help="whether the model is an mBART-50 checkpoint")
    parser.add_argument("--finetuned", action="store_true", help="whether the model is a fine-tuned checkpoint")
    args = parser.parse_args()
    model = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_50=args.mbart_50
    )
    model.save_pretrained(args.pytorch_dump_folder_path)
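
# Example invocation (the script filename is illustrative; any local fairseq
# checkpoint path works as the first argument):
#
#     python convert_mbart_checkpoint.py ./model.pt ./mbart-hf \
#         --hf_config facebook/mbart-large-cc25 --finetuned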
# ---------------------------------------------------------------------------
import itertools
import string
from collections.abc import Generator, Iterable


def chunker(seq: Iterable[str], size: int) -> Generator[tuple[str, ...], None, None]:
    """Yield consecutive size-length chunks of seq."""
    it = iter(seq)
    while True:
        chunk = tuple(itertools.islice(it, size))
        if not chunk:
            return
        yield chunk


def prepare_input(dirty: str) -> str:
    """Prepare the plaintext by up-casing it and separating repeated letters with X's."""
    dirty = "".join([c.upper() for c in dirty if c in string.ascii_letters])
    clean = ""

    if len(dirty) < 2:
        return dirty

    for i in range(len(dirty) - 1):
        clean += dirty[i]

        if dirty[i] == dirty[i + 1]:
            clean += "X"

    clean += dirty[-1]

    if len(clean) & 1:
        clean += "X"

    return clean


def generate_table(key: str) -> list[str]:
    # I and J are used interchangeably in the Playfair cipher, so J is omitted
    alphabet = "ABCDEFGHIKLMNOPQRSTUVWXYZ"
    # we're using a list instead of a '2d' array because it makes the math
    # for setting up the table and doing the actual encoding/decoding simpler
    table = []

    # copy key chars into the table if they are in `alphabet` ignoring duplicates
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(char)

    # fill the rest of the table in with the remaining alphabet chars
    for char in alphabet:
        if char not in table:
            table.append(char)

    return table


def encode(plaintext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = prepare_input(plaintext)
    ciphertext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(plaintext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            ciphertext += table[row1 * 5 + (col1 + 1) % 5]
            ciphertext += table[row2 * 5 + (col2 + 1) % 5]
        elif col1 == col2:
            ciphertext += table[((row1 + 1) % 5) * 5 + col1]
            ciphertext += table[((row2 + 1) % 5) * 5 + col2]
        else:  # rectangle
            ciphertext += table[row1 * 5 + col2]
            ciphertext += table[row2 * 5 + col1]

    return ciphertext


def decode(ciphertext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(ciphertext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            plaintext += table[row1 * 5 + (col1 - 1) % 5]
            plaintext += table[row2 * 5 + (col2 - 1) % 5]
        elif col1 == col2:
            plaintext += table[((row1 - 1) % 5) * 5 + col1]
            plaintext += table[((row2 - 1) % 5) * 5 + col2]
        else:  # rectangle
            plaintext += table[row1 * 5 + col2]
            plaintext += table[row2 * 5 + col1]

    return plaintext
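

# Round-trip usage example. The ciphertext value itself depends on the key table,
# so rather than hard-coding an expected string this checks the inverse property:
# decode(encode(p, k), k) recovers the prepared plaintext (upper-cased, X-padded).
if __name__ == "__main__":
    message = "Hide the gold in the tree stump"
    key = "playfair example"
    ciphertext = encode(message, key)
    assert decode(ciphertext, key) == prepare_input(message)
    print(ciphertext)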
# ---------------------------------------------------------------------------

import logging
import os
from typing import List, TextIO, Union

from conllu import parse_incr

from utils_ner import InputExample, Split, TokenClassificationTask

logger = logging.getLogger(__name__)


class NER(TokenClassificationTask):
    def __init__(self, label_idx=-1):
        # in NER datasets, the last column is usually reserved for the NER label
        self.label_idx = label_idx

    def read_examples_from_file(self, data_dir, mode: Union[Split, str]) -> List[InputExample]:
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []
        with open(file_path, encoding="utf-8") as f:
            words = []
            labels = []
            for line in f:
                if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                    if words:
                        examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                        guid_index += 1
                        words = []
                        labels = []
                else:
                    splits = line.split(" ")
                    words.append(splits[0])
                    if len(splits) > 1:
                        labels.append(splits[self.label_idx].replace("\n", ""))
                    else:
                        # Examples could have no label for mode = "test"
                        labels.append("O")
            if words:
                examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
        return examples

    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List):
        example_id = 0
        for line in test_input_reader:
            if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                writer.write(line)
                if not preds_list[example_id]:
                    example_id += 1
            elif preds_list[example_id]:
                output_line = line.split()[0] + " " + preds_list[example_id].pop(0) + "\n"
                writer.write(output_line)
            else:
                logger.warning("Maximum sequence length exceeded: No prediction for '%s'.", line.split()[0])

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]


class Chunk(NER):
    def __init__(self):
        # in the CoNLL-2003 data the chunk column is the second-to-last one
        super().__init__(label_idx=-2)

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]


class POS(TokenClassificationTask):
    def read_examples_from_file(self, data_dir, mode: Union[Split, str]) -> List[InputExample]:
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []
        with open(file_path, encoding="utf-8") as f:
            for sentence in parse_incr(f):
                words = []
                labels = []
                for token in sentence:
                    words.append(token["form"])
                    labels.append(token["upos"])
                assert len(words) == len(labels)
                if words:
                    examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                    guid_index += 1
        return examples

    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List):
        example_id = 0
        for sentence in parse_incr(test_input_reader):
            s_p = preds_list[example_id]
            out = ""
            for token in sentence:
                out += f"{token['form']} ({token['upos']}|{s_p.pop(0)}) "
            out += "\n"
            writer.write(out)
            example_id += 1

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                return f.read().splitlines()
        else:
            return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
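
# A minimal sketch (assumption: a CoNLL-style file at ./data/train.txt with one
# "token label" pair per line and blank lines between sentences):
#
#     task = NER()
#     examples = task.read_examples_from_file("./data", "train")
#     print(examples[0].words, examples[0].labels)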
# ---------------------------------------------------------------------------
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING

logger = logging.get_logger(__name__)

DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "SenseTime/deformable-detr": "https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json",
    # See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}


class DeformableDetrConfig(PretrainedConfig):
    """Configuration class for Deformable DETR models."""

    model_type = "deformable_detr"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True, backbone_config=None, num_channels=3, num_queries=300,
        max_position_embeddings=1024, encoder_layers=6, encoder_ffn_dim=1024, encoder_attention_heads=8,
        decoder_layers=6, decoder_ffn_dim=1024, decoder_attention_heads=8, encoder_layerdrop=0.0,
        is_encoder_decoder=True, activation_function="relu", d_model=256, dropout=0.1,
        attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, init_xavier_std=1.0,
        return_intermediate=True, auxiliary_loss=False, position_embedding_type="sine",
        backbone="resnet50", use_pretrained_backbone=True, dilation=False, num_feature_levels=4,
        encoder_n_points=4, decoder_n_points=4, two_stage=False, two_stage_num_proposals=300,
        with_box_refine=False, class_cost=1, bbox_cost=5, giou_cost=2, mask_loss_coefficient=1,
        dice_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.1,
        focal_alpha=0.25, disable_custom_kernels=False,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        self.disable_custom_kernels = disable_custom_kernels
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        """Serializes this instance to a Python dictionary, expanding the nested backbone config."""
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : Dict = logging.get_logger(__name__)
lowerCamelCase : Union[str, Any] = {
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class __lowerCAmelCase (PretrainedConfig ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = """megatron-bert"""
def __init__(self : Tuple , UpperCamelCase : Optional[int]=29056 , UpperCamelCase : Optional[Any]=1024 , UpperCamelCase : Any=24 , UpperCamelCase : int=16 , UpperCamelCase : Optional[int]=4096 , UpperCamelCase : int="gelu" , UpperCamelCase : int=0.1 , UpperCamelCase : Tuple=0.1 , UpperCamelCase : Any=512 , UpperCamelCase : int=2 , UpperCamelCase : Dict=0.02 , UpperCamelCase : Dict=1E-12 , UpperCamelCase : List[Any]=0 , UpperCamelCase : Optional[int]="absolute" , UpperCamelCase : List[Any]=True , **UpperCamelCase : str , ):
'''simple docstring'''
super().__init__(pad_token_id=UpperCamelCase , **UpperCamelCase )
lowercase__ = vocab_size
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = hidden_act
lowercase__ = intermediate_size
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = max_position_embeddings
lowercase__ = type_vocab_size
lowercase__ = initializer_range
lowercase__ = layer_norm_eps
lowercase__ = position_embedding_type
lowercase__ = use_cache
| 2 | 0 |
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sola import PokerHand
SORTED_HANDS = (
"4S 3H 2C 7S 5H",
"9D 8H 2C 6S 7H",
"2D 6D 9D TH 7D",
"TC 8C 2S JH 6C",
"JH 8S TH AH QH",
"TS KS 5S 9S AC",
"KD 6S 9D TH AD",
"KS 8D 4D 9S 4S", # pair
"8C 4S KH JS 4D", # pair
"QH 8H KD JH 8S", # pair
"KC 4H KS 2H 8D", # pair
"KD 4S KC 3H 8S", # pair
"AH 8S AS KC JH", # pair
"3H 4C 4H 3S 2H", # 2 pairs
"5S 5D 2C KH KH", # 2 pairs
"3C KH 5D 5S KH", # 2 pairs
"AS 3C KH AD KH", # 2 pairs
"7C 7S 3S 7H 5S", # 3 of a kind
"7C 7S KH 2H 7H", # 3 of a kind
"AC KH QH AH AS", # 3 of a kind
"2H 4D 3C AS 5S", # straight (low ace)
"3C 5C 4C 2C 6H", # straight
"6S 8S 7S 5H 9H", # straight
"JS QS 9H TS KH", # straight
"QC KH TS JS AH", # straight (high ace)
"8C 9C 5C 3C TC", # flush
"3S 8S 9S 5S KS", # flush
"4C 5C 9C 8C KC", # flush
"JH 8H AH KH QH", # flush
"3D 2H 3H 2C 2D", # full house
"2H 2C 3S 3H 3D", # full house
"KH KC 3S 3H 3D", # full house
"JC 6H JS JD JH", # 4 of a kind
"JC 7H JS JD JH", # 4 of a kind
"JC KH JS JD JH", # 4 of a kind
"2S AS 4S 5S 3S", # straight flush (low ace)
"2D 6D 3D 4D 5D", # straight flush
"5C 6C 3C 7C 4C", # straight flush
"JH 9H TH KH QH", # straight flush
"JH AH TH KH QH", # royal flush (high ace straight flush)
)
lowercase_ = (
("2H 3H 4H 5H 6H", "KS AS TS QS JS", "Loss"),
("2H 3H 4H 5H 6H", "AS AD AC AH JD", "Win"),
("AS AH 2H AD AC", "JS JD JC JH 3D", "Win"),
("2S AH 2H AS AC", "JS JD JC JH AD", "Loss"),
("2S AH 2H AS AC", "2H 3H 5H 6H 7H", "Win"),
("AS 3S 4S 8S 2S", "2H 3H 5H 6H 7H", "Win"),
("2H 3H 5H 6H 7H", "2S 3H 4H 5S 6C", "Win"),
("2S 3H 4H 5S 6C", "3D 4C 5H 6H 2S", "Tie"),
("2S 3H 4H 5S 6C", "AH AC 5H 6H AS", "Win"),
("2S 2H 4H 5S 4C", "AH AC 5H 6H AS", "Loss"),
("2S 2H 4H 5S 4C", "AH AC 5H 6H 7S", "Win"),
("6S AD 7H 4S AS", "AH AC 5H 6H 7S", "Loss"),
("2S AH 4H 5S KC", "AH AC 5H 6H 7S", "Loss"),
("2S 3H 6H 7S 9C", "7H 3C TH 6H 9S", "Loss"),
("4S 5H 6H TS AC", "3S 5H 6H TS AC", "Win"),
("2S AH 4H 5S 6C", "AD 4C 5H 6H 2C", "Tie"),
("AS AH 3H AD AC", "AS AH 2H AD AC", "Win"),
("AH AC 5H 5C QS", "AH AC 5H 5C KS", "Loss"),
("AH AC 5H 5C QS", "KH KC 5H 5C QS", "Win"),
("7C 7S KH 2H 7H", "3C 3S AH 2H 3H", "Win"),
("3C 3S AH 2H 3H", "7C 7S KH 2H 7H", "Loss"),
("6H 5H 4H 3H 2H", "5H 4H 3H 2H AH", "Win"),
("5H 4H 3H 2H AH", "5H 4H 3H 2H AH", "Tie"),
("5H 4H 3H 2H AH", "6H 5H 4H 3H 2H", "Loss"),
("AH AD KS KC AC", "AH KD KH AC KC", "Win"),
("2H 4D 3C AS 5S", "2H 4D 3C 6S 5S", "Loss"),
("2H 3S 3C 3H 2S", "3S 3C 2S 2H 2D", "Win"),
("4D 6D 5D 2D JH", "3S 8S 3H TC KH", "Loss"),
("4S 6C 8S 3S 7S", "AD KS 2D 7D 7C", "Loss"),
("6S 4C 7H 8C 3H", "5H JC AH 9D 9C", "Loss"),
("9D 9H JH TC QH", "3C 2S JS 5C 7H", "Win"),
("2H TC 8S AD 9S", "4H TS 7H 2C 5C", "Win"),
("9D 3S 2C 7S 7C", "JC TD 3C TC 9H", "Loss"),
)
lowercase_ = (
("2H 3H 4H 5H 6H", True),
("AS AH 2H AD AC", False),
("2H 3H 5H 6H 7H", True),
("KS AS TS QS JS", True),
("8H 9H QS JS TH", False),
("AS 3S 4S 8S 2S", True),
)
lowercase_ = (
("2H 3H 4H 5H 6H", True),
("AS AH 2H AD AC", False),
("2H 3H 5H 6H 7H", False),
("KS AS TS QS JS", True),
("8H 9H QS JS TH", True),
)
lowercase_ = (
("2H 4D 3C AS 5S", True, [5, 4, 3, 2, 14]),
("2H 5D 3C AS 5S", False, [14, 5, 5, 3, 2]),
("JH QD KC AS TS", False, [14, 13, 12, 11, 10]),
("9D 3S 2C 7S 7C", False, [9, 7, 7, 3, 2]),
)
lowercase_ = (
("JH AH TH KH QH", 0),
("JH 9H TH KH QH", 0),
("JC KH JS JD JH", 7),
("KH KC 3S 3H 3D", 6),
("8C 9C 5C 3C TC", 0),
("JS QS 9H TS KH", 0),
("7C 7S KH 2H 7H", 3),
("3C KH 5D 5S KH", 2),
("QH 8H KD JH 8S", 1),
("2D 6D 9D TH 7D", 0),
)
lowercase_ = (
("JH AH TH KH QH", 23),
("JH 9H TH KH QH", 22),
("JC KH JS JD JH", 21),
("KH KC 3S 3H 3D", 20),
("8C 9C 5C 3C TC", 19),
("JS QS 9H TS KH", 18),
("7C 7S KH 2H 7H", 17),
("3C KH 5D 5S KH", 16),
("QH 8H KD JH 8S", 15),
("2D 6D 9D TH 7D", 14),
)
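# SORTED_HANDS is ordered weakest to strongest, so the expected result of a
# random matchup can be read off the sampled indices:
# (play >= oppo) + (play > oppo) maps to 0/1/2, i.e. 'Loss'/'Tie'/'Win'.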
def _snake_case( ) -> List[str]:
'''simple docstring'''
    play , oppo = randrange(len(SORTED_HANDS ) ), randrange(len(SORTED_HANDS ) )
    expected = ['Loss', 'Tie', 'Win'][(play >= oppo) + (play > oppo)]
    hand , other = SORTED_HANDS[play], SORTED_HANDS[oppo]
return hand, other, expected
def _snake_case( SCREAMING_SNAKE_CASE__ : int = 100 ) -> str:
'''simple docstring'''
return (generate_random_hand() for _ in range(SCREAMING_SNAKE_CASE__ ))
@pytest.mark.parametrize('hand, expected' , SCREAMING_SNAKE_CASE__ )
def _snake_case( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Tuple ) -> int:
'''simple docstring'''
assert PokerHand(SCREAMING_SNAKE_CASE__ )._is_flush() == expected
@pytest.mark.parametrize('hand, expected' , SCREAMING_SNAKE_CASE__ )
def _snake_case( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[Any] ) -> str:
'''simple docstring'''
assert PokerHand(SCREAMING_SNAKE_CASE__ )._is_straight() == expected
@pytest.mark.parametrize('hand, expected, card_values' , SCREAMING_SNAKE_CASE__ )
def _snake_case( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[int] ) -> List[Any]:
'''simple docstring'''
A__ = PokerHand(SCREAMING_SNAKE_CASE__ )
assert player._is_five_high_straight() == expected
assert player._card_values == card_values
@pytest.mark.parametrize('hand, expected' , SCREAMING_SNAKE_CASE__ )
def _snake_case( SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : str ) -> Tuple:
'''simple docstring'''
assert PokerHand(SCREAMING_SNAKE_CASE__ )._is_same_kind() == expected
@pytest.mark.parametrize('hand, expected' , SCREAMING_SNAKE_CASE__ )
def _snake_case( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Tuple:
'''simple docstring'''
assert PokerHand(SCREAMING_SNAKE_CASE__ )._hand_type == expected
@pytest.mark.parametrize('hand, other, expected' , SCREAMING_SNAKE_CASE__ )
def _snake_case( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
assert PokerHand(SCREAMING_SNAKE_CASE__ ).compare_with(PokerHand(SCREAMING_SNAKE_CASE__ ) ) == expected
@pytest.mark.parametrize('hand, other, expected' , generate_random_hands() )
def _snake_case( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Tuple:
'''simple docstring'''
assert PokerHand(SCREAMING_SNAKE_CASE__ ).compare_with(PokerHand(SCREAMING_SNAKE_CASE__ ) ) == expected
def _snake_case( ) -> Optional[int]:
'''simple docstring'''
    poker_hands = [PokerHand(hand ) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy )
    user_sorted = chain(sorted(list_copy ) )
    for index, hand in enumerate(user_sorted ):
assert hand == poker_hands[index]
def _snake_case( ) -> List[str]:
'''simple docstring'''
    pokerhands = [PokerHand('2D AC 3H 4H 5S' ), PokerHand('2S 3H 4H 5S 6C' )]
    pokerhands.sort(reverse=True )
assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def _snake_case( ) -> str:
'''simple docstring'''
    pokerhand = PokerHand('2C 4S AS 3D 5C' )
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
for _ in range(10 ):
assert pokerhand._is_five_high_straight() == expected
assert pokerhand._card_values == expected_card_values
def _snake_case( ) -> List[str]:
'''simple docstring'''
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__ ) )
    poker_hands_file = os.path.join(script_dir , 'poker_hands.txt' )
    with open(poker_hands_file ) as file_hand:
        for line in file_hand:
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player , opponent = PokerHand(player_hand ), PokerHand(opponent_hand )
            output = player.compare_with(opponent )
if output == "Win":
answer += 1
assert answer == 376
| 7 |
'''simple docstring'''
# Lint as: python3
import itertools
import os
import re
_uppercase_uppercase_re = re.compile(R'([A-Z]+)([A-Z][a-z])')
_lowercase_uppercase_re = re.compile(R'([a-z\d])([A-Z])')
_single_underscore_re = re.compile(R'(?<!_)_(?!_)')
_multiple_underscores_re = re.compile(R'(_{2,})')
_split_re = R'^\w+(\.\w+)*$'
lowerCamelCase : Dict = R'<>:/\|?*'
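# Helpers below convert CamelCase dataset names to snake_case and build the
# deterministic `prefix-split-#####-of-#####` shard file names used on disk.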
def _SCREAMING_SNAKE_CASE (A ) -> Any:
"""simple docstring"""
    name = _uppercase_uppercase_re.sub(R'''\1_\2''' , A )
    name = _lowercase_uppercase_re.sub(R'''\1_\2''' , name )
return name.lower()
def _SCREAMING_SNAKE_CASE (A ) -> Tuple:
"""simple docstring"""
    name = _single_underscore_re.split(A )
    name = [_multiple_underscores_re.split(n ) for n in name]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(name ) if n != '''''' )
def _SCREAMING_SNAKE_CASE (A ) -> Tuple:
"""simple docstring"""
if os.path.basename(A ) != name:
raise ValueError(f"Should be a dataset name, not a path: {name}" )
return camelcase_to_snakecase(A )
def _SCREAMING_SNAKE_CASE (A , A ) -> Optional[Any]:
"""simple docstring"""
if os.path.basename(A ) != name:
raise ValueError(f"Should be a dataset name, not a path: {name}" )
if not re.match(_split_re , A ):
raise ValueError(f"Split name should match '{_split_re}'' but got '{split}'." )
return f"{filename_prefix_for_name(A )}-{split}"
def _SCREAMING_SNAKE_CASE (A , A , A , A=None ) -> List[str]:
"""simple docstring"""
lowercase__ = filename_prefix_for_split(A , A )
if filetype_suffix:
prefix += f".{filetype_suffix}"
lowercase__ = os.path.join(A , A )
return f"{filepath}*"
def _SCREAMING_SNAKE_CASE (A , A , A , A=None , A=None ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = filename_prefix_for_split(A , A )
lowercase__ = os.path.join(A , A )
if shard_lengths:
lowercase__ = len(A )
lowercase__ = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(A )]
if filetype_suffix:
lowercase__ = [filename + f".{filetype_suffix}" for filename in filenames]
return filenames
else:
lowercase__ = prefix
if filetype_suffix:
filename += f".{filetype_suffix}"
return [filename]
| 2 | 0 |
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
# Initialising the CNN
# (Sequential- Building the model layer by layer)
    classifier = models.Sequential()
# Step 1 - Convolution
# Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
# (3,3) is the kernel size (filter matrix)
classifier.add(
        layers.Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation='''relu''')
    )
    # Step 2 - Pooling
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))
    # Adding a second convolutional layer
    classifier.add(layers.Conv2D(32, (3, 3), activation='''relu'''))
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))
# Step 3 - Flattening
classifier.add(layers.Flatten())
# Step 4 - Full connection
classifier.add(layers.Dense(units=1_28, activation='''relu'''))
classifier.add(layers.Dense(units=1, activation='''sigmoid'''))
# Compiling the CNN
classifier.compile(
optimizer='''adam''', loss='''binary_crossentropy''', metrics=['''accuracy''']
)
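    # A single sigmoid unit trained with binary cross-entropy: the network
    # outputs the probability that an image belongs to the positive class.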
# Part 2 - Fitting the CNN to the images
# Load Trained model weights
# from keras.models import load_model
# regressor=load_model('cnn.h5')
    train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
        rescale=1.0 / 2_55, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
    )
    test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 2_55)
    training_set = train_datagen.flow_from_directory(
        '''dataset/training_set''', target_size=(64, 64), batch_size=32, class_mode='''binary'''
    )
    test_set = test_datagen.flow_from_directory(
        '''dataset/test_set''', target_size=(64, 64), batch_size=32, class_mode='''binary'''
    )
classifier.fit_generator(
training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
)
classifier.save('''cnn.h5''')
# Part 3 - Making new predictions
    test_image = tf.keras.preprocessing.image.load_img(
'''dataset/single_prediction/image.png''', target_size=(64, 64)
)
    test_image = tf.keras.preprocessing.image.img_to_array(test_image)
    test_image = np.expand_dims(test_image, axis=0)
    result = classifier.predict(test_image)
    # training_set.class_indices
    if result[0][0] == 0:
        prediction = '''Normal'''
    if result[0][0] == 1:
        prediction = '''Abnormality detected'''
'''simple docstring'''
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class TrOCRStandaloneDecoderModelTester :
'''simple docstring'''
def __init__(self : str , UpperCamelCase : Tuple , UpperCamelCase : Optional[int]=99 , UpperCamelCase : Optional[int]=13 , UpperCamelCase : Tuple=16 , UpperCamelCase : Union[str, Any]=7 , UpperCamelCase : List[Any]=True , UpperCamelCase : List[str]=True , UpperCamelCase : str=True , UpperCamelCase : Tuple=False , UpperCamelCase : str=True , UpperCamelCase : Tuple=2 , UpperCamelCase : Optional[int]=32 , UpperCamelCase : Any=4 , UpperCamelCase : Optional[int]=4 , UpperCamelCase : Tuple=30 , UpperCamelCase : str=0 , UpperCamelCase : Tuple=1 , UpperCamelCase : List[Any]=2 , UpperCamelCase : str=None , ):
'''simple docstring'''
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = decoder_seq_length
# For common tests
lowercase__ = self.decoder_seq_length
lowercase__ = is_training
lowercase__ = use_attention_mask
lowercase__ = use_labels
lowercase__ = vocab_size
lowercase__ = d_model
lowercase__ = d_model
lowercase__ = decoder_layers
lowercase__ = decoder_layers
lowercase__ = decoder_ffn_dim
lowercase__ = decoder_attention_heads
lowercase__ = decoder_attention_heads
lowercase__ = eos_token_id
lowercase__ = bos_token_id
lowercase__ = pad_token_id
lowercase__ = decoder_start_token_id
lowercase__ = use_cache
lowercase__ = max_position_embeddings
lowercase__ = None
lowercase__ = decoder_seq_length
lowercase__ = 2
lowercase__ = 1
def UpperCamelCase__ (self : str ):
'''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 )
        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
        config = TrOCRConfig(
vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , )
return (config, input_ids, attention_mask, lm_labels)
    def UpperCamelCase__ (self : Tuple , config : List[Any] , input_ids : Optional[Any] , attention_mask : Tuple , lm_labels : List[str] , ):
        '''simple docstring'''
        config.use_cache = True
        model = TrOCRDecoder(config=config ).to(torch_device ).eval()
        input_ids = input_ids[:2]
        input_ids[input_ids == 0] += 1
        # first forward pass
        outputs = model(input_ids , use_cache=True )
        outputs_use_cache_conf = model(input_ids )
        outputs_no_past = model(input_ids , use_cache=False )
        self.parent.assertTrue(len(outputs ) == len(outputs_use_cache_conf ) )
        self.parent.assertTrue(len(outputs ) == len(outputs_no_past ) + 1 )
        past_key_values = outputs['''past_key_values''']
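        # `past_key_values` caches each layer's key/value tensors so the next
        # forward pass only needs to run attention for the newly appended token.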
        # create hypothetical next token and extend to next_input_ids
        next_tokens = ids_tensor((2, 1) , config.vocab_size - 1 ) + 1
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens] , dim=-1 )
        output_from_no_past = model(next_input_ids )['''last_hidden_state''']
        output_from_past = model(next_tokens , past_key_values=past_key_values )['''last_hidden_state''']
        # select random slice
        random_slice_idx = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
        # test that outputs are equal for slice
        assert torch.allclose(output_from_past_slice , output_from_no_past_slice , atol=1E-3 )
def UpperCamelCase__ (self : Optional[Any] ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , attention_mask , lm_labels = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
@require_torch
class __lowerCAmelCase (ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
lowerCAmelCase__ : List[Any] = (TrOCRForCausalLM,) if is_torch_available() else ()
lowerCAmelCase__ : Optional[Any] = {"""text-generation""": TrOCRForCausalLM} if is_torch_available() else {}
lowerCAmelCase__ : Optional[Any] = True
lowerCAmelCase__ : List[str] = False
def UpperCamelCase__ (self : Any ):
'''simple docstring'''
        self.model_tester = TrOCRStandaloneDecoderModelTester(self , is_training=False )
        self.config_tester = ConfigTester(self , config_class=TrOCRConfig )
def UpperCamelCase__ (self : List[str] ):
'''simple docstring'''
pass
def UpperCamelCase__ (self : Optional[int] ):
'''simple docstring'''
pass
def UpperCamelCase__ (self : Any ):
'''simple docstring'''
pass
def UpperCamelCase__ (self : Any ):
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCamelCase__ (self : Union[str, Any] ):
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past(*config_and_inputs )
def UpperCamelCase__ (self : Optional[int] ):
'''simple docstring'''
return
@unittest.skip('''The model doesn\'t support left padding''' ) # and it's not used enough to be worth fixing :)
def UpperCamelCase__ (self : List[str] ):
'''simple docstring'''
pass
| 2 | 0 |
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from tax import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, TaFilmDecoder
MODEL ='base_with_context'
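# Each loader below copies a T5X/Flax weight tree into the matching PyTorch
# module; dense kernels are transposed because Flax stores them as
# (in_features, out_features) while torch.nn.Linear expects the opposite.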
def load_notes_encoder ( weights , model ):
__SCREAMING_SNAKE_CASE : List[Any] = nn.Parameter(torch.FloatTensor(weights['''token_embedder''']['''embedding'''] ) )
__SCREAMING_SNAKE_CASE : int = nn.Parameter(
torch.FloatTensor(weights['''Embed_0''']['''embedding'''] ) , requires_grad=lowercase__ )
for lyr_num, lyr in enumerate(model.encoders ):
__SCREAMING_SNAKE_CASE : str = weights[F'''layers_{lyr_num}''']
__SCREAMING_SNAKE_CASE : int = nn.Parameter(
torch.FloatTensor(ly_weight['''pre_attention_layer_norm''']['''scale'''] ) )
__SCREAMING_SNAKE_CASE : List[Any] = ly_weight['''attention''']
__SCREAMING_SNAKE_CASE : str = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) )
__SCREAMING_SNAKE_CASE : Dict = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) )
__SCREAMING_SNAKE_CASE : Optional[Any] = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) )
__SCREAMING_SNAKE_CASE : Union[str, Any] = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) )
__SCREAMING_SNAKE_CASE : List[str] = nn.Parameter(torch.FloatTensor(ly_weight['''pre_mlp_layer_norm''']['''scale'''] ) )
__SCREAMING_SNAKE_CASE : str = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_0''']['''kernel'''].T ) )
__SCREAMING_SNAKE_CASE : str = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_1''']['''kernel'''].T ) )
__SCREAMING_SNAKE_CASE : Dict = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wo''']['''kernel'''].T ) )
__SCREAMING_SNAKE_CASE : Optional[int] = nn.Parameter(torch.FloatTensor(weights['''encoder_norm''']['''scale'''] ) )
return model
def load_continuous_encoder ( weights , model ):
__SCREAMING_SNAKE_CASE : Any = nn.Parameter(torch.FloatTensor(weights['''input_proj''']['''kernel'''].T ) )
__SCREAMING_SNAKE_CASE : Optional[int] = nn.Parameter(
torch.FloatTensor(weights['''Embed_0''']['''embedding'''] ) , requires_grad=lowercase__ )
for lyr_num, lyr in enumerate(model.encoders ):
__SCREAMING_SNAKE_CASE : List[str] = weights[F'''layers_{lyr_num}''']
__SCREAMING_SNAKE_CASE : int = ly_weight['''attention''']
__SCREAMING_SNAKE_CASE : List[Any] = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) )
__SCREAMING_SNAKE_CASE : Any = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) )
__SCREAMING_SNAKE_CASE : List[str] = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) )
__SCREAMING_SNAKE_CASE : Optional[int] = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) )
__SCREAMING_SNAKE_CASE : Union[str, Any] = nn.Parameter(
torch.FloatTensor(ly_weight['''pre_attention_layer_norm''']['''scale'''] ) )
__SCREAMING_SNAKE_CASE : Optional[Any] = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_0''']['''kernel'''].T ) )
__SCREAMING_SNAKE_CASE : Dict = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_1''']['''kernel'''].T ) )
__SCREAMING_SNAKE_CASE : Optional[int] = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wo''']['''kernel'''].T ) )
__SCREAMING_SNAKE_CASE : Any = nn.Parameter(torch.FloatTensor(ly_weight['''pre_mlp_layer_norm''']['''scale'''] ) )
__SCREAMING_SNAKE_CASE : Optional[Any] = nn.Parameter(torch.FloatTensor(weights['''encoder_norm''']['''scale'''] ) )
return model
def load_decoder ( weights , model ):
__SCREAMING_SNAKE_CASE : Optional[int] = nn.Parameter(torch.FloatTensor(weights['''time_emb_dense0''']['''kernel'''].T ) )
__SCREAMING_SNAKE_CASE : Tuple = nn.Parameter(torch.FloatTensor(weights['''time_emb_dense1''']['''kernel'''].T ) )
__SCREAMING_SNAKE_CASE : Union[str, Any] = nn.Parameter(
torch.FloatTensor(weights['''Embed_0''']['''embedding'''] ) , requires_grad=lowercase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = nn.Parameter(
torch.FloatTensor(weights['''continuous_inputs_projection''']['''kernel'''].T ) )
for lyr_num, lyr in enumerate(model.decoders ):
__SCREAMING_SNAKE_CASE : Dict = weights[F'''layers_{lyr_num}''']
__SCREAMING_SNAKE_CASE : int = nn.Parameter(
torch.FloatTensor(ly_weight['''pre_self_attention_layer_norm''']['''scale'''] ) )
__SCREAMING_SNAKE_CASE : Union[str, Any] = nn.Parameter(
torch.FloatTensor(ly_weight['''FiLMLayer_0''']['''DenseGeneral_0''']['''kernel'''].T ) )
__SCREAMING_SNAKE_CASE : Optional[Any] = ly_weight['''self_attention''']
__SCREAMING_SNAKE_CASE : Any = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) )
__SCREAMING_SNAKE_CASE : List[Any] = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) )
__SCREAMING_SNAKE_CASE : Optional[int] = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) )
__SCREAMING_SNAKE_CASE : List[str] = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) )
__SCREAMING_SNAKE_CASE : Optional[int] = ly_weight['''MultiHeadDotProductAttention_0''']
__SCREAMING_SNAKE_CASE : Union[str, Any] = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) )
__SCREAMING_SNAKE_CASE : Optional[int] = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) )
__SCREAMING_SNAKE_CASE : Optional[Any] = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) )
__SCREAMING_SNAKE_CASE : Dict = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) )
__SCREAMING_SNAKE_CASE : Any = nn.Parameter(
torch.FloatTensor(ly_weight['''pre_cross_attention_layer_norm''']['''scale'''] ) )
__SCREAMING_SNAKE_CASE : Tuple = nn.Parameter(torch.FloatTensor(ly_weight['''pre_mlp_layer_norm''']['''scale'''] ) )
__SCREAMING_SNAKE_CASE : int = nn.Parameter(
torch.FloatTensor(ly_weight['''FiLMLayer_1''']['''DenseGeneral_0''']['''kernel'''].T ) )
__SCREAMING_SNAKE_CASE : Dict = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_0''']['''kernel'''].T ) )
__SCREAMING_SNAKE_CASE : int = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_1''']['''kernel'''].T ) )
__SCREAMING_SNAKE_CASE : List[Any] = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wo''']['''kernel'''].T ) )
__SCREAMING_SNAKE_CASE : List[Any] = nn.Parameter(torch.FloatTensor(weights['''decoder_norm''']['''scale'''] ) )
__SCREAMING_SNAKE_CASE : int = nn.Parameter(torch.FloatTensor(weights['''spec_out_dense''']['''kernel'''].T ) )
return model
def main ( args ):
    ta_checkpoint = checkpoints.load_tax_checkpoint(args.checkpoint_path )
    ta_checkpoint = jnp.tree_util.tree_map(onp.array , ta_checkpoint )
    gin_overrides = [
'''from __gin__ import dynamic_registration''',
'''from music_spectrogram_diffusion.models.diffusion import diffusion_utils''',
'''diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0''',
'''diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()''',
]
    gin_file = os.path.join(args.checkpoint_path , '''..''' , '''config.gin''' )
    gin_config = inference.parse_training_gin_file(gin_file , gin_overrides )
    synth_model = inference.InferenceModel(args.checkpoint_path , gin_config )
    scheduler = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' , variance_type='''fixed_large''' )
    notes_encoder = SpectrogramNotesEncoder(
max_length=synth_model.sequence_length['''inputs'''] , vocab_size=synth_model.model.module.config.vocab_size , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='''gated-gelu''' , )
    continuous_encoder = SpectrogramContEncoder(
input_dims=synth_model.audio_codec.n_dims , targets_context_length=synth_model.sequence_length['''targets_context'''] , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='''gated-gelu''' , )
    decoder = TaFilmDecoder(
input_dims=synth_model.audio_codec.n_dims , targets_length=synth_model.sequence_length['''targets_context'''] , max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time , d_model=synth_model.model.module.config.emb_dim , num_layers=synth_model.model.module.config.num_decoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , dropout_rate=synth_model.model.module.config.dropout_rate , )
    notes_encoder = load_notes_encoder(ta_checkpoint['''target''']['''token_encoder'''] , notes_encoder )
    continuous_encoder = load_continuous_encoder(ta_checkpoint['''target''']['''continuous_encoder'''] , continuous_encoder )
    decoder = load_decoder(ta_checkpoint['''target''']['''decoder'''] , decoder )
    melgan = OnnxRuntimeModel.from_pretrained('''kashif/soundstream_mel_decoder''' )
    pipe = SpectrogramDiffusionPipeline(
        notes_encoder=notes_encoder , continuous_encoder=continuous_encoder , decoder=decoder , scheduler=scheduler , melgan=melgan , )
if args.save:
pipe.save_pretrained(args.output_path )
if __name__ == "__main__":
    parser =argparse.ArgumentParser()
parser.add_argument('--output_path', default=None, type=str, required=True, help='Path to the converted model.')
parser.add_argument(
'--save', default=True, type=bool, required=False, help='Whether to save the converted model or not.'
)
parser.add_argument(
'--checkpoint_path',
default=f"""{MODEL}/checkpoint_500000""",
type=str,
required=False,
help='Path to the original jax model checkpoint.',
)
    args =parser.parse_args()
main(args)
| 9 |
'''simple docstring'''
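# Return the largest value obtainable by deleting exactly one digit: build
# every single-digit-removed variant of the number and take the maximum.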
def _SCREAMING_SNAKE_CASE (A ) -> int:
"""simple docstring"""
if not isinstance(A , A ):
raise TypeError('''only integers accepted as input''' )
else:
        num_string = str(abs(A ) )
        num_transpositions = [list(num_string ) for char in range(len(num_string ) )]
    for index in range(len(num_string ) ):
        num_transpositions[index].pop(index )
    return max(
        int(''''''.join(list(transposition ) ) ) for transposition in num_transpositions )
if __name__ == "__main__":
__import__('doctest').testmod()
| 2 | 0 |
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
AutoTokenizer,
TFAutoModelForCausalLM,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSpeechSeqaSeq,
TFAutoModelForVisionaSeq,
TFBartForConditionalGeneration,
TFLogitsProcessorList,
TFMinLengthLogitsProcessor,
tf_top_k_top_p_filtering,
)
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->Union[str, Any]:
'''simple docstring'''
        logits =tf.convert_to_tensor(
[
[
8.222_0991, # 3rd highest value; idx. 0
-0.562_0044,
5.2322_9752,
4.038_6393,
-6.879_8378,
-0.5478_5802,
-3.201_2153,
2.9277_7176,
1.8817_1953,
7.3534_1276, # 5th highest value; idx. 9
8.4320_7833, # 2nd highest value; idx. 10
-9.8571_1836,
-5.9620_9236,
-1.1303_9161,
-7.111_5294,
-0.836_9633,
-5.318_6408,
7.0642_7407,
0.8136_9344,
-0.8202_3817,
-5.917_9796,
0.5881_3443,
-6.9977_8438,
4.7155_1189,
-0.1877_1637,
7.4402_0759, # 4th highest value; idx. 25
9.3845_0987, # 1st highest value; idx. 26
2.1266_2941,
-9.3256_2038,
2.3565_2522,
                ], # cumulative prob of 5 highest values <= 0.6
[
0.5842_5518,
4.5313_9238,
-5.5751_0464,
-6.2803_0699,
-7.1952_9503,
-4.0212_2551,
1.3933_7037,
-6.0670_7057,
1.5948_0517,
-9.64_3119,
0.0390_7799,
0.6723_1762,
-8.8820_6726,
6.2711_5922, # 4th highest value; idx. 13
2.2852_0723,
4.8276_7506,
4.3042_1368,
8.827_5313, # 2nd highest value; idx. 17
5.4402_9958, # 5th highest value; idx. 18
-4.473_5794,
7.3857_9536, # 3rd highest value; idx. 20
-2.9105_1663,
2.6194_6077,
-2.567_4762,
-9.4895_9302,
-4.0292_2645,
-1.3541_6918,
9.6770_2323, # 1st highest value; idx. 27
-5.8947_8553,
1.8537_0467,
                ], # cumulative prob of 5 highest values <= 0.6
            ] , dtype=tf.float32 , )
        non_inf_expected_idx =tf.convert_to_tensor(
            [[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]] , dtype=tf.int32 , ) # expected non filtered idx as noted above
        non_inf_expected_output =tf.convert_to_tensor(
            [8.22_2099, 7.353_4126, 8.43_2078, 7.440_2075, 9.3_8451, 6.27_1159, 8.82_7531, 5.440_2995, 7.385_7956, 9.67_7023] , dtype=tf.float32 , ) # expected non filtered values as noted above
        output =tf_top_k_top_p_filtering(logits , top_k=10 , top_p=0.6 , min_tokens_to_keep=4)
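        # Everything outside the top-k/top-p nucleus is filtered to -inf; keep
        # only the surviving logits and their coordinates for the comparison.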
lowerCamelCase__: List[str] =output[output != -float("inf")]
lowerCamelCase__: List[Any] =tf.cast(
tf.where(tf.not_equal(UpperCAmelCase_ , tf.constant(-float("inf") , dtype=tf.floataa))) , dtype=tf.intaa , )
tf.debugging.assert_near(UpperCAmelCase_ , UpperCAmelCase_ , rtol=1E-1_2)
tf.debugging.assert_equal(UpperCAmelCase_ , UpperCAmelCase_)
@require_tf
class _SCREAMING_SNAKE_CASE ( unittest.TestCase , GenerationIntegrationTestsMixin ):
'''simple docstring'''
# setting framework_dependent_parameters needs to be gated, just like its contents' imports
if is_tf_available():
lowercase_ = {
"AutoModelForCausalLM": TFAutoModelForCausalLM,
"AutoModelForSpeechSeq2Seq": TFAutoModelForSpeechSeqaSeq,
"AutoModelForSeq2SeqLM": TFAutoModelForSeqaSeqLM,
"AutoModelForVision2Seq": TFAutoModelForVisionaSeq,
"LogitsProcessorList": TFLogitsProcessorList,
"MinLengthLogitsProcessor": TFMinLengthLogitsProcessor,
"create_tensor_fn": tf.convert_to_tensor,
"floats_tensor": floats_tensor,
"return_tensors": "tf",
}
@slow
def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->Dict:
'''simple docstring'''
lowerCamelCase__: Optional[int] =TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
lowerCamelCase__: Any =2
lowerCamelCase__: List[Any] =2
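        # Wrap `generate` in a tf.function with a fixed input signature so the
        # model can be exported and reloaded as a SavedModel serving signature.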
        class DummyModel ( tf.Module ):
'''simple docstring'''
def __init__(self : Any , UpperCAmelCase_ : Dict) ->Union[str, Any]:
'''simple docstring'''
super(UpperCAmelCase_ , self).__init__()
lowerCamelCase__: Union[str, Any] =model
@tf.function(
input_signature=(
tf.TensorSpec((None, input_length) , tf.intaa , name="input_ids"),
tf.TensorSpec((None, input_length) , tf.intaa , name="attention_mask"),
) , jit_compile=UpperCAmelCase_ , )
def SCREAMING_SNAKE_CASE_ (self : Optional[int] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Any) ->int:
'''simple docstring'''
lowerCamelCase__: Any =self.model.generate(
input_ids=UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , max_new_tokens=UpperCAmelCase_ , return_dict_in_generate=UpperCAmelCase_ , )
return {"sequences": outputs["sequences"]}
lowerCamelCase__: Dict =[[2, 0], [102, 103]]
lowerCamelCase__: List[Any] =[[1, 0], [1, 1]]
lowerCamelCase__: Union[str, Any] =DummyModel(model=UpperCAmelCase_)
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(UpperCAmelCase_ , UpperCAmelCase_ , signatures={"serving_default": dummy_model.serving})
lowerCamelCase__: Tuple =tf.saved_model.load(UpperCAmelCase_).signatures["serving_default"]
for batch_size in range(1 , len(UpperCAmelCase_) + 1):
lowerCamelCase__: Optional[Any] ={
"input_ids": tf.constant(dummy_input_ids[:batch_size]),
"attention_mask": tf.constant(dummy_attention_masks[:batch_size]),
}
lowerCamelCase__: int =serving_func(**UpperCAmelCase_)["sequences"]
lowerCamelCase__: int =test_model.generate(**UpperCAmelCase_ , max_new_tokens=UpperCAmelCase_)
tf.debugging.assert_equal(UpperCAmelCase_ , UpperCAmelCase_)
@slow
def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->Dict:
'''simple docstring'''
lowerCamelCase__: Tuple =TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
lowerCamelCase__: Tuple =1
lowerCamelCase__: int =2
        class DummyModel ( tf.Module ):
'''simple docstring'''
def __init__(self : Optional[Any] , UpperCAmelCase_ : int) ->Tuple:
'''simple docstring'''
super(UpperCAmelCase_ , self).__init__()
lowerCamelCase__: str =model
@tf.function(
input_signature=(
tf.TensorSpec((batch_size, None) , tf.intaa , name="input_ids"),
tf.TensorSpec((batch_size, None) , tf.intaa , name="attention_mask"),
) , jit_compile=UpperCAmelCase_ , )
def SCREAMING_SNAKE_CASE_ (self : int , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[Any]) ->Tuple:
'''simple docstring'''
lowerCamelCase__: Dict =self.model.generate(
input_ids=UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , max_new_tokens=UpperCAmelCase_ , return_dict_in_generate=UpperCAmelCase_ , )
return {"sequences": outputs["sequences"]}
lowerCamelCase__: Dict =[[2], [102, 103]]
lowerCamelCase__: Tuple =[[1], [1, 1]]
lowerCamelCase__: List[str] =DummyModel(model=UpperCAmelCase_)
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(UpperCAmelCase_ , UpperCAmelCase_ , signatures={"serving_default": dummy_model.serving})
lowerCamelCase__: Dict =tf.saved_model.load(UpperCAmelCase_).signatures["serving_default"]
for input_row in range(len(UpperCAmelCase_)):
lowerCamelCase__: Optional[Any] ={
"input_ids": tf.constant([dummy_input_ids[input_row]]),
"attention_mask": tf.constant([dummy_attention_masks[input_row]]),
}
lowerCamelCase__: List[Any] =serving_func(**UpperCAmelCase_)["sequences"]
lowerCamelCase__: Optional[Any] =test_model.generate(**UpperCAmelCase_ , max_new_tokens=UpperCAmelCase_)
tf.debugging.assert_equal(UpperCAmelCase_ , UpperCAmelCase_)
@slow
@require_tensorflow_text
def SCREAMING_SNAKE_CASE_ (self : Any) ->Any:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
# file needed to load the TF tokenizer
hf_hub_download(repo_id="google/flan-t5-small" , filename="spiece.model" , local_dir=UpperCAmelCase_)
        class CompleteSentenceTransformer ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__(self : Optional[int]) ->Optional[int]:
'''simple docstring'''
super().__init__()
lowerCamelCase__: Dict =text.SentencepieceTokenizer(
model=tf.io.gfile.GFile(os.path.join(UpperCAmelCase_ , "spiece.model") , "rb").read())
lowerCamelCase__: Dict =TFAutoModelForSeqaSeqLM.from_pretrained("hf-internal-testing/tiny-random-t5")
def SCREAMING_SNAKE_CASE_ (self : Any , UpperCAmelCase_ : Optional[Any] , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Tuple) ->Optional[int]:
'''simple docstring'''
lowerCamelCase__: Any =self.tokenizer.tokenize(UpperCAmelCase_)
lowerCamelCase__ , lowerCamelCase__: Dict =text.pad_model_inputs(
UpperCAmelCase_ , max_seq_length=64 , pad_value=self.model.config.pad_token_id)
lowerCamelCase__: Dict =self.model.generate(input_ids=UpperCAmelCase_ , attention_mask=UpperCAmelCase_)
return self.tokenizer.detokenize(UpperCAmelCase_)
lowerCamelCase__: int =CompleteSentenceTransformer()
lowerCamelCase__: Dict =tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name="inputs")
lowerCamelCase__: Optional[Any] =complete_model(UpperCAmelCase_)
lowerCamelCase__: Optional[int] =tf.keras.Model(UpperCAmelCase_ , UpperCAmelCase_)
keras_model.save(UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Dict) ->int:
'''simple docstring'''
lowerCamelCase__: str ={
"do_sample": True,
"num_beams": 1,
"top_p": 0.7,
"top_k": 10,
"temperature": 0.7,
}
lowerCamelCase__: Any =14
lowerCamelCase__: Tuple =AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
lowerCamelCase__: int ="Hello, my dog is cute and"
lowerCamelCase__: Optional[int] =tokenizer(UpperCAmelCase_ , return_tensors="tf")
lowerCamelCase__: Tuple =TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
lowerCamelCase__: Union[str, Any] =638
# forces the generation to happen on CPU, to avoid GPU-related quirks
with tf.device(":/CPU:0"):
tf.random.set_seed(0)
lowerCamelCase__: int =model.generate(**UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ , **UpperCAmelCase_)
self.assertTrue(expectation == len(generated_tokens[0]))
lowerCamelCase__: List[str] =[638, 198]
with tf.device(":/CPU:0"):
tf.random.set_seed(0)
lowerCamelCase__: Dict =model.generate(**UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ , **UpperCAmelCase_)
self.assertTrue(expectation == len(generated_tokens[0]))
def SCREAMING_SNAKE_CASE_ (self : int) ->Optional[Any]:
'''simple docstring'''
lowerCamelCase__: Tuple =AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart")
lowerCamelCase__: Optional[int] ="Hugging Face is a technology company based in New York and Paris."
lowerCamelCase__: Any =bart_tokenizer(UpperCAmelCase_ , return_tensors="tf").input_ids
lowerCamelCase__: Optional[int] =TFBartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart")
lowerCamelCase__: Optional[int] =bart_model.generate(UpperCAmelCase_).numpy()
        class FakeBart ( TFBartForConditionalGeneration ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ (self : str , UpperCAmelCase_ : str , UpperCAmelCase_ : List[str]=None , **UpperCAmelCase_ : int) ->Any:
'''simple docstring'''
return super().call(UpperCAmelCase_ , **UpperCAmelCase_)
lowerCamelCase__: Optional[Any] =FakeBart.from_pretrained("hf-internal-testing/tiny-random-bart")
lowerCamelCase__: Union[str, Any] =bart_model.generate(UpperCAmelCase_ , foo="bar").numpy()
self.assertTrue(np.array_equal(UpperCAmelCase_ , UpperCAmelCase_))
        class FakeEncoder ( bart_model.model.encoder.__class__ ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ (self : int , UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : List[Any]) ->Optional[int]:
'''simple docstring'''
return super().call(UpperCAmelCase_ , **UpperCAmelCase_)
lowerCamelCase__: Union[str, Any] =FakeEncoder(bart_model.config , bart_model.model.shared)
lowerCamelCase__: int =fake_encoder
# Normal generation still works (the output will be different because the encoder weights are different)
lowerCamelCase__: Any =bart_model.generate(UpperCAmelCase_).numpy()
with self.assertRaises(UpperCAmelCase_):
# FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
bart_model.generate(UpperCAmelCase_ , foo="bar")
| 10 |
'''simple docstring'''
import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
import numpy as np
from . import residue_constants
lowerCamelCase : str = Mapping[str, np.ndarray]
lowerCamelCase : List[Any] = Mapping[str, Any] # Is a nested dict.
PICO_TO_ANGSTROM = 0.0_1
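# ProteinNet stores coordinates in picometers; 1 pm = 0.01 angstrom.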
@dataclasses.dataclass(frozen=True )
class Protein :
'''simple docstring'''
lowerCAmelCase__ : np.ndarray # [num_res, num_atom_type, 3]
# Amino-acid type for each residue represented as an integer between 0 and
# 20, where 20 is 'X'.
lowerCAmelCase__ : np.ndarray # [num_res]
# Binary float mask to indicate presence of a particular atom. 1.0 if an atom
# is present and 0.0 if not. This should be used for loss masking.
lowerCAmelCase__ : np.ndarray # [num_res, num_atom_type]
# Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
lowerCAmelCase__ : np.ndarray # [num_res]
# B-factors, or temperature factors, of each residue (in sq. angstroms units),
# representing the displacement of the residue from its ground truth mean
# value.
lowerCAmelCase__ : np.ndarray # [num_res, num_atom_type]
# Chain indices for multi-chain predictions
lowerCAmelCase__ : Optional[np.ndarray] = None
# Optional remark about the protein. Included as a comment in output PDB
# files
lowerCAmelCase__ : Optional[str] = None
# Templates used to generate this protein (prediction-only)
lowerCAmelCase__ : Optional[Sequence[str]] = None
# Chain corresponding to each parent
lowerCAmelCase__ : Optional[Sequence[int]] = None
def _SCREAMING_SNAKE_CASE (A ) -> Protein:
"""simple docstring"""
    tag_re = R'''(\[[A-Z]+\]\n)'''
    tags = [tag.strip() for tag in re.split(tag_re , A ) if len(tag ) > 0]
    groups = zip(tags[0::2] , [l.split('''\n''' ) for l in tags[1::2]] )
    atoms = ["N", "CA", "C"]
    aatype = None
    atom_positions = None
    atom_mask = None
for g in groups:
if "[PRIMARY]" == g[0]:
            seq = g[1][0].strip()
            for i in range(len(seq ) ):
                if seq[i] not in residue_constants.restypes:
                    seq[i] = '''X'''  # FIXME: strings are immutable
            aatype = np.array(
                [residue_constants.restype_order.get(res_symbol , residue_constants.restype_num ) for res_symbol in seq] )
elif "[TERTIARY]" == g[0]:
            tertiary = []
            for axis in range(3 ):
                tertiary.append(list(map(float , g[1][axis].split() ) ) )
            tertiary_np = np.array(tertiary )
            atom_positions = np.zeros((len(tertiary[0] ) // 3, residue_constants.atom_type_num, 3) ).astype(np.float32 )
            for i, atom in enumerate(atoms ):
                atom_positions[:, residue_constants.atom_order[atom]] = np.transpose(tertiary_np[:, i::3] )
            atom_positions *= PICO_TO_ANGSTROM
elif "[MASK]" == g[0]:
            mask = np.array(list(map({'''-''': 0, '''+''': 1}.get , g[1][0].strip() ) ) )
            atom_mask = np.zeros(
                (
                    len(mask ),
                    residue_constants.atom_type_num,
                ) ).astype(np.float32 )
            for i, atom in enumerate(atoms ):
                atom_mask[:, residue_constants.atom_order[atom]] = 1
atom_mask *= mask[..., None]
assert aatype is not None
    return Protein(
        atom_positions=atom_positions , atom_mask=atom_mask , aatype=aatype , residue_index=np.arange(len(aatype ) ) , b_factors=None , )
def get_pdb_headers (prot , chain_id = 0 ) -> List[str]:
    """simple docstring"""
    pdb_headers = []
    remark = prot.remark
    if remark is not None:
        pdb_headers.append(f"REMARK {remark}" )
    parents = prot.parents
    parents_chain_index = prot.parents_chain_index
    if parents is not None and parents_chain_index is not None:
        parents = [p for i, p in zip(parents , parents_chain_index ) if i == chain_id]
    if parents is None or len(parents ) == 0:
        parents = ['''N/A''']
    pdb_headers.append(f"PARENT {' '.join(parents )}" )
return pdb_headers
def _SCREAMING_SNAKE_CASE (prot , pdb_str ) -> str:
    """simple docstring"""
    out_pdb_lines = []
    lines = pdb_str.split('''\n''' )
    remark = prot.remark
    if remark is not None:
        out_pdb_lines.append(f"REMARK {remark}" )
    if prot.parents is not None and len(prot.parents ) > 0:
        parents_per_chain = []
        if prot.parents_chain_index is not None:
            parent_dict = {}
            for p, i in zip(prot.parents , prot.parents_chain_index ):
                parent_dict.setdefault(str(i ) , [] )
                parent_dict[str(i )].append(p )
            max_idx = max([int(chain_idx ) for chain_idx in parent_dict] )
            for i in range(max_idx + 1 ):
                chain_parents = parent_dict.get(str(i ) , ['''N/A'''] )
                parents_per_chain.append(chain_parents )
        else:
            parents_per_chain.append(list(prot.parents ) )
    else:
        parents_per_chain = [['''N/A''']]
    def make_parent_line(A ) -> str:
        return f"PARENT {' '.join(A )}"
    out_pdb_lines.append(make_parent_line(parents_per_chain[0] ) )
    chain_counter = 0
    for i, l in enumerate(lines ):
        if "PARENT" not in l and "REMARK" not in l:
            out_pdb_lines.append(l )
        if "TER" in l and "END" not in lines[i + 1]:
            chain_counter += 1
            if not chain_counter >= len(parents_per_chain ):
                chain_parents = parents_per_chain[chain_counter]
            else:
                chain_parents = ['''N/A''']
            out_pdb_lines.append(make_parent_line(chain_parents ) )
    return "\n".join(out_pdb_lines )
def _SCREAMING_SNAKE_CASE (prot ) -> str:
    """simple docstring"""
    restypes = residue_constants.restypes + ['''X''']
    def res_atoa(r ) -> str:
        return residue_constants.restype_atoa.get(restypes[r] , '''UNK''' )
    atom_types = residue_constants.atom_types
    pdb_lines = []
    atom_mask = prot.atom_mask
    aatype = prot.aatype
    atom_positions = prot.atom_positions
    residue_index = prot.residue_index.astype(np.int32 )
    b_factors = prot.b_factors
    chain_index = prot.chain_index
if np.any(aatype > residue_constants.restype_num ):
raise ValueError('''Invalid aatypes.''' )
    headers = get_pdb_headers(prot )
    if len(headers ) > 0:
        pdb_lines.extend(headers )
    n = aatype.shape[0]
    atom_index = 1
    prev_chain_index = 0
    chain_tags = string.ascii_uppercase
    chain_tag = None
# Add all atom sites.
    for i in range(n ):
        res_name_a = res_atoa(aatype[i] )
        for atom_name, pos, mask, b_factor in zip(atom_types , atom_positions[i] , atom_mask[i] , b_factors[i] ):
            if mask < 0.5:
                continue
            record_type = '''ATOM'''
            name = atom_name if len(atom_name ) == 4 else f" {atom_name}"
            alt_loc = ''''''
            insertion_code = ''''''
            occupancy = 1.00
            element = atom_name[0]  # Protein supports only C, N, O, S, this works.
            charge = ''''''
            chain_tag = '''A'''
            if chain_index is not None:
                chain_tag = chain_tags[chain_index[i]]
# PDB is a columnar format, every space matters here!
            atom_line = (
f"{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}"
f"{res_name_a:>3} {chain_tag:>1}"
f"{residue_index[i]:>4}{insertion_code:>1} "
f"{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}"
f"{occupancy:>6.2f}{b_factor:>6.2f} "
f"{element:>2}{charge:>2}"
)
            pdb_lines.append(atom_line )
            atom_index += 1
        should_terminate = i == n - 1
if chain_index is not None:
if i != n - 1 and chain_index[i + 1] != prev_chain_index:
                should_terminate = True
                prev_chain_index = chain_index[i + 1]
if should_terminate:
# Close the chain.
            chain_end = '''TER'''
            chain_termination_line = (
f"{chain_end:<6}{atom_index:>5} {res_atoa(aatype[i] ):>3} {chain_tag:>1}{residue_index[i]:>4}"
)
            pdb_lines.append(chain_termination_line )
atom_index += 1
if i != n - 1:
# "prev" is a misnomer here. This happens at the beginning of
# each new chain.
                pdb_lines.extend(get_pdb_headers(prot , prev_chain_index ) )
pdb_lines.append('''END''' )
pdb_lines.append('''''' )
return "\n".join(A )
def _SCREAMING_SNAKE_CASE (prot ) -> np.ndarray:
"""simple docstring"""
return residue_constants.STANDARD_ATOM_MASK[prot.aatype]
def _SCREAMING_SNAKE_CASE (features , result , b_factors = None , chain_index = None , remark = None , parents = None , parents_chain_index = None , ) -> Protein:
    """simple docstring"""
    return Protein(
        aatype=features['''aatype'''] , atom_positions=result['''final_atom_positions'''] , atom_mask=result['''final_atom_mask'''] , residue_index=features['''residue_index'''] + 1 , b_factors=b_factors if b_factors is not None else np.zeros_like(result['''final_atom_mask'''] ) , chain_index=chain_index , remark=remark , parents=parents , parents_chain_index=parents_chain_index , )
| 2 | 0 |
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForSeqaSeq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
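# Task processors and feature-conversion utilities for GLUE, SQuAD and XNLI.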
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
    SquadVaProcessor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
| 11 |
'''simple docstring'''
from __future__ import annotations
def generate_all_combinations (n , k ) -> list[list[int]]:
    """simple docstring"""
    result = []
    create_all_state(1 , n , k , [] , result )
    return result
def create_all_state (increment , total_number , level , current_list , total_list , ) -> None:
    """simple docstring"""
    if level == 0:
        total_list.append(current_list[:] )
        return
    for i in range(increment , total_number - level + 2 ):
        current_list.append(i )
        create_all_state(i + 1 , total_number , level - 1 , current_list , total_list )
        current_list.pop()
def print_all_state (total_list ) -> None:
    """simple docstring"""
    for i in total_list:
        print(*i )
if __name__ == "__main__":
    n = 4
    k = 2
    total_list = generate_all_combinations(n, k)
print_all_state(total_list)
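    # For n = 4 and k = 2 this prints all six 2-combinations of {1, 2, 3, 4}.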
| 2 | 0 |
# Lint as: python3
import itertools
import os
import re
_uppercase_uppercase_re = re.compile(r'([A-Z]+)([A-Z][a-z])')
_lowercase_uppercase_re = re.compile(r'([a-z\d])([A-Z])')
_single_underscore_re = re.compile(r'(?<!_)_(?!_)')
_multiple_underscores_re = re.compile(r'(_{2,})')
_split_re = r'^\w+(\.\w+)*$'
UpperCAmelCase_ = r'<>:/\|?*'
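# Utilities for converting CamelCase dataset names to snake_case and for
# building the shard file names (`prefix-split-#####-of-#####`) used on disk.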
def lowerCamelCase__ ( A__ : Dict ):
'''simple docstring'''
    name = _uppercase_uppercase_re.sub(R"""\1_\2""" , A__ )
    name = _lowercase_uppercase_re.sub(R"""\1_\2""" , name )
return name.lower()
def lowerCamelCase__ ( A__ : str ):
'''simple docstring'''
    name = _single_underscore_re.split(A__ )
    name = [_multiple_underscores_re.split(n ) for n in name]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(name ) if n != """""" )
def lowerCamelCase__ ( A__ : Optional[Any] ):
'''simple docstring'''
if os.path.basename(A__ ) != name:
raise ValueError(f'Should be a dataset name, not a path: {name}' )
return camelcase_to_snakecase(A__ )
def lowerCamelCase__ ( A__ : Dict , A__ : Dict ):
'''simple docstring'''
if os.path.basename(A__ ) != name:
raise ValueError(f'Should be a dataset name, not a path: {name}' )
if not re.match(_split_re , A__ ):
raise ValueError(f'Split name should match \'{_split_re}\'\' but got \'{split}\'.' )
return f'{filename_prefix_for_name(A__ )}-{split}'
def lowerCamelCase__ ( A__ : int , A__ : Tuple , A__ : Optional[Any] , A__ : str=None ):
'''simple docstring'''
__lowerCamelCase = filename_prefix_for_split(A__ , A__ )
if filetype_suffix:
prefix += f'.{filetype_suffix}'
__lowerCamelCase = os.path.join(A__ , A__ )
return f'{filepath}*'
def lowerCamelCase__ ( A__ : List[str] , A__ : List[Any] , A__ : int , A__ : int=None , A__ : Any=None ):
'''simple docstring'''
__lowerCamelCase = filename_prefix_for_split(A__ , A__ )
__lowerCamelCase = os.path.join(A__ , A__ )
if shard_lengths:
__lowerCamelCase = len(A__ )
__lowerCamelCase = [f'{prefix}-{shard_id:05d}-of-{num_shards:05d}' for shard_id in range(A__ )]
if filetype_suffix:
__lowerCamelCase = [filename + f'.{filetype_suffix}' for filename in filenames]
return filenames
else:
__lowerCamelCase = prefix
if filetype_suffix:
filename += f'.{filetype_suffix}'
return [filename]
| 12 |
'''simple docstring'''
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sola import PokerHand
SORTED_HANDS = (
'4S 3H 2C 7S 5H',
'9D 8H 2C 6S 7H',
'2D 6D 9D TH 7D',
'TC 8C 2S JH 6C',
'JH 8S TH AH QH',
'TS KS 5S 9S AC',
'KD 6S 9D TH AD',
'KS 8D 4D 9S 4S', # pair
'8C 4S KH JS 4D', # pair
'QH 8H KD JH 8S', # pair
'KC 4H KS 2H 8D', # pair
'KD 4S KC 3H 8S', # pair
'AH 8S AS KC JH', # pair
'3H 4C 4H 3S 2H', # 2 pairs
'5S 5D 2C KH KH', # 2 pairs
'3C KH 5D 5S KH', # 2 pairs
'AS 3C KH AD KH', # 2 pairs
'7C 7S 3S 7H 5S', # 3 of a kind
'7C 7S KH 2H 7H', # 3 of a kind
'AC KH QH AH AS', # 3 of a kind
'2H 4D 3C AS 5S', # straight (low ace)
'3C 5C 4C 2C 6H', # straight
'6S 8S 7S 5H 9H', # straight
'JS QS 9H TS KH', # straight
'QC KH TS JS AH', # straight (high ace)
'8C 9C 5C 3C TC', # flush
'3S 8S 9S 5S KS', # flush
'4C 5C 9C 8C KC', # flush
'JH 8H AH KH QH', # flush
'3D 2H 3H 2C 2D', # full house
'2H 2C 3S 3H 3D', # full house
'KH KC 3S 3H 3D', # full house
'JC 6H JS JD JH', # 4 of a kind
'JC 7H JS JD JH', # 4 of a kind
'JC KH JS JD JH', # 4 of a kind
'2S AS 4S 5S 3S', # straight flush (low ace)
'2D 6D 3D 4D 5D', # straight flush
'5C 6C 3C 7C 4C', # straight flush
'JH 9H TH KH QH', # straight flush
'JH AH TH KH QH', # royal flush (high ace straight flush)
)
lowerCamelCase : Tuple = (
('2H 3H 4H 5H 6H', 'KS AS TS QS JS', 'Loss'),
('2H 3H 4H 5H 6H', 'AS AD AC AH JD', 'Win'),
('AS AH 2H AD AC', 'JS JD JC JH 3D', 'Win'),
('2S AH 2H AS AC', 'JS JD JC JH AD', 'Loss'),
('2S AH 2H AS AC', '2H 3H 5H 6H 7H', 'Win'),
('AS 3S 4S 8S 2S', '2H 3H 5H 6H 7H', 'Win'),
('2H 3H 5H 6H 7H', '2S 3H 4H 5S 6C', 'Win'),
('2S 3H 4H 5S 6C', '3D 4C 5H 6H 2S', 'Tie'),
('2S 3H 4H 5S 6C', 'AH AC 5H 6H AS', 'Win'),
('2S 2H 4H 5S 4C', 'AH AC 5H 6H AS', 'Loss'),
('2S 2H 4H 5S 4C', 'AH AC 5H 6H 7S', 'Win'),
('6S AD 7H 4S AS', 'AH AC 5H 6H 7S', 'Loss'),
('2S AH 4H 5S KC', 'AH AC 5H 6H 7S', 'Loss'),
('2S 3H 6H 7S 9C', '7H 3C TH 6H 9S', 'Loss'),
('4S 5H 6H TS AC', '3S 5H 6H TS AC', 'Win'),
('2S AH 4H 5S 6C', 'AD 4C 5H 6H 2C', 'Tie'),
('AS AH 3H AD AC', 'AS AH 2H AD AC', 'Win'),
('AH AC 5H 5C QS', 'AH AC 5H 5C KS', 'Loss'),
('AH AC 5H 5C QS', 'KH KC 5H 5C QS', 'Win'),
('7C 7S KH 2H 7H', '3C 3S AH 2H 3H', 'Win'),
('3C 3S AH 2H 3H', '7C 7S KH 2H 7H', 'Loss'),
('6H 5H 4H 3H 2H', '5H 4H 3H 2H AH', 'Win'),
('5H 4H 3H 2H AH', '5H 4H 3H 2H AH', 'Tie'),
('5H 4H 3H 2H AH', '6H 5H 4H 3H 2H', 'Loss'),
('AH AD KS KC AC', 'AH KD KH AC KC', 'Win'),
('2H 4D 3C AS 5S', '2H 4D 3C 6S 5S', 'Loss'),
('2H 3S 3C 3H 2S', '3S 3C 2S 2H 2D', 'Win'),
('4D 6D 5D 2D JH', '3S 8S 3H TC KH', 'Loss'),
('4S 6C 8S 3S 7S', 'AD KS 2D 7D 7C', 'Loss'),
('6S 4C 7H 8C 3H', '5H JC AH 9D 9C', 'Loss'),
('9D 9H JH TC QH', '3C 2S JS 5C 7H', 'Win'),
('2H TC 8S AD 9S', '4H TS 7H 2C 5C', 'Win'),
('9D 3S 2C 7S 7C', 'JC TD 3C TC 9H', 'Loss'),
)
TEST_FLUSH = (
('2H 3H 4H 5H 6H', True),
('AS AH 2H AD AC', False),
('2H 3H 5H 6H 7H', True),
('KS AS TS QS JS', True),
('8H 9H QS JS TH', False),
('AS 3S 4S 8S 2S', True),
)
TEST_STRAIGHT = (
('2H 3H 4H 5H 6H', True),
('AS AH 2H AD AC', False),
('2H 3H 5H 6H 7H', False),
('KS AS TS QS JS', True),
('8H 9H QS JS TH', True),
)
TEST_FIVE_HIGH_STRAIGHT = (
('2H 4D 3C AS 5S', True, [5, 4, 3, 2, 14]),
('2H 5D 3C AS 5S', False, [14, 5, 5, 3, 2]),
('JH QD KC AS TS', False, [14, 13, 12, 11, 10]),
('9D 3S 2C 7S 7C', False, [9, 7, 7, 3, 2]),
)
TEST_KIND = (
('JH AH TH KH QH', 0),
('JH 9H TH KH QH', 0),
('JC KH JS JD JH', 7),
('KH KC 3S 3H 3D', 6),
('8C 9C 5C 3C TC', 0),
('JS QS 9H TS KH', 0),
('7C 7S KH 2H 7H', 3),
('3C KH 5D 5S KH', 2),
('QH 8H KD JH 8S', 1),
('2D 6D 9D TH 7D', 0),
)
TEST_TYPES = (
('JH AH TH KH QH', 23),
('JH 9H TH KH QH', 22),
('JC KH JS JD JH', 21),
('KH KC 3S 3H 3D', 20),
('8C 9C 5C 3C TC', 19),
('JS QS 9H TS KH', 18),
('7C 7S KH 2H 7H', 17),
('3C KH 5D 5S KH', 16),
('QH 8H KD JH 8S', 15),
('2D 6D 9D TH 7D', 14),
)
def generate_random_hand() -> tuple[str, str, str]:
    """simple docstring"""
    play, oppo = randrange(len(SORTED_HANDS ) ), randrange(len(SORTED_HANDS ) )
    expected = ['''Loss''', '''Tie''', '''Win'''][(play >= oppo) + (play > oppo)]
    hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected
def generate_random_hands(number_of_hands: int = 100 ):
    """simple docstring"""
    return (generate_random_hand() for _ in range(number_of_hands ))
@pytest.mark.parametrize('''hand, expected''' , TEST_FLUSH )
def test_hand_is_flush(hand , expected ):
    """simple docstring"""
    assert PokerHand(hand )._is_flush() == expected
@pytest.mark.parametrize('''hand, expected''' , TEST_STRAIGHT )
def test_hand_is_straight(hand , expected ):
    """simple docstring"""
    assert PokerHand(hand )._is_straight() == expected
@pytest.mark.parametrize('''hand, expected, card_values''' , TEST_FIVE_HIGH_STRAIGHT )
def test_hand_is_five_high_straight(hand , expected , card_values ):
    """simple docstring"""
    player = PokerHand(hand )
    assert player._is_five_high_straight() == expected
    assert player._card_values == card_values
@pytest.mark.parametrize('''hand, expected''' , TEST_KIND )
def test_hand_is_same_kind(hand , expected ):
    """simple docstring"""
    assert PokerHand(hand )._is_same_kind() == expected
@pytest.mark.parametrize('''hand, expected''' , TEST_TYPES )
def test_hand_values(hand , expected ):
    """simple docstring"""
    assert PokerHand(hand )._hand_type == expected
@pytest.mark.parametrize('''hand, other, expected''' , TEST_COMPARE )
def test_hand_compare_with(hand , other , expected ):
    """simple docstring"""
    assert PokerHand(hand ).compare_with(PokerHand(other ) ) == expected
@pytest.mark.parametrize('''hand, other, expected''' , generate_random_hands() )
def test_hand_compare_with_random(hand , other , expected ):
    """simple docstring"""
    assert PokerHand(hand ).compare_with(PokerHand(other ) ) == expected
def test_hand_sorted():
    """simple docstring"""
    poker_hands = [PokerHand(hand ) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy )
    user_sorted = chain(sorted(list_copy ) )
    for index, hand in enumerate(user_sorted ):
        assert hand == poker_hands[index]
def test_custom_sort_five_high_straight():
    """simple docstring"""
    pokerhands = [PokerHand('''2D AC 3H 4H 5S''' ), PokerHand('''2S 3H 4H 5S 6C''' )]
    pokerhands.sort(reverse=True )
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def test_multiple_calls_five_high_straight():
    """simple docstring"""
    pokerhand = PokerHand('''2C 4S AS 3D 5C''' )
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
    for _ in range(10 ):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values
def test_euler_project():
    """simple docstring"""
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__ ) )
    poker_hands_path = os.path.join(script_dir , '''poker_hands.txt''' )
    with open(poker_hands_path ) as file_hand:
        for line in file_hand:
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player, opponent = PokerHand(player_hand ), PokerHand(opponent_hand )
            output = player.compare_with(opponent )
            if output == "Win":
                answer += 1
    assert answer == 376
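# Standalone illustration (not part of the upstream tests) of the branchless
# comparison used in generate_random_hand() above: (play >= oppo) + (play > oppo)
# evaluates to 0, 1 or 2, indexing into ['Loss', 'Tie', 'Win'].
def _expected_result_demo(play: int, oppo: int) -> str:
    return ["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)]
assert _expected_result_demo(0, 5) == "Loss"
assert _expected_result_demo(3, 3) == "Tie"
assert _expected_result_demo(5, 0) == "Win"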
| 2 | 0 |
from __future__ import annotations
import os
from typing import Any
import requests
lowerCAmelCase : Tuple = """https://api.github.com"""
# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
lowerCAmelCase : Union[str, Any] = BASE_URL + """/user"""
# https://github.com/settings/tokens
lowerCAmelCase : Dict = os.environ.get("""USER_TOKEN""", """""")
def A_ ( _UpperCAmelCase ):
SCREAMING_SNAKE_CASE_: Tuple = {
"Authorization": f"token {auth_token}",
"Accept": "application/vnd.github.v3+json",
}
return requests.get(_UpperCAmelCase , headers=_UpperCAmelCase ).json()
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(f'''{key}: {value}''')
else:
raise ValueError("""'USER_TOKEN' field cannot be empty.""")
| 13 |
'''simple docstring'''
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
description=(
'Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned'
' Distillation'
)
)
parser.add_argument('--model_type', default='bert', choices=['bert'])
parser.add_argument('--model_name', default='bert-base-uncased', type=str)
parser.add_argument('--dump_checkpoint', default='serialization_dir/tf_bert-base-uncased_0247911.pth', type=str)
parser.add_argument('--vocab_transform', action='store_true')
    args = parser.parse_args()
    if args.model_type == "bert":
        model = BertForMaskedLM.from_pretrained(args.model_name)
        prefix = 'bert'
    else:
        raise ValueError('args.model_type should be "bert".')
    state_dict = model.state_dict()
    compressed_sd = {}
    for w in ["word_embeddings", "position_embeddings"]:
        compressed_sd[f"""distilbert.embeddings.{w}.weight"""] = state_dict[f"""{prefix}.embeddings.{w}.weight"""]
    for w in ["weight", "bias"]:
        compressed_sd[f"""distilbert.embeddings.LayerNorm.{w}"""] = state_dict[f"""{prefix}.embeddings.LayerNorm.{w}"""]
    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        for w in ["weight", "bias"]:
            compressed_sd[f"""distilbert.transformer.layer.{std_idx}.attention.q_lin.{w}"""] = state_dict[
                f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"""
            ]
            compressed_sd[f"""distilbert.transformer.layer.{std_idx}.attention.k_lin.{w}"""] = state_dict[
                f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"""
            ]
            compressed_sd[f"""distilbert.transformer.layer.{std_idx}.attention.v_lin.{w}"""] = state_dict[
                f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"""
            ]
            compressed_sd[f"""distilbert.transformer.layer.{std_idx}.attention.out_lin.{w}"""] = state_dict[
                f"""{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"""
            ]
            compressed_sd[f"""distilbert.transformer.layer.{std_idx}.sa_layer_norm.{w}"""] = state_dict[
                f"""{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"""
            ]
            compressed_sd[f"""distilbert.transformer.layer.{std_idx}.ffn.lin1.{w}"""] = state_dict[
                f"""{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"""
            ]
            compressed_sd[f"""distilbert.transformer.layer.{std_idx}.ffn.lin2.{w}"""] = state_dict[
                f"""{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"""
            ]
            compressed_sd[f"""distilbert.transformer.layer.{std_idx}.output_layer_norm.{w}"""] = state_dict[
                f"""{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"""
            ]
        std_idx += 1
    compressed_sd["""vocab_projector.weight"""] = state_dict['cls.predictions.decoder.weight']
    compressed_sd["""vocab_projector.bias"""] = state_dict['cls.predictions.bias']
    if args.vocab_transform:
        for w in ["weight", "bias"]:
            compressed_sd[f"""vocab_transform.{w}"""] = state_dict[f"""cls.predictions.transform.dense.{w}"""]
            compressed_sd[f"""vocab_layer_norm.{w}"""] = state_dict[f"""cls.predictions.transform.LayerNorm.{w}"""]
print(f"""N layers selected for distillation: {std_idx}""")
print(f"""Number of params transferred for distillation: {len(compressed_sd.keys())}""")
print(f"""Save transferred checkpoint to {args.dump_checkpoint}.""")
torch.save(compressed_sd, args.dump_checkpoint)
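    # Hedged follow-up sketch (not part of this script): the keys written above follow
    # DistilBERT's parameter naming, so the dump can seed a student model for
    # distillation; the bare default config below is illustrative only.
    #
    #   from transformers import DistilBertConfig, DistilBertForMaskedLM
    #   student = DistilBertForMaskedLM(DistilBertConfig())
    #   student.load_state_dict(torch.load(args.dump_checkpoint), strict=False)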
| 2 | 0 |
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
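# Hedged usage sketch for the pipelines re-exported above; the model ids are
# illustrative, not pinned by this module.
#
#   from diffusers import ControlNetModel, StableDiffusionControlNetPipeline
#   controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
#   pipe = StableDiffusionControlNetPipeline.from_pretrained(
#       "runwayml/stable-diffusion-v1-5", controlnet=controlnet
#   )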
| 14 |
'''simple docstring'''
from ....utils import logging
lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
class __lowerCAmelCase :
'''simple docstring'''
    def __init__(self , config , num_labels=None , modal_hidden_size=2048 ):
        '''simple docstring'''
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels
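# Minimal usage sketch: the wrapper copies an existing text config's attributes and
# grafts the modality-specific fields on top. BertConfig is only an illustrative donor.
#
#   from transformers import BertConfig
#   multimodal_config = __lowerCAmelCase(BertConfig(), num_labels=2, modal_hidden_size=2048)
#   assert multimodal_config.modal_hidden_size == 2048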
| 2 | 0 |
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir('fixtures')
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
    def test_cached_files_are_used_when_internet_is_down( self ):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 5_00
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        feature_extractor = WavaVecaFeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2" )
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request" ,return_value=response_mock ) as mock_head:
            feature_extractor = WavaVecaFeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2" )
        # This check we did call the fake head request
        mock_head.assert_called()
def UpperCamelCase_ ( self : Optional[int] ):
# This test is for deprecated behavior and can be removed in v5
__A = WavaVecaFeatureExtractor.from_pretrained(
"https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json" )
@is_staging_test
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@classmethod
    def setUpClass( cls ):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN )
@classmethod
    def tearDownClass( cls ):
try:
delete_repo(token=cls._token ,repo_id="test-feature-extractor" )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id="valid_org/test-feature-extractor-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id="test-dynamic-feature-extractor" )
except HTTPError:
pass
    def test_push_to_hub( self ):
        feature_extractor = WavaVecaFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR )
        feature_extractor.push_to_hub("test-feature-extractor" ,use_auth_token=self._token )
        new_feature_extractor = WavaVecaFeatureExtractor.from_pretrained(f'''{USER}/test-feature-extractor''' )
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v ,getattr(new_feature_extractor ,k ) )
        # Reset repo
        delete_repo(token=self._token ,repo_id="test-feature-extractor" )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir ,repo_id="test-feature-extractor" ,push_to_hub=True ,use_auth_token=self._token )
        new_feature_extractor = WavaVecaFeatureExtractor.from_pretrained(f'''{USER}/test-feature-extractor''' )
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v ,getattr(new_feature_extractor ,k ) )
    def test_push_to_hub_in_organization( self ):
        feature_extractor = WavaVecaFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR )
        feature_extractor.push_to_hub("valid_org/test-feature-extractor" ,use_auth_token=self._token )
        new_feature_extractor = WavaVecaFeatureExtractor.from_pretrained("valid_org/test-feature-extractor" )
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v ,getattr(new_feature_extractor ,k ) )
        # Reset repo
        delete_repo(token=self._token ,repo_id="valid_org/test-feature-extractor" )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir ,repo_id="valid_org/test-feature-extractor-org" ,push_to_hub=True ,use_auth_token=self._token )
        new_feature_extractor = WavaVecaFeatureExtractor.from_pretrained("valid_org/test-feature-extractor-org" )
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v ,getattr(new_feature_extractor ,k ) )
    def test_push_to_hub_dynamic_feature_extractor( self ):
        CustomFeatureExtractor.register_for_auto_class()
        feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR )
        feature_extractor.push_to_hub("test-dynamic-feature-extractor" ,use_auth_token=self._token )
        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            feature_extractor.auto_map ,{"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor"} ,)
        new_feature_extractor = AutoFeatureExtractor.from_pretrained(
            f'''{USER}/test-dynamic-feature-extractor''' ,trust_remote_code=True )
        # Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
        self.assertEqual(new_feature_extractor.__class__.__name__ ,"CustomFeatureExtractor" )
| 15 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : Tuple = logging.get_logger(__name__)
lowerCamelCase : Dict = {
'microsoft/cvt-13': 'https://huggingface.co/microsoft/cvt-13/resolve/main/config.json',
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class __lowerCAmelCase (PretrainedConfig ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = """cvt"""
def __init__(self : int , UpperCamelCase : List[Any]=3 , UpperCamelCase : int=[7, 3, 3] , UpperCamelCase : str=[4, 2, 2] , UpperCamelCase : Dict=[2, 1, 1] , UpperCamelCase : Dict=[64, 192, 384] , UpperCamelCase : Dict=[1, 3, 6] , UpperCamelCase : Dict=[1, 2, 10] , UpperCamelCase : Any=[4.0, 4.0, 4.0] , UpperCamelCase : Optional[int]=[0.0, 0.0, 0.0] , UpperCamelCase : Optional[int]=[0.0, 0.0, 0.0] , UpperCamelCase : int=[0.0, 0.0, 0.1] , UpperCamelCase : Any=[True, True, True] , UpperCamelCase : int=[False, False, True] , UpperCamelCase : Union[str, Any]=["dw_bn", "dw_bn", "dw_bn"] , UpperCamelCase : Optional[int]=[3, 3, 3] , UpperCamelCase : Tuple=[1, 1, 1] , UpperCamelCase : Any=[2, 2, 2] , UpperCamelCase : Dict=[1, 1, 1] , UpperCamelCase : List[str]=[1, 1, 1] , UpperCamelCase : str=0.02 , UpperCamelCase : int=1E-12 , **UpperCamelCase : Union[str, Any] , ):
'''simple docstring'''
super().__init__(**UpperCamelCase )
lowercase__ = num_channels
lowercase__ = patch_sizes
lowercase__ = patch_stride
lowercase__ = patch_padding
lowercase__ = embed_dim
lowercase__ = num_heads
lowercase__ = depth
lowercase__ = mlp_ratio
lowercase__ = attention_drop_rate
lowercase__ = drop_rate
lowercase__ = drop_path_rate
lowercase__ = qkv_bias
lowercase__ = cls_token
lowercase__ = qkv_projection_method
lowercase__ = kernel_qkv
lowercase__ = padding_kv
lowercase__ = stride_kv
lowercase__ = padding_q
lowercase__ = stride_q
lowercase__ = initializer_range
lowercase__ = layer_norm_eps
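# Illustrative sketch (hypothetical values): the class above behaves like any
# PretrainedConfig subclass, so a three-stage variant can be built and serialized.
#
#   config = __lowerCAmelCase(embed_dim=[64, 192, 384], depth=[1, 2, 10])
#   config.save_pretrained("./cvt-config")  # writes config.json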
| 2 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_sew': ['SEW_PRETRAINED_CONFIG_ARCHIVE_MAP', 'SEWConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_sew'] = [
'SEW_PRETRAINED_MODEL_ARCHIVE_LIST',
'SEWForCTC',
'SEWForSequenceClassification',
'SEWModel',
'SEWPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_sew import (
SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
SEWForCTC,
SEWForSequenceClassification,
SEWModel,
SEWPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 16 |
'''simple docstring'''
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
# Initialising the CNN
# (Sequential- Building the model layer by layer)
    classifier = models.Sequential()
# Step 1 - Convolution
# Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
# (3,3) is the kernel size (filter matrix)
classifier.add(
layers.ConvaD(32, (3, 3), input_shape=(64, 64, 3), activation='relu')
)
# Step 2 - Pooling
classifier.add(layers.MaxPoolingaD(pool_size=(2, 2)))
# Adding a second convolutional layer
classifier.add(layers.ConvaD(32, (3, 3), activation='relu'))
classifier.add(layers.MaxPoolingaD(pool_size=(2, 2)))
# Step 3 - Flattening
classifier.add(layers.Flatten())
# Step 4 - Full connection
classifier.add(layers.Dense(units=128, activation='relu'))
classifier.add(layers.Dense(units=1, activation='sigmoid'))
# Compiling the CNN
classifier.compile(
optimizer='adam', loss='binary_crossentropy', metrics=['accuracy']
)
# Part 2 - Fitting the CNN to the images
# Load Trained model weights
# from keras.models import load_model
# regressor=load_model('cnn.h5')
    train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
        rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
    )
    test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)
    training_set = train_datagen.flow_from_directory(
        'dataset/training_set', target_size=(64, 64), batch_size=32, class_mode='binary'
    )
    test_set = test_datagen.flow_from_directory(
        'dataset/test_set', target_size=(64, 64), batch_size=32, class_mode='binary'
    )
classifier.fit_generator(
training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
)
classifier.save('cnn.h5')
# Part 3 - Making new predictions
    test_image = tf.keras.preprocessing.image.load_img(
        'dataset/single_prediction/image.png', target_size=(64, 64)
    )
    test_image = tf.keras.preprocessing.image.img_to_array(test_image)
    test_image = np.expand_dims(test_image, axis=0)
    result = classifier.predict(test_image)
    # training_set.class_indices
    if result[0][0] == 0:
        prediction = 'Normal'
    if result[0][0] == 1:
        prediction = 'Abnormality detected'
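    # Hedged note (not from the original script): the network ends in a sigmoid unit,
    # so result[0][0] is a probability in [0, 1]; a thresholded variant is sturdier
    # than the exact comparisons above:
    #
    #   prediction = 'Abnormality detected' if result[0][0] >= 0.5 else 'Normal'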
| 2 | 0 |
"""simple docstring"""
def manhattan_distance( point_a: list, point_b: list) -> float:
    '''simple docstring'''
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")
    return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))
def _validate_point( point: list[float]) -> None:
    '''simple docstring'''
    if point:
        if isinstance(point, list):
            for item in point:
                if not isinstance(item, (int, float)):
                    msg = (
                        "Expected a list of numbers as input, found "
                        F"""{type(item).__name__}"""
                    )
                    raise TypeError(msg)
        else:
            msg = F"""Expected a list of numbers as input, found {type(point).__name__}"""
            raise TypeError(msg)
    else:
        raise ValueError("Missing an input")
def manhattan_distance_one_liner( point_a: list, point_b: list) -> float:
    '''simple docstring'''
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")
    return float(sum(abs(x - y) for x, y in zip(point_a, point_b)))
if __name__ == "__main__":
import doctest
doctest.testmod()
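    # Quick sanity checks for the helpers above (both sums are exact in binary floats):
    assert manhattan_distance([1, 1], [2, 2]) == 2.0
    assert manhattan_distance_one_liner([1.5, 2], [3, 0]) == 3.5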
| 17 |
'''simple docstring'''
class __lowerCAmelCase : # Public class to implement a graph
'''simple docstring'''
    def __init__(self , row: int , col: int , graph: list[list[bool]] ):
        '''simple docstring'''
        self.ROW = row
        self.COL = col
        self.graph = graph
    def is_safe(self , i: int , j: int , visited: list[list[bool]] ):
        '''simple docstring'''
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )
    def diffs(self , i: int , j: int , visited: list[list[bool]] ):
        '''simple docstring'''
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make those cells visited
        for k in range(8 ):
            if self.is_safe(i + row_nbr[k] , j + col_nbr[k] , visited ):
                self.diffs(i + row_nbr[k] , j + col_nbr[k] , visited )
    def count_islands(self ):  # And finally, count all islands.
        '''simple docstring'''
        visited = [[False for j in range(self.COL )] for i in range(self.ROW )]
        count = 0
        for i in range(self.ROW ):
            for j in range(self.COL ):
                if visited[i][j] is False and self.graph[i][j] == 1:
                    self.diffs(i , j , visited )
                    count += 1
        return count
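# Minimal usage sketch for the graph class above; the grid is illustrative: the two
# 1-cells are not 8-adjacent, so they form two islands.
if __name__ == "__main__":
    example_grid = [
        [1, 0, 0],
        [0, 0, 0],
        [0, 0, 1],
    ]
    example_graph = __lowerCAmelCase(3, 3, example_grid)
    print(example_graph.count_islands())  # expected output: 2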
| 2 | 0 |
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
logger = logging.get_logger(__name__)
def run_with_tf_optimizations( do_eager_mode: bool , use_xla: bool ):
    """simple docstring"""
    def run_func(func ):
        @wraps(func )
        def run_in_eager_mode(*args , **kwargs ):
            return func(*args , **kwargs )
        @wraps(func )
        @tf.function(experimental_compile=use_xla )
        def run_in_graph_mode(*args , **kwargs ):
            return func(*args , **kwargs )
        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    "Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`." )
            return run_in_eager_mode
        else:
            return run_in_graph_mode
    return run_func
def random_input_ids( batch_size: int , sequence_length: int , vocab_size: int ):
    """simple docstring"""
    rng = random.Random()
    values = [rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )]
    return tf.constant(values , shape=(batch_size, sequence_length) , dtype=tf.intaa )
class a__ ( Benchmark ):
A = 42
A = 42
A = "TensorFlow"
@property
def __UpperCamelCase ( self : str ):
"""simple docstring"""
return tf.__version__
def __UpperCamelCase ( self : int,_A : str,_A : int,_A : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
SCREAMING_SNAKE_CASE_ : Any = self._prepare_inference_func(_A,_A,_A )
return self._measure_speed(_inference )
def __UpperCamelCase ( self : str,_A : str,_A : int,_A : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
SCREAMING_SNAKE_CASE_ : List[Any] = self._prepare_train_func(_A,_A,_A )
return self._measure_speed(_train )
def __UpperCamelCase ( self : Dict,_A : str,_A : int,_A : int ):
"""simple docstring"""
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx],_A )
SCREAMING_SNAKE_CASE_ : Tuple = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
SCREAMING_SNAKE_CASE_ : str = self._prepare_inference_func(_A,_A,_A )
return self._measure_memory(_inference )
def __UpperCamelCase ( self : Optional[Any],_A : str,_A : int,_A : int ):
"""simple docstring"""
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx],_A )
SCREAMING_SNAKE_CASE_ : Any = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
SCREAMING_SNAKE_CASE_ : str = self._prepare_train_func(_A,_A,_A )
return self._measure_memory(_train )
def __UpperCamelCase ( self : int,_A : str,_A : int,_A : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = self.config_dict[model_name]
if self.args.fpaa:
raise NotImplementedError("Mixed precision is currently not supported." )
SCREAMING_SNAKE_CASE_ : Dict = (
hasattr(_A,"architectures" )
and isinstance(config.architectures,_A )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
SCREAMING_SNAKE_CASE_ : List[Any] = "TF" + config.architectures[0] # prepend 'TF' for tensorflow model
SCREAMING_SNAKE_CASE_ : List[Any] = __import__("transformers",fromlist=[model_class] )
SCREAMING_SNAKE_CASE_ : Tuple = getattr(_A,_A )
SCREAMING_SNAKE_CASE_ : List[str] = model_cls(_A )
except ImportError:
raise ImportError(
F'{model_class} does not exist. If you just want to test the pretrained model, you might want to'
" set `--only_pretrain_model` or `args.only_pretrain_model=True`." )
else:
SCREAMING_SNAKE_CASE_ : Optional[int] = TF_MODEL_MAPPING[config.__class__](_A )
# encoder-decoder has vocab size saved differently
SCREAMING_SNAKE_CASE_ : Tuple = config.vocab_size if hasattr(_A,"vocab_size" ) else config.encoder.vocab_size
SCREAMING_SNAKE_CASE_ : str = random_input_ids(_A,_A,_A )
@run_with_tf_optimizations(self.args.eager_mode,self.args.use_xla )
def encoder_decoder_forward():
return model(_A,decoder_input_ids=_A,training=_A )
@run_with_tf_optimizations(self.args.eager_mode,self.args.use_xla )
def encoder_forward():
return model(_A,training=_A )
SCREAMING_SNAKE_CASE_ : Tuple = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
return _inference
def __UpperCamelCase ( self : Dict,_A : str,_A : int,_A : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.config_dict[model_name]
if self.args.eager_mode is not False:
raise ValueError("Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`." )
if self.args.fpaa:
raise NotImplementedError("Mixed precision is currently not supported." )
SCREAMING_SNAKE_CASE_ : Dict = (
hasattr(_A,"architectures" )
and isinstance(config.architectures,_A )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
SCREAMING_SNAKE_CASE_ : Dict = "TF" + config.architectures[0] # prepend 'TF' for tensorflow model
SCREAMING_SNAKE_CASE_ : Tuple = __import__("transformers",fromlist=[model_class] )
SCREAMING_SNAKE_CASE_ : Optional[int] = getattr(_A,_A )
SCREAMING_SNAKE_CASE_ : List[Any] = model_cls(_A )
except ImportError:
raise ImportError(
F'{model_class} does not exist. If you just want to test the pretrained model, you might want to'
" set `--only_pretrain_model` or `args.only_pretrain_model=True`." )
else:
SCREAMING_SNAKE_CASE_ : Any = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](_A )
# encoder-decoder has vocab size saved differently
SCREAMING_SNAKE_CASE_ : Any = config.vocab_size if hasattr(_A,"vocab_size" ) else config.encoder.vocab_size
SCREAMING_SNAKE_CASE_ : int = random_input_ids(_A,_A,_A )
@run_with_tf_optimizations(self.args.eager_mode,self.args.use_xla )
def encoder_decoder_train():
SCREAMING_SNAKE_CASE_ : Dict = model(_A,decoder_input_ids=_A,labels=_A,training=_A )[0]
SCREAMING_SNAKE_CASE_ : Optional[Any] = tf.gradients(_A,model.trainable_variables )
return gradients
@run_with_tf_optimizations(self.args.eager_mode,self.args.use_xla )
def encoder_train():
SCREAMING_SNAKE_CASE_ : Tuple = model(_A,labels=_A,training=_A )[0]
SCREAMING_SNAKE_CASE_ : Tuple = tf.gradients(_A,model.trainable_variables )
return gradients
SCREAMING_SNAKE_CASE_ : List[Any] = encoder_decoder_train if config.is_encoder_decoder else encoder_train
return _train
def __UpperCamelCase ( self : int,_A : Any ):
"""simple docstring"""
with self.args.strategy.scope():
try:
if self.args.is_tpu or self.args.use_xla:
# run additional 10 times to stabilize compilation for tpu
logger.info("Do inference on TPU. Running model 5 times to stabilize compilation" )
timeit.repeat(_A,repeat=1,number=5 )
# as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
SCREAMING_SNAKE_CASE_ : Optional[int] = timeit.repeat(
_A,repeat=self.args.repeat,number=10,)
return min(_A ) / 10.0
except ResourceExhaustedError as e:
self.print_fn(F'Doesn\'t fit on GPU. {e}' )
def __UpperCamelCase ( self : List[str],_A : Callable[[], None] ):
"""simple docstring"""
logger.info(
"Note that TensorFlow allocates more memory than "
"it might need to speed up computation. "
"The memory reported here corresponds to the memory "
"reported by `nvidia-smi`, which can vary depending "
"on total available memory on the GPU that is used." )
with self.args.strategy.scope():
try:
if self.args.trace_memory_line_by_line:
if not self.args.eager_mode:
raise ValueError(
"`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"
" consumption line by line." )
SCREAMING_SNAKE_CASE_ : List[str] = start_memory_tracing("transformers" )
if self.args.is_tpu:
# tpu
raise NotImplementedError(
"Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"
" with `args.memory=False`" )
elif self.args.is_gpu:
# gpu
if not is_pyanvml_available():
logger.warning(
"py3nvml not installed, we won't log GPU memory usage. "
"Install py3nvml (pip install py3nvml) to log information about GPU." )
SCREAMING_SNAKE_CASE_ : Optional[int] = "N/A"
else:
logger.info(
"Measuring total GPU usage on GPU device. Make sure to not have additional processes"
" running on the same GPU." )
# init nvml
nvml.nvmlInit()
func()
SCREAMING_SNAKE_CASE_ : str = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = nvml.nvmlDeviceGetMemoryInfo(_A )
SCREAMING_SNAKE_CASE_ : Any = meminfo.used
SCREAMING_SNAKE_CASE_ : List[Any] = Memory(_A )
# shutdown nvml
nvml.nvmlShutdown()
else:
# cpu
if self.args.trace_memory_line_by_line:
logger.info(
"When enabling line by line tracing, the max peak memory for CPU is inaccurate in"
" TensorFlow." )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = None
else:
SCREAMING_SNAKE_CASE_ : Optional[Any] = measure_peak_memory_cpu(_A )
SCREAMING_SNAKE_CASE_ : str = Memory(_A ) if isinstance(_A,_A ) else memory_bytes
if self.args.trace_memory_line_by_line:
SCREAMING_SNAKE_CASE_ : Optional[int] = stop_memory_tracing(_A )
if memory is None:
SCREAMING_SNAKE_CASE_ : Optional[Any] = summary.total
else:
SCREAMING_SNAKE_CASE_ : Optional[int] = None
return memory, summary
except ResourceExhaustedError as e:
self.print_fn(F'Doesn\'t fit on GPU. {e}' )
return "N/A", None
| 18 |
'''simple docstring'''
import unittest
from transformers import DonutProcessor
DONUT_PRETRAINED_MODEL_NAME = 'naver-clova-ix/donut-base'
class __lowerCAmelCase (unittest.TestCase ):
'''simple docstring'''
    def setUp(self ):
        '''simple docstring'''
        self.processor = DonutProcessor.from_pretrained(DONUT_PRETRAINED_MODEL_NAME )
    def test_tokenajson(self ):
        '''simple docstring'''
        expected_json = {
            '''name''': '''John Doe''',
            '''age''': '''99''',
            '''city''': '''Atlanta''',
            '''state''': '''GA''',
            '''zip''': '''30301''',
            '''phone''': '''123-4567''',
            '''nicknames''': [{'''nickname''': '''Johnny'''}, {'''nickname''': '''JD'''}],
        }
        sequence = (
            '''<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>'''
            '''<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>'''
            '''<s_nicknames><s_nickname>Johnny</s_nickname>'''
            '''<sep/><s_nickname>JD</s_nickname></s_nicknames>'''
        )
        actual_json = self.processor.tokenajson(sequence )
        self.assertDictEqual(actual_json , expected_json )
| 2 | 0 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class DeformableDetrImageProcessingTester ( unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , num_channels=3 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , do_rescale=True , rescale_factor=1 / 255 , do_pad=True , ) -> None:
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict( self ) -> dict:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values( self , image_inputs , batched=False ):
        if not batched:
            image = image_inputs[0]
            if isinstance(image , Image.Image ):
                w , h = image.size
            else:
                h , w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w )
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h )
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height , expected_width = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            expected_height = max(expected_values , key=lambda item : item[0] )[0]
            expected_width = max(expected_values , key=lambda item : item[1] )[1]
        return expected_height, expected_width
@require_torch
@require_vision
class DeformableDetrImageProcessingTest ( ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = DeformableDetrImageProcessor if is_vision_available() else None
    def setUp( self ):
        self.image_processor_tester = DeformableDetrImageProcessingTester(self )
    @property
    def image_processor_dict( self ):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ):
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , "image_mean" ) )
        self.assertTrue(hasattr(image_processing , "image_std" ) )
        self.assertTrue(hasattr(image_processing , "do_normalize" ) )
        self.assertTrue(hasattr(image_processing , "do_resize" ) )
        self.assertTrue(hasattr(image_processing , "do_rescale" ) )
        self.assertTrue(hasattr(image_processing , "do_pad" ) )
        self.assertTrue(hasattr(image_processing , "size" ) )
    def test_image_processor_from_dict_with_kwargs( self ):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 1333} )
        self.assertEqual(image_processor.do_pad , True )
        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=False )
        self.assertEqual(image_processor.size , {"shortest_edge": 42, "longest_edge": 84} )
        self.assertEqual(image_processor.do_pad , False )
    def test_batch_feature( self ):
        pass
    def test_call_pil( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        expected_height , expected_width = self.image_processor_tester.get_expected_values(image_inputs )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        expected_height , expected_width = self.image_processor_tester.get_expected_values(image_inputs , batched=True )
        encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    def test_call_numpy( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        expected_height , expected_width = self.image_processor_tester.get_expected_values(image_inputs )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
        expected_height , expected_width = self.image_processor_tester.get_expected_values(image_inputs , batched=True )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    def test_call_pytorch( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        expected_height , expected_width = self.image_processor_tester.get_expected_values(image_inputs )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
        expected_height , expected_width = self.image_processor_tester.get_expected_values(image_inputs , batched=True )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    @slow
    def test_call_pytorch_with_coco_detection_annotations( self ):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
            target = json.loads(f.read() )
        target = {"image_id": 39769, "annotations": target}
        # encode them
        image_processing = DeformableDetrImageProcessor()
        encoding = image_processing(images=image , annotations=target , return_tensors="pt" )
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066] )
        self.assertEqual(encoding["pixel_values"].shape , expected_shape )
        expected_slice = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , expected_slice , atol=1e-4 ) )
        # verify area
        expected_area = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , expected_area ) )
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4] )
        self.assertEqual(encoding["labels"][0]["boxes"].shape , expected_boxes_shape )
        expected_boxes_slice = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , expected_boxes_slice , atol=1e-3 ) )
        # verify image_id
        expected_image_id = torch.tensor([39769] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , expected_image_id ) )
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , expected_is_crowd ) )
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , expected_class_labels ) )
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , expected_orig_size ) )
        # verify size
        expected_size = torch.tensor([800, 1066] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , expected_size ) )
    @slow
    def test_call_pytorch_with_coco_panoptic_annotations( self ):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
            target = json.loads(f.read() )
        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}
        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
        # encode them
        image_processing = DeformableDetrImageProcessor(format="coco_panoptic" )
        encoding = image_processing(images=image , annotations=target , masks_path=masks_path , return_tensors="pt" )
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066] )
        self.assertEqual(encoding["pixel_values"].shape , expected_shape )
        expected_slice = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , expected_slice , atol=1e-4 ) )
        # verify area
        expected_area = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , expected_area ) )
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4] )
        self.assertEqual(encoding["labels"][0]["boxes"].shape , expected_boxes_shape )
        expected_boxes_slice = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , expected_boxes_slice , atol=1e-3 ) )
        # verify image_id
        expected_image_id = torch.tensor([39769] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , expected_image_id ) )
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , expected_is_crowd ) )
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , expected_class_labels ) )
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item() , expected_masks_sum )
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , expected_orig_size ) )
        # verify size
        expected_size = torch.tensor([800, 1066] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , expected_size ) )
| 19 |
'''simple docstring'''
from __future__ import annotations
def all_unique(input_list: list ) -> bool:
    """simple docstring"""
    return len(set(input_list ) ) == len(input_list )
if __name__ == "__main__":
import doctest
doctest.testmod()
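    # Usage sketch for all_unique() above:
    assert all_unique([1, 2, 3]) is True
    assert all_unique([1, 2, 2]) is False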
| 2 | 0 |
class SubArray :
    def __init__( self ,arr ):
        '''simple docstring'''
        self.array = arr.split(""",""" )
    def solve_sub_array( self ):
        '''simple docstring'''
        sum_value = [int(self.array[0] )] * len(self.array )
        rear = [int(self.array[0] )] * len(self.array )
        for i in range(1 ,len(self.array ) ):
            sum_value[i] = max(
                int(self.array[i] ) + sum_value[i - 1] ,int(self.array[i] ) )
            rear[i] = max(sum_value[i] ,rear[i - 1] )
        return rear[len(self.array ) - 1]
if __name__ == "__main__":
    whole_array = input("""please input some numbers:""")
    array = SubArray(whole_array)
    re = array.solve_sub_array()
print(("""the results is:""", re))
| 20 |
'''simple docstring'''
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_ta import TaTokenizer
else:
lowerCamelCase : Any = None
logger = logging.get_logger(__name__)
lowerCamelCase : Optional[int] = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
lowerCamelCase : List[str] = {
'vocab_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/spiece.model',
't5-base': 'https://huggingface.co/t5-base/resolve/main/spiece.model',
't5-large': 'https://huggingface.co/t5-large/resolve/main/spiece.model',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/spiece.model',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/spiece.model',
},
'tokenizer_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/tokenizer.json',
't5-base': 'https://huggingface.co/t5-base/resolve/main/tokenizer.json',
't5-large': 'https://huggingface.co/t5-large/resolve/main/tokenizer.json',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/tokenizer.json',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/tokenizer.json',
},
}
# TODO(PVP) - this should be removed in Transformers v5
lowerCamelCase : Any = {
't5-small': 512,
't5-base': 512,
't5-large': 512,
't5-3b': 512,
't5-11b': 512,
}
class TaTokenizerFast (PreTrainedTokenizerFast ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]
    slow_tokenizer_class = TaTokenizer
    prefix_tokens: List[int] = []
    def __init__(self , vocab_file=None , tokenizer_file=None , eos_token="</s>" , unk_token="<unk>" , pad_token="<pad>" , extra_ids=100 , additional_special_tokens=None , **kwargs , ):
        '''simple docstring'''
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids )]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra special tokens
            extra_tokens = len(set(filter(lambda x : bool('''extra_id_''' in str(x ) ) , additional_special_tokens ) ) )
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    ''' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'''
                    ''' tokens''' )
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , eos_token=eos_token , unk_token=unk_token , pad_token=pad_token , extra_ids=extra_ids , additional_special_tokens=additional_special_tokens , **kwargs , )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        self._extra_ids = extra_ids
@staticmethod
    def _eventually_correct_ta_max_length(pretrained_model_name_or_path , max_model_length , init_max_model_length ):
        '''simple docstring'''
        if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
            deprecated_max_model_length = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
'''This tokenizer was incorrectly instantiated with a model max length of'''
f" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
''' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'''
''' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'''
f" {pretrained_model_name_or_path} automatically truncating your input to"
f" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
f" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
''' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'''
''' instantiate this tokenizer with `model_max_length` set to your preferred value.''' , UpperCamelCase , )
return max_model_length
    def save_vocabulary(self , save_directory: str , filename_prefix: Optional[str] = None ):
        '''simple docstring'''
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
                '''tokenizer.''' )
        if not os.path.isdir(save_directory ):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
            logger.info(f"Copy vocab file to {out_vocab_file}" )
        return (out_vocab_file,)
    def build_inputs_with_special_tokens(self , token_ids_a: List[int] , token_ids_b: Optional[List[int]] = None ):
        '''simple docstring'''
        token_ids_a = token_ids_a + [self.eos_token_id]
        if token_ids_b is None:
            return self.prefix_tokens + token_ids_a
        else:
            token_ids_b = token_ids_b + [self.eos_token_id]
            return self.prefix_tokens + token_ids_a + token_ids_b
    def create_token_type_ids_from_sequences(self , token_ids_a: List[int] , token_ids_b: Optional[List[int]] = None ):
        '''simple docstring'''
        eos = [self.eos_token_id]
        if token_ids_b is None:
            return len(token_ids_a + eos ) * [0]
        return len(token_ids_a + eos + token_ids_b + eos ) * [0]
    def get_sentinel_tokens(self ):
        '''simple docstring'''
        return list(
            set(filter(lambda token : bool(re.search(R'''<extra_id_\d+>''' , token ) ) is not None , self.additional_special_tokens ) ) )
    def get_sentinel_token_ids(self ):
        '''simple docstring'''
        return [self.convert_tokens_to_ids(token ) for token in self.get_sentinel_tokens()]
| 2 | 0 |
from collections import deque
class Process:
    def __init__( self, process_name, arrival_time, burst_time) -> None:
        """simple docstring"""
        self.process_name = process_name  # process name
        self.arrival_time = arrival_time  # arrival time of the process
        # completion time of finished process or last interrupted time
        self.stop_time = arrival_time
        self.burst_time = burst_time  # remaining burst time
        self.waiting_time = 0  # total time of the process wait in ready queue
        self.turnaround_time = 0  # time from arrival time to completion time
class MLFQ:
    def __init__( self, number_of_queues, time_slices, queue, current_time, ) -> None:
        """simple docstring"""
        self.number_of_queues = number_of_queues
        # time slice of queues that round robin algorithm applied
        self.time_slices = time_slices
        # unfinished process is in this ready_queue
        self.ready_queue = queue
        # current time
        self.current_time = current_time
        # finished process is in this sequence queue
        self.finish_queue: deque[Process] = deque()
    def calculate_sequence_of_finish_queue( self) -> list[str]:
        """simple docstring"""
        sequence = []
        for i in range(len(self.finish_queue)):
            sequence.append(self.finish_queue[i].process_name)
        return sequence
    def calculate_waiting_time( self, queue) -> list[int]:
        """simple docstring"""
        waiting_times = []
        for i in range(len(queue)):
            waiting_times.append(queue[i].waiting_time)
        return waiting_times
    def calculate_turnaround_time( self, queue) -> list[int]:
        """simple docstring"""
        turnaround_times = []
        for i in range(len(queue)):
            turnaround_times.append(queue[i].turnaround_time)
        return turnaround_times
    def calculate_completion_time( self, queue) -> list[int]:
        """simple docstring"""
        completion_times = []
        for i in range(len(queue)):
            completion_times.append(queue[i].stop_time)
        return completion_times
    def calculate_remaining_burst_time_of_processes( self, queue) -> list[int]:
        """simple docstring"""
        return [q.burst_time for q in queue]
    def update_waiting_time( self, process) -> int:
        """simple docstring"""
        process.waiting_time += self.current_time - process.stop_time
        return process.waiting_time
    def first_come_first_served( self, ready_queue) -> deque[Process]:
        """simple docstring"""
        finished: deque[Process] = deque()  # sequence deque of finished process
        while len(ready_queue) != 0:
            cp = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time
            # update waiting time of current process
            self.update_waiting_time(cp)
            # update current time
            self.current_time += cp.burst_time
            # finish the process and set the process's burst-time 0
            cp.burst_time = 0
            # set the process's turnaround time because it is finished
            cp.turnaround_time = self.current_time - cp.arrival_time
            # set the completion time
            cp.stop_time = self.current_time
            # add the process to queue that has finished queue
            finished.append(cp)
        self.finish_queue.extend(finished)  # add finished process to finish queue
        # FCFS will finish all remaining processes
        return finished
    def round_robin( self, ready_queue, time_slice) -> tuple[deque[Process], deque[Process]]:
        """simple docstring"""
        finished: deque[Process] = deque()  # sequence deque of terminated process
        # just for 1 cycle and unfinished processes will go back to queue
        for _ in range(len(ready_queue)):
            cp = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time
            # update waiting time of unfinished processes
            self.update_waiting_time(cp)
            # if the burst time of process is bigger than time-slice
            if cp.burst_time > time_slice:
                # use CPU for only time-slice
                self.current_time += time_slice
                # update remaining burst time
                cp.burst_time -= time_slice
                # update end point time
                cp.stop_time = self.current_time
                # locate the process behind the queue because it is not finished
                ready_queue.append(cp)
            else:
                # use CPU for remaining burst time
                self.current_time += cp.burst_time
                # set burst time 0 because the process is finished
                cp.burst_time = 0
                # set the finish time
                cp.stop_time = self.current_time
                # update the process' turnaround time because it is finished
                cp.turnaround_time = self.current_time - cp.arrival_time
                # add the process to queue that has finished queue
                finished.append(cp)
        self.finish_queue.extend(finished)  # add finished process to finish queue
        # return finished processes queue and remaining processes queue
        return finished, ready_queue
    def multi_level_feedback_queue( self) -> deque[Process]:
        """simple docstring"""
        for i in range(self.number_of_queues - 1):
            finished, self.ready_queue = self.round_robin(
                self.ready_queue, self.time_slices[i])
        # the last queue has first_come_first_served algorithm
        self.first_come_first_served(self.ready_queue)
        return self.finish_queue
if __name__ == "__main__":
import doctest
SCREAMING_SNAKE_CASE : Union[str, Any] = Process("P1", 0, 53)
SCREAMING_SNAKE_CASE : str = Process("P2", 0, 17)
SCREAMING_SNAKE_CASE : Optional[Any] = Process("P3", 0, 68)
SCREAMING_SNAKE_CASE : Optional[Any] = Process("P4", 0, 24)
SCREAMING_SNAKE_CASE : Optional[int] = 3
SCREAMING_SNAKE_CASE : List[str] = [17, 25]
SCREAMING_SNAKE_CASE : List[Any] = deque([Pa, Pa, Pa, Pa])
if len(time_slices) != number_of_queues - 1:
raise SystemExit(0)
doctest.testmod(extraglobs={"queue": deque([Pa, Pa, Pa, Pa])})
SCREAMING_SNAKE_CASE : List[str] = Process("P1", 0, 53)
SCREAMING_SNAKE_CASE : Optional[Any] = Process("P2", 0, 17)
SCREAMING_SNAKE_CASE : List[str] = Process("P3", 0, 68)
SCREAMING_SNAKE_CASE : Tuple = Process("P4", 0, 24)
SCREAMING_SNAKE_CASE : List[str] = 3
SCREAMING_SNAKE_CASE : Union[str, Any] = [17, 25]
SCREAMING_SNAKE_CASE : Optional[Any] = deque([Pa, Pa, Pa, Pa])
SCREAMING_SNAKE_CASE : str = MLFQ(number_of_queues, time_slices, queue, 0)
SCREAMING_SNAKE_CASE : Union[str, Any] = mlfq.multi_level_feedback_queue()
# print total waiting times of processes(P1, P2, P3, P4)
print(
F"waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}"
)
# print completion times of processes(P1, P2, P3, P4)
print(
F"completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}"
)
# print total turnaround times of processes(P1, P2, P3, P4)
print(
F"turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}"
)
# print sequence of finished processes
print(
F"sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}"
)
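# A quick sanity check of the demo above (not part of the original file):
# with all arrivals at 0 and bursts 53/17/68/24, the two round-robin passes
# (slices 17 and 25) finish P2 and then P4, and the final FCFS pass drains
# P1 and P3, so the demo should print
#
#   waiting time:            [83, 17, 94, 101]
#   completion time:         [136, 34, 204, 125]
#   turnaround time:         [136, 34, 204, 125]
#   sequence of finished processes: ['P2', 'P4', 'P1', 'P3']
#
# (turnaround equals completion here only because every arrival time is 0).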
| 21 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImg2ImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEImg2ImgPipeline
    params = ["image"]
    batch_params = ["image"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gradient_checkpointing = False
    @property
    def text_embedder_hidden_size(self):
        return 32
    @property
    def time_input_dim(self):
        return 32
    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4
    @property
    def renderer_dim(self):
        return 8
    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size, image_size=64, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_channels=3, num_hidden_layers=5, patch_size=1, )
        model = CLIPVisionModel(config)
        return model
    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224, do_center_crop=True, do_normalize=True, do_resize=True, image_mean=[0.48145466, 0.4578275, 0.40821073], image_std=[0.26862954, 0.26130258, 0.27577711], resample=3, size=224, )
        return image_processor
    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "embedding_proj_norm_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }
        model = PriorTransformer(**model_kwargs)
        return model
    @property
    def dummy_renderer(self):
        torch.manual_seed(0)
        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (
                0.1,
                0.1,
                0.1,
            ),
        }
        model = ShapERenderer(**model_kwargs)
        return model
    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        image_processor = self.dummy_image_processor
        renderer = self.dummy_renderer
        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp", num_train_timesteps=1024, prediction_type="sample", use_karras_sigmas=True, clip_sample=True, clip_sample_range=1.0, )
        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "image_processor": image_processor,
            "renderer": renderer,
            "scheduler": scheduler,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        input_image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs
    def test_shap_e(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (20, 32, 32, 3)
        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_inference_batch_consistent(self):
        self._test_inference_batch_consistent(batch_sizes=[1, 2])
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2, test_max_difference=test_max_difference, relax_max_difference=relax_max_difference, )
    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device)
        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]
        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]
        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class ShapEImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_shap_e_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/shap_e/corgi.png")
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_img2img_out.npy")
        pipe = ShapEImg2ImgPipeline.from_pretrained("openai/shap-e-img2img")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device=torch_device).manual_seed(0)
        images = pipe(
            input_image, generator=generator, guidance_scale=3.0, num_inference_steps=64, frame_size=64, output_type="np", ).images[0]
        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(images, expected_image)
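# A minimal, hedged usage sketch, not from the original test file: running the
# image-to-3D pipeline outside the test harness. Parameter values mirror the
# integration test above; the input image path is an assumption, and
# export_to_gif is the diffusers helper for saving the rendered frames.
import torch
from diffusers import ShapEImg2ImgPipeline
from diffusers.utils import export_to_gif, load_image
pipe = ShapEImg2ImgPipeline.from_pretrained("openai/shap-e-img2img", torch_dtype=torch.float16).to("cuda")
image = load_image("corgi.png")  # assumed local input image
frames = pipe(image, guidance_scale=3.0, num_inference_steps=64, frame_size=64, output_type="pil").images[0]
export_to_gif(frames, "corgi_3d.gif")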
| 2 | 0 |
'''simple docstring'''
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BlipProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")
    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
    def __call__(self, images: ImageInput = None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs, ) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")
        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
            return text_encoding
        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
        if text is not None:
            text_encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
        else:
            text_encoding = None
        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)
        return encoding_image_processor
    def batch_decode(self, *args, **kwargs):
        # forwarded to the tokenizer's batch_decode
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        # forwarded to the tokenizer's decode
        return self.tokenizer.decode(*args, **kwargs)
    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
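# A minimal, hedged usage sketch, not from the original file: one processor
# call tokenizes the caption and preprocesses the image in a single
# BatchEncoding. The checkpoint id and image path are illustrative assumptions.
from PIL import Image
from transformers import BlipProcessor
processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
inputs = processor(images=Image.open("cat.png"), text="a photo of", return_tensors="pt")
print(sorted(inputs.keys()))  # pixel_values plus the usual tokenizer outputs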
| 22 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_rag': ['RagConfig'],
'retrieval_rag': ['RagRetriever'],
'tokenization_rag': ['RagTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_rag"] = [
'RagModel',
'RagPreTrainedModel',
'RagSequenceForGeneration',
'RagTokenForGeneration',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_rag"] = [
'TFRagModel',
'TFRagPreTrainedModel',
'TFRagSequenceForGeneration',
'TFRagTokenForGeneration',
]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
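# The structure above defers heavy imports until an attribute is first
# touched. A minimal, hedged re-implementation of the idea (the real
# _LazyModule does more bookkeeping; this sketch only shows the mechanism):
import importlib
import types
class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each attribute to the submodule that defines it
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}
    def __getattr__(self, attr):
        module = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so the import happens only once
        return value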
| 2 | 0 |
'''simple docstring'''
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class MobileBERTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MobileBertTokenizer
    rust_tokenizer_class = MobileBertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    pre_trained_model_path = "google/mobilebert-uncased"
    def setUp(self):
        super().setUp()
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        self.tokenizers_list = [
            (tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2])  # else the 'google/' prefix is stripped
            for tokenizer_def in self.tokenizers_list
        ]
    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "UNwant\u00E9d,running"
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
        # With lower casing
        tokenizer = self.get_tokenizer(do_lower_case=True)
        rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True)
        sequence = "UNwant\u00E9d,running"
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_chinese(self):
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])
    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["hello", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hällo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])
    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["HeLLo", "!", "how", "Are", "yoU", "?"])
    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HäLLo", "!", "how", "Are", "yoU", "?"])
    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HaLLo", "!", "how", "Are", "yoU", "?"])
    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"])
    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")
        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
    def test_is_whitespace(self):
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
    def test_is_control(self):
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
    def test_is_punctuation(self):
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
    def test_clean_text(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])
        self.assertListEqual(
            [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("google/mobilebert-uncased")
        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        assert encoded_sentence == [101] + text + [102]
        assert encoded_pair == [101] + text + [102] + text_2 + [102]
    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence, return_attention_mask=False, return_token_type_ids=False, return_offsets_mapping=True, add_special_tokens=True, )
                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), '''A'''),
((1, 2), ''','''),
((3, 5), '''na'''),
((5, 6), '''##ï'''),
((6, 8), '''##ve'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''Allen'''),
((21, 23), '''##NL'''),
((23, 24), '''##P'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), '''a'''),
((1, 2), ''','''),
((3, 8), '''naive'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''allen'''),
((21, 23), '''##nl'''),
((23, 24), '''##p'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] )
    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)
                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)
                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)
                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
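# What the WordpieceTokenizer tests above exercise, in miniature: WordPiece
# segments greedily, longest-match-first, prefixing non-initial pieces with
# "##"; if any remainder fails to match, the whole word becomes [UNK]. A
# minimal, hedged re-implementation (not the library's code):
def wordpiece(word: str, vocab: set, unk: str = "[UNK]") -> list:
    pieces, start = [], 0
    while start < len(word):
        end = len(word)
        while end > start:
            piece = word[start:end] if start == 0 else "##" + word[start:end]
            if piece in vocab:
                pieces.append(piece)
                break
            end -= 1
        else:  # no sub-piece matched: the entire word is unknown
            return [unk]
        start = end
    return pieces
# wordpiece("unwanted", {"un", "##want", "##ed"}) -> ["un", "##want", "##ed"]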
| 23 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : int = logging.get_logger(__name__)
lowerCamelCase : List[Any] = {
'google/realm-cc-news-pretrained-embedder': (
'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-encoder': (
'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-scorer': (
'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-openqa': (
'https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json'
),
'google/realm-orqa-nq-openqa': 'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json',
'google/realm-orqa-nq-reader': 'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json',
'google/realm-orqa-wq-openqa': 'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json',
'google/realm-orqa-wq-reader': 'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json',
# See all REALM models at https://huggingface.co/models?filter=realm
}
class RealmConfig(PretrainedConfig):
    r"""Configuration class to store the configuration of a REALM model."""
    model_type = "realm"
    def __init__(self, vocab_size=30522, hidden_size=768, retriever_proj_size=128, num_hidden_layers=12, num_attention_heads=12, num_candidates=8, intermediate_size=3072, hidden_act="gelu_new", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, span_hidden_size=256, max_span_width=10, reader_layer_norm_eps=1e-3, reader_beam_size=5, reader_seq_len=320, num_block_records=13353718, searcher_beam_size=5000, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs, ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        # Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len
        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
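# A minimal, hedged usage sketch, not from the original file: instantiating
# the config with defaults and overriding one retrieval hyperparameter.
# Attribute names follow the __init__ above.
from transformers import RealmConfig
config = RealmConfig(searcher_beam_size=1000)
print(config.hidden_size, config.num_candidates, config.searcher_beam_size)
# Configs round-trip through JSON, e.g. config.save_pretrained("realm-config")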
| 2 | 0 |
from collections import defaultdict
from math import gcd
def solution(limit: int = 1_500_000) -> int:
    """Count perimeters up to `limit` formed by exactly one integer-sided
    right triangle, generating primitive triples via Euclid's formula."""
    frequencies: defaultdict = defaultdict(int)
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1, euclid_m, 2):
            if gcd(euclid_m, euclid_n) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter, limit + 1, primitive_perimeter):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1)
if __name__ == "__main__":
print(F'{solution() = }')
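# Why Euclid's formula drives the loops above: every primitive Pythagorean
# triple is (m^2 - n^2, 2mn, m^2 + n^2) for coprime m > n of opposite parity,
# with perimeter 2m(m + n); non-primitive triples are integer multiples of a
# primitive one, which the inner range() walks. A quick check of the smallest
# case (m=2, n=1):
m, n = 2, 1
a, b, c = m * m - n * n, 2 * m * n, m * m + n * n
assert (a, b, c) == (3, 4, 5) and a * a + b * b == c * c
assert a + b + c == 2 * m * (m + n)  # perimeter 12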
| 24 |
'''simple docstring'''
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : str = logging.get_logger(__name__)
lowerCamelCase : int = {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json',
}
class MvpConfig(PretrainedConfig):
    r"""Configuration class to store the configuration of an MVP model."""
    model_type = "mvp"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__(self, vocab_size=50267, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16, encoder_layerdrop=0.0, decoder_layerdrop=0.0, activation_function="gelu", d_model=1024, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, classifier_dropout=0.0, scale_embedding=False, use_cache=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, is_encoder_decoder=True, decoder_start_token_id=2, forced_eos_token_id=2, use_prompt=False, prompt_length=100, prompt_mid_dim=800, **kwargs, ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id, **kwargs, )
        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed.")
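# What the attribute_map above buys you, in a hedged sketch: generic code can
# read `config.hidden_size` and `config.num_attention_heads` even though this
# config stores `d_model` and `encoder_attention_heads`.
from transformers import MvpConfig
config = MvpConfig(d_model=512, encoder_attention_heads=8)
assert config.hidden_size == 512         # aliased to d_model
assert config.num_attention_heads == 8   # aliased to encoder_attention_heads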
| 2 | 0 |
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
    from transformers import (
        AutoProcessor,
        BertTokenizerFast,
        BlipImageProcessor,
        GPT2Tokenizer,
        InstructBlipProcessor,
        PreTrainedTokenizerFast,
    )
@require_vision
class InstructBlipProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = BlipImageProcessor()
        tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")
        qformer_tokenizer = BertTokenizerFast.from_pretrained("hf-internal-testing/tiny-random-bert")
        processor = InstructBlipProcessor(image_processor, tokenizer, qformer_tokenizer)
        processor.save_pretrained(self.tmpdirname)
    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer
    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor
    def get_qformer_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).qformer_tokenizer
    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def prepare_image_inputs(self):
        """Create a list of PIL images for testing."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_additional_features(self):
        processor = InstructBlipProcessor(
            tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor(), qformer_tokenizer=self.get_qformer_tokenizer(), )
        processor.save_pretrained(self.tmpdirname)
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = InstructBlipProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)
        self.assertIsInstance(processor.qformer_tokenizer, PreTrainedTokenizerFast)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer)
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)
    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer)
        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tokens = tokenizer(input_str, return_token_type_ids=False)
        encoded_tokens_qformer = qformer_tokenizer(input_str, return_token_type_ids=False)
        for key in encoded_tokens.keys():
            self.assertListEqual(encoded_tokens[key], encoded_processor[key])
        for key in encoded_tokens_qformer.keys():
            self.assertListEqual(encoded_tokens_qformer[key], encoded_processor["qformer_" + key])
    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer)
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(
            list(inputs.keys()), ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"], )
        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)
    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer)
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(
            list(inputs.keys()), ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"], )
| 25 |
'''simple docstring'''
import unittest
from transformers import DebertaV2Tokenizer, DebertaV2TokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/spiece.model')
@require_sentencepiece
@require_tokenizers
class DebertaV2TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = DebertaV2Tokenizer
    rust_tokenizer_class = DebertaV2TokenizerFast
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True
    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, unk_token="<unk>")
        tokenizer.save_pretrained(self.tmpdirname)
    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text
    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "[PAD]")
        self.assertEqual(len(vocab_keys), 30001)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 30000)
    def test_do_lower_case(self):
        # fmt: off
        sequence = " \tHeLLo!how  \n Are yoU?  "
        tokens_target = ["▁hello", "!", "how", "▁are", "▁you", "?"]
        # fmt: on
        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)
        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
@unittest.skip('''There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.''' )
def UpperCamelCase__ (self : List[Any] ):
'''simple docstring'''
pass
@unittest.skip('''There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.''' )
def UpperCamelCase__ (self : List[str] ):
'''simple docstring'''
pass
    def test_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on
        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)
        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
    def test_do_lower_case_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on
        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)
        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
    def test_do_lower_case_split_by_punct_false(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
        # fmt: on
        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=False)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)
        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=False)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
    def test_do_lower_case_false_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on
        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)
        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
    def test_do_lower_case_false_split_by_punct_false(self):
        # fmt: off
        sequence = " \tHeLLo!how  \n Are yoU?  "
        tokens_target = ["▁", "<unk>", "e", "<unk>", "o", "!", "how", "▁", "<unk>", "re", "▁yo", "<unk>", "?"]
        # fmt: on
        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=False)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)
        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=False)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
    def test_rust_and_python_full_tokenizers(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "I was born in 92000, and this is falsé."
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_full_tokenizer(self):
        sequence = "This is a test"
        ids_target = [13, 1, 4398, 25, 21, 1289]
        tokens_target = ["▁", "T", "his", "▁is", "▁a", "▁test"]
        back_tokens_target = ["▁", "<unk>", "his", "▁is", "▁a", "▁test"]
        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, keep_accents=True)
        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, keep_accents=True)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, ids_target)
        tokens = tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, tokens_target)
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(back_tokens, back_tokens_target)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(rust_ids, ids_target)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(rust_tokens, tokens_target)
        rust_back_tokens = rust_tokenizer.convert_ids_to_tokens(rust_ids)
        self.assertListEqual(rust_back_tokens, back_tokens_target)
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        ids_target = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9]
        tokens_target = ["▁", "I", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", ".", ]
        back_tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
        # fmt: on
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, ids_target)
        tokens = tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, tokens_target)
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(back_tokens, back_tokens_target)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(rust_ids, ids_target)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(rust_tokens, tokens_target)
        rust_back_tokens = rust_tokenizer.convert_ids_to_tokens(rust_ids)
        self.assertListEqual(rust_back_tokens, back_tokens_target)
    def test_sequence_builders(self):
        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB)
        text = tokenizer.encode("sequence builders")
        text_2 = tokenizer.encode("multi-sequence build")
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id], encoded_sentence)
        self.assertEqual(
            [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [tokenizer.sep_token_id], encoded_pair, )
@slow
def UpperCamelCase__ (self : int ):
'''simple docstring'''
lowercase__ = {'''input_ids''': [[1, 39867, 36, 19390, 486, 27, 35052, 81436, 18, 60685, 1225, 7, 35052, 81436, 18, 9367, 16899, 18, 15937, 53, 594, 773, 18, 16287, 30465, 36, 15937, 6, 41139, 38, 36979, 60763, 191, 6, 34132, 99, 6, 50538, 390, 43230, 6, 34132, 2779, 20850, 14, 699, 1072, 1194, 36, 382, 10901, 53, 7, 699, 1072, 2084, 36, 20422, 630, 53, 19, 105, 3049, 1896, 1053, 16899, 1506, 11, 37978, 4243, 7, 1237, 31869, 200, 16566, 654, 6, 35052, 81436, 7, 55630, 13593, 4, 2], [1, 26, 15011, 13, 667, 8, 1053, 18, 23611, 1237, 72356, 12820, 34, 104134, 1209, 35, 13313, 6627, 21, 202, 347, 7, 164, 2399, 11, 46, 4485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1232, 2864, 15785, 14951, 105, 5, 8581, 1250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCamelCase , model_name='''microsoft/deberta-v2-xlarge''' , revision='''ad6e42c1532ddf3a15c39246b63f5559d558b670''' , )
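# About the "▁" tokens in the expectations above: DeBERTa-v2 uses a
# SentencePiece unigram model, which marks word boundaries with U+2581 ("▁")
# instead of whitespace, so detokenization is a pure string operation. A
# minimal, hedged round-trip sketch (the spiece.model path is assumed):
import sentencepiece as spm
sp = spm.SentencePieceProcessor(model_file="spiece.model")
pieces = sp.encode("this is a test", out_type=str)  # e.g. ['▁this', '▁is', '▁a', '▁test']
text = "".join(pieces).replace("\u2581", " ").strip()
assert text == "this is a test"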
| 2 | 0 |
from __future__ import annotations
def depth_first_search(graph: dict, start: str) -> set:
    """Iterative DFS; returns every vertex reachable from `start`."""
    explored, stack = set(), [start]
    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored
G = {
"A": ["B", "C", "D"],
"B": ["A", "D", "E"],
"C": ["A", "F"],
"D": ["B", "D"],
"E": ["B", "F"],
"F": ["C", "E", "G"],
"G": ["F"],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
print(depth_first_search(G, "A"))
| 26 |
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def remove_ignore_keys_(state_dict) -> None:
    """Drop fairseq bookkeeping keys that have no HF equivalent."""
    ignore_keys = [
        'encoder.version',
        'decoder.version',
        'model.encoder.version',
        'model.decoder.version',
        '_float_tensor',
        'decoder.output_projection.weight',
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def make_linear_from_emb(emb) -> nn.Linear:
    """Build a bias-free LM head whose weight is tied to the embedding table."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def _SCREAMING_SNAKE_CASE (A , A="facebook/mbart-large-en-ro" , A=False , A=False ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = torch.load(A , map_location='''cpu''' )['''model''']
remove_ignore_keys_(A )
lowercase__ = state_dict['''encoder.embed_tokens.weight'''].shape[0]
lowercase__ = MBartConfig.from_pretrained(A , vocab_size=A )
if mbart_aa and finetuned:
lowercase__ = '''relu'''
lowercase__ = state_dict['''decoder.embed_tokens.weight''']
lowercase__ = MBartForConditionalGeneration(A )
model.model.load_state_dict(A )
if finetuned:
lowercase__ = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'fairseq_path', type=str, help='bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'
)
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--hf_config',
default='facebook/mbart-large-cc25',
type=str,
help='Which huggingface architecture to use: mbart-large',
)
    parser.add_argument('--mbart_50', action='store_true', help='whether the model is an mBART-50 checkpoint')
parser.add_argument('--finetuned', action='store_true', help='whether the model is a fine-tuned checkpoint')
    args = parser.parse_args()
    model = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_50=args.mbart_50
    )
model.save_pretrained(args.pytorch_dump_folder_path)
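    # Example invocation (a sketch with hypothetical paths; adjust to wherever
    # the fairseq checkpoint actually lives):
    #
    #   python convert_mbart_original_checkpoint_to_pytorch.py \
    #       /path/to/mbart/model.pt ./mbart-hf-dump \
    #       --hf_config facebook/mbart-large-cc25 --finetuned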
| 2 | 0 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class Translation:
    languages: List[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="Translation", init=False, repr=False)

    def __call__(self):
        return pa.struct({lang: pa.string() for lang in sorted(self.languages)})

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Value

        return {k: Value("string") for k in sorted(self.languages)}
@dataclass
class TranslationVariableLanguages:
    languages: Optional[List] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="TranslationVariableLanguages", init=False, repr=False)

    def __post_init__(self):
        self.languages = sorted(set(self.languages)) if self.languages else None
        self.num_languages = len(self.languages) if self.languages else None

    def __call__(self):
        return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())})

    def encode_example(self, translation_dict):
        lang_set = set(self.languages)
        if self.languages and set(translation_dict) - lang_set:
            raise ValueError(
                f"Some languages in example ({', '.join(sorted(set(translation_dict) - lang_set))}) "
                f"are not in valid set ({', '.join(lang_set)})."
            )

        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text, str):
                translation_tuples.append((lang, text))
            else:
                translation_tuples.extend([(lang, el) for el in text])

        # Ensure translations are in ascending order by language code.
        languages, translations = zip(*sorted(translation_tuples))

        return {"language": languages, "translation": translations}

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Sequence, Value

        return {
            "language": Sequence(Value("string")),
            "translation": Sequence(Value("string")),
        }
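
# Usage sketch (an illustration added here, not part of the original module;
# in practice `datasets` builds these feature types from a dataset's schema):
#
#   feature = TranslationVariableLanguages(languages=["de", "en", "fr"])
#   feature.encode_example({"en": "the cat", "fr": ["le chat", "la chatte"]})
#   # -> {"language": ("en", "fr", "fr"),
#   #     "translation": ("the cat", "la chatte", "le chat")}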
| 27 |
'''simple docstring'''
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
logger = logging.getLogger(__name__)
class NER(TokenClassificationTask):
    def __init__(self, label_idx=-1):
        # in NER datasets, the last column is usually reserved for the NER label
        self.label_idx = label_idx

    def read_examples_from_file(self, data_dir, mode: Union[Split, str]) -> List[InputExample]:
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []
        with open(file_path, encoding='utf-8') as f:
            words = []
            labels = []
            for line in f:
                if line.startswith('-DOCSTART-') or line == "" or line == "\n":
                    if words:
                        examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                        guid_index += 1
                        words = []
                        labels = []
                else:
                    splits = line.split(' ')
                    words.append(splits[0])
                    if len(splits) > 1:
                        labels.append(splits[self.label_idx].replace('\n', ''))
                    else:
                        # Examples could have no label for mode = "test"
                        labels.append('O')
            if words:
                examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
        return examples

    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List):
        example_id = 0
        for line in test_input_reader:
            if line.startswith('-DOCSTART-') or line == "" or line == "\n":
                writer.write(line)
                if not preds_list[example_id]:
                    example_id += 1
            elif preds_list[example_id]:
                output_line = line.split()[0] + ' ' + preds_list[example_id].pop(0) + '\n'
                writer.write(output_line)
            else:
                logger.warning('Maximum sequence length exceeded: No prediction for \'%s\'.', line.split()[0])

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, 'r') as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ['O'] + labels
            return labels
        else:
            return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class Chunk(NER):
    def __init__(self):
        # in the CoNLL-2003 dataset the chunk column is second-to-last
        super().__init__(label_idx=-2)

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, 'r') as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ['O'] + labels
            return labels
        else:
            return [
                "O", "B-ADVP", "B-INTJ", "B-LST", "B-PRT", "B-NP", "B-SBAR",
                "B-VP", "B-ADJP", "B-CONJP", "B-PP", "I-ADVP", "I-INTJ",
                "I-LST", "I-PRT", "I-NP", "I-SBAR", "I-VP", "I-ADJP",
                "I-CONJP", "I-PP",
            ]
class POS(TokenClassificationTask):
    def read_examples_from_file(self, data_dir, mode: Union[Split, str]) -> List[InputExample]:
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []
        with open(file_path, encoding='utf-8') as f:
            for sentence in parse_incr(f):
                words = []
                labels = []
                for token in sentence:
                    words.append(token['form'])
                    labels.append(token['upos'])
                assert len(words) == len(labels)
                if words:
                    examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                    guid_index += 1
        return examples

    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List):
        example_id = 0
        for sentence in parse_incr(test_input_reader):
            s_p = preds_list[example_id]
            out = ''
            for token in sentence:
                out += f"{token['form']} ({token['upos']}|{s_p.pop(0)}) "
            out += "\n"
            writer.write(out)
            example_id += 1

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, 'r') as f:
                return f.read().splitlines()
        else:
            return [
                "ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN",
                "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM",
                "VERB", "X",
            ]
| 2 | 0 |
'''simple docstring'''
def is_unique(input_str: str) -> bool:
    """Return True if every character in the string appears at most once."""
    bitmap = 0
    for ch in input_str:
        ch_unicode = ord(ch)
        ch_bit_index_on = pow(2, ch_unicode)
        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
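    # Worked examples (illustrative): the bitmap keeps one bit per Unicode
    # code point, so the scan is O(n) at the cost of a potentially huge int.
    #   is_unique("abcde")  -> True
    #   is_unique("banana") -> False  (the second 'a' finds its bit already set)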
| 28 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class MegatronBertConfig(PretrainedConfig):
    model_type = "megatron-bert"

    def __init__(
        self,
        vocab_size=29056,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
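
# Usage sketch (illustrative): configs are plain containers, so a smaller
# variant is built by overriding individual constructor arguments.
#
#   config = MegatronBertConfig(num_hidden_layers=2, hidden_size=256,
#                               num_attention_heads=4, intermediate_size=1024)
#   config.vocab_size  # -> 29056, the Megatron-BERT default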
| 2 | 0 |
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def test_prim_successful_result():
    """Check Prim's MST on a small undirected weighted graph."""
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]

    adjancency = defaultdict(list)
    for node1, node2, cost in edges:
        adjancency[node1].append([node2, cost])
        adjancency[node2].append([node1, cost])

    result = mst(adjancency)

    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]

    for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
        assert edge in result or reverse in result
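
# Sanity note (illustrative): the adjacency list built above is symmetric --
# the edge [0, 1, 4] produces both [1, 4] inside adjancency[0] and [0, 4]
# inside adjancency[1] -- which is exactly what the edge/reverse check relies on.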
| 29 |
'''simple docstring'''
# Lint as: python3
import itertools
import os
import re
_uppercase_uppercase_re = re.compile(r'([A-Z]+)([A-Z][a-z])')
_lowercase_uppercase_re = re.compile(r'([a-z\d])([A-Z])')
_single_underscore_re = re.compile(r'(?<!_)_(?!_)')
_multiple_underscores_re = re.compile(r'(_{2,})')
_split_re = r'^\w+(\.\w+)*$'
INVALID_WINDOWS_CHARACTERS_IN_PATH = r'<>:/\|?*'
def camelcase_to_snakecase(name):
    """Convert a camel-case string to snake-case."""
    name = _uppercase_uppercase_re.sub(r'\1_\2', name)
    name = _lowercase_uppercase_re.sub(r'\1_\2', name)
    return name.lower()


def snakecase_to_camelcase(name):
    """Convert a snake-case string to camel-case."""
    name = _single_underscore_re.split(name)
    name = [_multiple_underscores_re.split(n) for n in name]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(name) if n != '')


def filename_prefix_for_name(name):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    return camelcase_to_snakecase(name)


def filename_prefix_for_split(name, split):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    if not re.match(_split_re, split):
        raise ValueError(f"Split name should match '{_split_re}'' but got '{split}'.")
    return f"{filename_prefix_for_name(name)}-{split}"


def filepattern_for_dataset_split(dataset_name, split, data_dir, filetype_suffix=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    if filetype_suffix:
        prefix += f".{filetype_suffix}"
    filepath = os.path.join(data_dir, prefix)
    return f"{filepath}*"


def filenames_for_dataset_split(path, dataset_name, split, filetype_suffix=None, shard_lengths=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    prefix = os.path.join(path, prefix)
    if shard_lengths:
        num_shards = len(shard_lengths)
        filenames = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(num_shards)]
        if filetype_suffix:
            filenames = [filename + f".{filetype_suffix}" for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += f".{filetype_suffix}"
        return [filename]
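
# Example (illustrative values):
#   filenames_for_dataset_split("/data", "squad", "train",
#                               filetype_suffix="arrow", shard_lengths=[100, 100])
#   -> ["/data/squad-train-00000-of-00002.arrow",
#       "/data/squad-train-00001-of-00002.arrow"]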
| 2 | 0 |
class lowercase__:
"""simple docstring"""
def __init__( self : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : List[str]=None , SCREAMING_SNAKE_CASE_ : Union[str, Any]=None ) -> List[str]:
lowercase_ = data
lowercase_ = previous
lowercase_ = next_node
def __str__( self : Tuple ) -> str:
return f'''{self.data}'''
def _lowercase ( self : Any ) -> int:
return self.data
def _lowercase ( self : Union[str, Any] ) -> Optional[int]:
return self.next
def _lowercase ( self : Tuple ) -> Any:
return self.previous
class lowercase__:
"""simple docstring"""
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : int ) -> str:
lowercase_ = head
def __iter__( self : Union[str, Any] ) -> str:
return self
def _lowercase ( self : List[str] ) -> Tuple:
if not self.current:
raise StopIteration
else:
lowercase_ = self.current.get_data()
lowercase_ = self.current.get_next()
return value
class lowercase__:
"""simple docstring"""
def __init__( self : Union[str, Any] ) -> Any:
lowercase_ = None # First node in list
lowercase_ = None # Last node in list
def __str__( self : Union[str, Any] ) -> Union[str, Any]:
lowercase_ = self.head
lowercase_ = []
while current is not None:
nodes.append(current.get_data() )
lowercase_ = current.get_next()
return " ".join(str(SCREAMING_SNAKE_CASE_ ) for node in nodes )
def __contains__( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : int ) -> List[Any]:
lowercase_ = self.head
while current:
if current.get_data() == value:
return True
lowercase_ = current.get_next()
return False
def __iter__( self : Dict ) -> Tuple:
return LinkedListIterator(self.head )
def _lowercase ( self : Any ) -> Optional[Any]:
if self.head:
return self.head.get_data()
return None
def _lowercase ( self : Any ) -> Any:
if self.tail:
return self.tail.get_data()
return None
def _lowercase ( self : Dict , SCREAMING_SNAKE_CASE_ : Node ) -> None:
if self.head is None:
lowercase_ = node
lowercase_ = node
else:
self.insert_before_node(self.head , SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : List[str] , SCREAMING_SNAKE_CASE_ : Node ) -> None:
if self.head is None:
self.set_head(SCREAMING_SNAKE_CASE_ )
else:
self.insert_after_node(self.tail , SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : Tuple , SCREAMING_SNAKE_CASE_ : int ) -> None:
lowercase_ = Node(SCREAMING_SNAKE_CASE_ )
if self.head is None:
self.set_head(SCREAMING_SNAKE_CASE_ )
else:
self.set_tail(SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : Node , SCREAMING_SNAKE_CASE_ : Node ) -> None:
lowercase_ = node
lowercase_ = node.previous
if node.get_previous() is None:
lowercase_ = node_to_insert
else:
lowercase_ = node_to_insert
lowercase_ = node_to_insert
def _lowercase ( self : List[str] , SCREAMING_SNAKE_CASE_ : Node , SCREAMING_SNAKE_CASE_ : Node ) -> None:
lowercase_ = node
lowercase_ = node.next
if node.get_next() is None:
lowercase_ = node_to_insert
else:
lowercase_ = node_to_insert
lowercase_ = node_to_insert
def _lowercase ( self : Tuple , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int ) -> None:
lowercase_ = 1
lowercase_ = Node(SCREAMING_SNAKE_CASE_ )
lowercase_ = self.head
while node:
if current_position == position:
self.insert_before_node(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
return
current_position += 1
lowercase_ = node.next
self.insert_after_node(self.tail , SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : Dict , SCREAMING_SNAKE_CASE_ : int ) -> Node:
lowercase_ = self.head
while node:
if node.get_data() == item:
return node
lowercase_ = node.get_next()
raise Exception('''Node not found''' )
def _lowercase ( self : List[Any] , SCREAMING_SNAKE_CASE_ : str ) -> List[Any]:
if (node := self.get_node(SCREAMING_SNAKE_CASE_ )) is not None:
if node == self.head:
lowercase_ = self.head.get_next()
if node == self.tail:
lowercase_ = self.tail.get_previous()
self.remove_node_pointers(SCREAMING_SNAKE_CASE_ )
@staticmethod
def _lowercase ( SCREAMING_SNAKE_CASE_ : Node ) -> None:
if node.get_next():
lowercase_ = node.previous
if node.get_previous():
lowercase_ = node.next
lowercase_ = None
lowercase_ = None
def _lowercase ( self : Tuple ) -> Tuple:
return self.head is None
def a ( ):
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
| 30 |
'''simple docstring'''
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class TrOCRStandaloneDecoderModelTester:
'''simple docstring'''
def __init__(self : str , UpperCamelCase : Tuple , UpperCamelCase : Optional[int]=99 , UpperCamelCase : Optional[int]=13 , UpperCamelCase : Tuple=16 , UpperCamelCase : Union[str, Any]=7 , UpperCamelCase : List[Any]=True , UpperCamelCase : List[str]=True , UpperCamelCase : str=True , UpperCamelCase : Tuple=False , UpperCamelCase : str=True , UpperCamelCase : Tuple=2 , UpperCamelCase : Optional[int]=32 , UpperCamelCase : Any=4 , UpperCamelCase : Optional[int]=4 , UpperCamelCase : Tuple=30 , UpperCamelCase : str=0 , UpperCamelCase : Tuple=1 , UpperCamelCase : List[Any]=2 , UpperCamelCase : str=None , ):
'''simple docstring'''
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = decoder_seq_length
# For common tests
lowercase__ = self.decoder_seq_length
lowercase__ = is_training
lowercase__ = use_attention_mask
lowercase__ = use_labels
lowercase__ = vocab_size
lowercase__ = d_model
lowercase__ = d_model
lowercase__ = decoder_layers
lowercase__ = decoder_layers
lowercase__ = decoder_ffn_dim
lowercase__ = decoder_attention_heads
lowercase__ = decoder_attention_heads
lowercase__ = eos_token_id
lowercase__ = bos_token_id
lowercase__ = pad_token_id
lowercase__ = decoder_start_token_id
lowercase__ = use_cache
lowercase__ = max_position_embeddings
lowercase__ = None
lowercase__ = decoder_seq_length
lowercase__ = 2
lowercase__ = 1
def UpperCamelCase__ (self : str ):
'''simple docstring'''
lowercase__ = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
lowercase__ = None
if self.use_attention_mask:
lowercase__ = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 )
lowercase__ = None
if self.use_labels:
lowercase__ = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
lowercase__ = TrOCRConfig(
vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , )
return (config, input_ids, attention_mask, lm_labels)
def UpperCamelCase__ (self : Tuple , UpperCamelCase : List[Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : Tuple , UpperCamelCase : List[str] , ):
'''simple docstring'''
lowercase__ = True
lowercase__ = TrOCRDecoder(config=UpperCamelCase ).to(UpperCamelCase ).eval()
lowercase__ = input_ids[:2]
input_ids[input_ids == 0] += 1
# first forward pass
lowercase__ = model(UpperCamelCase , use_cache=UpperCamelCase )
lowercase__ = model(UpperCamelCase )
lowercase__ = model(UpperCamelCase , use_cache=UpperCamelCase )
self.parent.assertTrue(len(UpperCamelCase ) == len(UpperCamelCase ) )
self.parent.assertTrue(len(UpperCamelCase ) == len(UpperCamelCase ) + 1 )
lowercase__ = outputs['''past_key_values''']
# create hypothetical next token and extent to next_input_ids
lowercase__ = ids_tensor((2, 1) , config.vocab_size - 1 ) + 1
# append to next input_ids and
lowercase__ = torch.cat([input_ids, next_tokens] , dim=-1 )
lowercase__ = model(UpperCamelCase )['''last_hidden_state''']
lowercase__ = model(UpperCamelCase , past_key_values=UpperCamelCase )['''last_hidden_state''']
# select random slice
lowercase__ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowercase__ = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
lowercase__ = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
assert torch.allclose(UpperCamelCase , UpperCamelCase , atol=1E-3 )
def UpperCamelCase__ (self : Optional[Any] ):
'''simple docstring'''
lowercase__ = self.prepare_config_and_inputs()
lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ = config_and_inputs
lowercase__ = {'''input_ids''': input_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
@require_torch
class TrOCRStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
    all_model_classes = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (TrOCRForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = {"text-generation": TrOCRForCausalLM} if is_torch_available() else {}
    fx_compatible = True
    test_pruning = False
def UpperCamelCase__ (self : Any ):
'''simple docstring'''
lowercase__ = TrOCRStandaloneDecoderModelTester(self , is_training=UpperCamelCase )
lowercase__ = ConfigTester(self , config_class=UpperCamelCase )
def UpperCamelCase__ (self : List[str] ):
'''simple docstring'''
pass
def UpperCamelCase__ (self : Optional[int] ):
'''simple docstring'''
pass
def UpperCamelCase__ (self : Any ):
'''simple docstring'''
pass
def UpperCamelCase__ (self : Any ):
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCamelCase__ (self : Union[str, Any] ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past(*UpperCamelCase )
def UpperCamelCase__ (self : Optional[int] ):
'''simple docstring'''
return
@unittest.skip('''The model doesn\'t support left padding''' ) # and it's not used enough to be worth fixing :)
def UpperCamelCase__ (self : List[str] ):
'''simple docstring'''
pass
| 2 | 0 |
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import M2M100Config, M2M100ForConditionalGeneration
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_m2m100_checkpoint_from_disk(checkpoint_path):
    m2m_100 = torch.load(checkpoint_path, map_location="cpu")
    args = m2m_100["args"] or m2m_100["cfg"]["model"]
    state_dict = m2m_100["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]

    config = M2M100Config(
        vocab_size=vocab_size,
        max_position_embeddings=1_024,
        encoder_layers=args.encoder_layers,
        decoder_layers=args.decoder_layers,
        encoder_attention_heads=args.encoder_attention_heads,
        decoder_attention_heads=args.decoder_attention_heads,
        encoder_ffn_dim=args.encoder_ffn_embed_dim,
        decoder_ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.encoder_embed_dim,
        encoder_layerdrop=args.encoder_layerdrop,
        decoder_layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="relu",
    )

    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = M2M100ForConditionalGeneration(config)
    model.model.load_state_dict(state_dict, strict=False)
    model.lm_head = make_linear_from_emb(model.model.shared)
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""fairseq_path""", type=str, help="""path to a model.pt on local filesystem.""")
parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
    args = parser.parse_args()
    model = convert_fairseq_m2m100_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
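    # Example invocation (a sketch; the checkpoint path is hypothetical):
    #
    #   python convert_m2m100_original_checkpoint_to_pytorch.py \
    #       /path/to/m2m100/model.pt ./m2m100-hf-dump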
| 31 |
'''simple docstring'''
def remove_digit(num: int) -> int:
    """Return the largest number obtainable by deleting exactly one digit."""
    if not isinstance(num, int):
        raise TypeError('only integers accepted as input')
    else:
        num_str = str(abs(num))
        num_transpositions = [list(num_str) for char in range(len(num_str))]
        for index in range(len(num_str)):
            num_transpositions[index].pop(index)
        return max(
            int(''.join(list(transposition))) for transposition in num_transpositions
        )
if __name__ == "__main__":
__import__('doctest').testmod()
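    # Worked examples (illustrative):
    #   remove_digit(152)  -> 52  (candidates after one deletion: 52, 12, 15)
    #   remove_digit(-290) -> 90  (abs() drops the sign; candidates: 90, 20, 29)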
| 2 | 0 |
def solution(power: int = 10_00) -> int:
    """Return the sum of the decimal digits of 2**power."""
    num = 2**power
    string_num = str(num)
    list_num = list(string_num)
    sum_of_num = 0
    for i in list_num:
        sum_of_num += int(i)
    return sum_of_num
if __name__ == "__main__":
    power = int(input('Enter the power of 2: ').strip())
print('2 ^ ', power, ' = ', 2**power)
    result = solution(power)
print('Sum of the digits is: ', result)
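    # Quick check (illustrative): 2**15 = 32768 and 3+2+7+6+8 = 26,
    # so solution(15) returns 26.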
| 32 |
'''simple docstring'''
import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
import numpy as np
from . import residue_constants
lowerCamelCase : str = Mapping[str, np.ndarray]
lowerCamelCase : List[Any] = Mapping[str, Any] # Is a nested dict.
lowerCamelCase : Any = 0.0_1
@dataclasses.dataclass(frozen=lowercase_ )
class __lowerCAmelCase :
'''simple docstring'''
lowerCAmelCase__ : np.ndarray # [num_res, num_atom_type, 3]
# Amino-acid type for each residue represented as an integer between 0 and
# 20, where 20 is 'X'.
lowerCAmelCase__ : np.ndarray # [num_res]
# Binary float mask to indicate presence of a particular atom. 1.0 if an atom
# is present and 0.0 if not. This should be used for loss masking.
lowerCAmelCase__ : np.ndarray # [num_res, num_atom_type]
# Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
lowerCAmelCase__ : np.ndarray # [num_res]
# B-factors, or temperature factors, of each residue (in sq. angstroms units),
# representing the displacement of the residue from its ground truth mean
# value.
lowerCAmelCase__ : np.ndarray # [num_res, num_atom_type]
# Chain indices for multi-chain predictions
lowerCAmelCase__ : Optional[np.ndarray] = None
# Optional remark about the protein. Included as a comment in output PDB
# files
lowerCAmelCase__ : Optional[str] = None
# Templates used to generate this protein (prediction-only)
lowerCAmelCase__ : Optional[Sequence[str]] = None
# Chain corresponding to each parent
lowerCAmelCase__ : Optional[Sequence[int]] = None
def _SCREAMING_SNAKE_CASE (A ) -> Protein:
"""simple docstring"""
lowercase__ = R'''(\[[A-Z]+\]\n)'''
lowercase__ = [tag.strip() for tag in re.split(A , A ) if len(A ) > 0]
lowercase__ = zip(tags[0::2] , [l.split('''\n''' ) for l in tags[1::2]] )
lowercase__ = ["N", "CA", "C"]
lowercase__ = None
lowercase__ = None
lowercase__ = None
for g in groups:
if "[PRIMARY]" == g[0]:
lowercase__ = g[1][0].strip()
for i in range(len(A ) ):
if seq[i] not in residue_constants.restypes:
lowercase__ = '''X''' # FIXME: strings are immutable
lowercase__ = np.array(
[residue_constants.restype_order.get(A , residue_constants.restype_num ) for res_symbol in seq] )
elif "[TERTIARY]" == g[0]:
lowercase__ = []
for axis in range(3 ):
tertiary.append(list(map(A , g[1][axis].split() ) ) )
lowercase__ = np.array(A )
lowercase__ = np.zeros((len(tertiary[0] ) // 3, residue_constants.atom_type_num, 3) ).astype(np.floataa )
for i, atom in enumerate(A ):
lowercase__ = np.transpose(tertiary_np[:, i::3] )
atom_positions *= PICO_TO_ANGSTROM
elif "[MASK]" == g[0]:
lowercase__ = np.array(list(map({'''-''': 0, '''+''': 1}.get , g[1][0].strip() ) ) )
lowercase__ = np.zeros(
(
len(A ),
residue_constants.atom_type_num,
) ).astype(np.floataa )
for i, atom in enumerate(A ):
lowercase__ = 1
atom_mask *= mask[..., None]
assert aatype is not None
return Protein(
atom_positions=A , atom_mask=A , aatype=A , residue_index=np.arange(len(A ) ) , b_factors=A , )
def _SCREAMING_SNAKE_CASE (A , A = 0 ) -> List[str]:
"""simple docstring"""
lowercase__ = []
lowercase__ = prot.remark
if remark is not None:
pdb_headers.append(f"REMARK {remark}" )
lowercase__ = prot.parents
lowercase__ = prot.parents_chain_index
if parents is not None and parents_chain_index is not None:
lowercase__ = [p for i, p in zip(A , A ) if i == chain_id]
if parents is None or len(A ) == 0:
lowercase__ = ['''N/A''']
pdb_headers.append(f"PARENT {' '.join(A )}" )
return pdb_headers
def _SCREAMING_SNAKE_CASE (A , A ) -> str:
"""simple docstring"""
lowercase__ = []
lowercase__ = pdb_str.split('''\n''' )
lowercase__ = prot.remark
if remark is not None:
out_pdb_lines.append(f"REMARK {remark}" )
lowercase__ = 42
if prot.parents is not None and len(prot.parents ) > 0:
lowercase__ = []
if prot.parents_chain_index is not None:
lowercase__ = {}
for p, i in zip(prot.parents , prot.parents_chain_index ):
parent_dict.setdefault(str(A ) , [] )
parent_dict[str(A )].append(A )
lowercase__ = max([int(A ) for chain_idx in parent_dict] )
for i in range(max_idx + 1 ):
lowercase__ = parent_dict.get(str(A ) , ['''N/A'''] )
parents_per_chain.append(A )
else:
parents_per_chain.append(list(prot.parents ) )
else:
lowercase__ = [['''N/A''']]
def make_parent_line(A ) -> str:
return f"PARENT {' '.join(A )}"
out_pdb_lines.append(make_parent_line(parents_per_chain[0] ) )
lowercase__ = 0
for i, l in enumerate(A ):
if "PARENT" not in l and "REMARK" not in l:
out_pdb_lines.append(A )
if "TER" in l and "END" not in lines[i + 1]:
chain_counter += 1
if not chain_counter >= len(A ):
lowercase__ = parents_per_chain[chain_counter]
else:
lowercase__ = ['''N/A''']
out_pdb_lines.append(make_parent_line(A ) )
return "\n".join(A )
def _SCREAMING_SNAKE_CASE (A ) -> str:
"""simple docstring"""
lowercase__ = residue_constants.restypes + ['''X''']
def res_atoa(A ) -> str:
return residue_constants.restype_atoa.get(restypes[r] , '''UNK''' )
lowercase__ = residue_constants.atom_types
lowercase__ = []
lowercase__ = prot.atom_mask
lowercase__ = prot.aatype
lowercase__ = prot.atom_positions
lowercase__ = prot.residue_index.astype(np.intaa )
lowercase__ = prot.b_factors
lowercase__ = prot.chain_index
if np.any(aatype > residue_constants.restype_num ):
raise ValueError('''Invalid aatypes.''' )
lowercase__ = get_pdb_headers(A )
if len(A ) > 0:
pdb_lines.extend(A )
lowercase__ = aatype.shape[0]
lowercase__ = 1
lowercase__ = 0
lowercase__ = string.ascii_uppercase
lowercase__ = None
# Add all atom sites.
for i in range(A ):
lowercase__ = res_atoa(aatype[i] )
for atom_name, pos, mask, b_factor in zip(A , atom_positions[i] , atom_mask[i] , b_factors[i] ):
if mask < 0.5:
continue
lowercase__ = '''ATOM'''
lowercase__ = atom_name if len(A ) == 4 else f" {atom_name}"
lowercase__ = ''''''
lowercase__ = ''''''
lowercase__ = 1.00
lowercase__ = atom_name[0] # Protein supports only C, N, O, S, this works.
lowercase__ = ''''''
lowercase__ = '''A'''
if chain_index is not None:
lowercase__ = chain_tags[chain_index[i]]
# PDB is a columnar format, every space matters here!
lowercase__ = (
f"{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}"
f"{res_name_a:>3} {chain_tag:>1}"
f"{residue_index[i]:>4}{insertion_code:>1} "
f"{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}"
f"{occupancy:>6.2f}{b_factor:>6.2f} "
f"{element:>2}{charge:>2}"
)
pdb_lines.append(A )
atom_index += 1
lowercase__ = i == n - 1
if chain_index is not None:
if i != n - 1 and chain_index[i + 1] != prev_chain_index:
lowercase__ = True
lowercase__ = chain_index[i + 1]
if should_terminate:
# Close the chain.
lowercase__ = '''TER'''
lowercase__ = (
f"{chain_end:<6}{atom_index:>5} {res_atoa(aatype[i] ):>3} {chain_tag:>1}{residue_index[i]:>4}"
)
pdb_lines.append(A )
atom_index += 1
if i != n - 1:
# "prev" is a misnomer here. This happens at the beginning of
# each new chain.
pdb_lines.extend(get_pdb_headers(A , A ) )
pdb_lines.append('''END''' )
pdb_lines.append('''''' )
return "\n".join(A )
def _SCREAMING_SNAKE_CASE (A ) -> np.ndarray:
"""simple docstring"""
return residue_constants.STANDARD_ATOM_MASK[prot.aatype]
def _SCREAMING_SNAKE_CASE (A , A , A = None , A = None , A = None , A = None , A = None , ) -> Protein:
"""simple docstring"""
return Protein(
aatype=features['''aatype'''] , atom_positions=result['''final_atom_positions'''] , atom_mask=result['''final_atom_mask'''] , residue_index=features['''residue_index'''] + 1 , b_factors=b_factors if b_factors is not None else np.zeros_like(result['''final_atom_mask'''] ) , chain_index=A , remark=A , parents=A , parents_chain_index=A , )
| 2 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {'''configuration_deit''': ['''DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DeiTConfig''', '''DeiTOnnxConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''feature_extraction_deit'''] = ['''DeiTFeatureExtractor''']
    _import_structure['''image_processing_deit'''] = ['''DeiTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_deit'''] = [
'''DEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DeiTForImageClassification''',
'''DeiTForImageClassificationWithTeacher''',
'''DeiTForMaskedImageModeling''',
'''DeiTModel''',
'''DeiTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_deit'''] = [
'''TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFDeiTForImageClassification''',
'''TFDeiTForImageClassificationWithTeacher''',
'''TFDeiTForMaskedImageModeling''',
'''TFDeiTModel''',
'''TFDeiTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 33 |
'''simple docstring'''
from __future__ import annotations
def generate_all_combinations(n: int, k: int) -> list[list[int]]:
    """Generate all k-combinations of the numbers 1..n."""
    result: list[list[int]] = []
    create_all_state(1, n, k, [], result)
    return result


def create_all_state(
    increment: int,
    total_number: int,
    level: int,
    current_list: list[int],
    total_list: list[list[int]],
) -> None:
    if level == 0:
        total_list.append(current_list[:])
        return

    for i in range(increment, total_number - level + 2):
        current_list.append(i)
        create_all_state(i + 1, total_number, level - 1, current_list, total_list)
        current_list.pop()


def print_all_state(total_list: list[list[int]]) -> None:
    for i in total_list:
        print(*i)
if __name__ == "__main__":
    n = 4
    k = 2
    total_list = generate_all_combinations(n, k)
print_all_state(total_list)
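    # Expected output for n=4, k=2 -- the six 2-combinations of {1, 2, 3, 4}:
    #   1 2
    #   1 3
    #   1 4
    #   2 3
    #   2 4
    #   3 4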
| 2 | 0 |
'''simple docstring'''
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def parse_unknown_args(unknown_args):
    return {key.lstrip('''-''' ): value for key, value in zip(unknown_args[::2], unknown_args[1::2])}


def main():
    parser = ArgumentParser(
        '''HuggingFace Datasets CLI tool''', usage='''datasets-cli <command> [<args>]''', allow_abbrev=False
    )
    commands_parser = parser.add_subparsers(help='''datasets-cli command helpers''')
    set_verbosity_info()

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    TestCommand.register_subcommand(commands_parser)
    RunBeamCommand.register_subcommand(commands_parser)
    DummyDataCommand.register_subcommand(commands_parser)

    # Parse args
    args, unknown = parser.parse_known_args()
    if not hasattr(args, '''func'''):
        parser.print_help()
        exit(1)
    kwargs = parse_unknown_args(unknown)

    # Run
    service = args.func(args, **kwargs)
    service.run()
if __name__ == "__main__":
main()
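    # Example shell usage (illustrative; the subcommands come from the classes
    # registered in main(), e.g. `env` prints platform/version diagnostics):
    #
    #   $ datasets-cli env
    #   $ datasets-cli test ./my_dataset --save_infos --all_configs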
| 34 |
'''simple docstring'''
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sol1 import PokerHand
SORTED_HANDS = (
'4S 3H 2C 7S 5H',
'9D 8H 2C 6S 7H',
'2D 6D 9D TH 7D',
'TC 8C 2S JH 6C',
'JH 8S TH AH QH',
'TS KS 5S 9S AC',
'KD 6S 9D TH AD',
'KS 8D 4D 9S 4S', # pair
'8C 4S KH JS 4D', # pair
'QH 8H KD JH 8S', # pair
'KC 4H KS 2H 8D', # pair
'KD 4S KC 3H 8S', # pair
'AH 8S AS KC JH', # pair
'3H 4C 4H 3S 2H', # 2 pairs
'5S 5D 2C KH KH', # 2 pairs
'3C KH 5D 5S KH', # 2 pairs
'AS 3C KH AD KH', # 2 pairs
'7C 7S 3S 7H 5S', # 3 of a kind
'7C 7S KH 2H 7H', # 3 of a kind
'AC KH QH AH AS', # 3 of a kind
'2H 4D 3C AS 5S', # straight (low ace)
'3C 5C 4C 2C 6H', # straight
'6S 8S 7S 5H 9H', # straight
'JS QS 9H TS KH', # straight
'QC KH TS JS AH', # straight (high ace)
'8C 9C 5C 3C TC', # flush
'3S 8S 9S 5S KS', # flush
'4C 5C 9C 8C KC', # flush
'JH 8H AH KH QH', # flush
'3D 2H 3H 2C 2D', # full house
'2H 2C 3S 3H 3D', # full house
'KH KC 3S 3H 3D', # full house
'JC 6H JS JD JH', # 4 of a kind
'JC 7H JS JD JH', # 4 of a kind
'JC KH JS JD JH', # 4 of a kind
'2S AS 4S 5S 3S', # straight flush (low ace)
'2D 6D 3D 4D 5D', # straight flush
'5C 6C 3C 7C 4C', # straight flush
'JH 9H TH KH QH', # straight flush
'JH AH TH KH QH', # royal flush (high ace straight flush)
)
TEST_COMPARE = (
('2H 3H 4H 5H 6H', 'KS AS TS QS JS', 'Loss'),
('2H 3H 4H 5H 6H', 'AS AD AC AH JD', 'Win'),
('AS AH 2H AD AC', 'JS JD JC JH 3D', 'Win'),
('2S AH 2H AS AC', 'JS JD JC JH AD', 'Loss'),
('2S AH 2H AS AC', '2H 3H 5H 6H 7H', 'Win'),
('AS 3S 4S 8S 2S', '2H 3H 5H 6H 7H', 'Win'),
('2H 3H 5H 6H 7H', '2S 3H 4H 5S 6C', 'Win'),
('2S 3H 4H 5S 6C', '3D 4C 5H 6H 2S', 'Tie'),
('2S 3H 4H 5S 6C', 'AH AC 5H 6H AS', 'Win'),
('2S 2H 4H 5S 4C', 'AH AC 5H 6H AS', 'Loss'),
('2S 2H 4H 5S 4C', 'AH AC 5H 6H 7S', 'Win'),
('6S AD 7H 4S AS', 'AH AC 5H 6H 7S', 'Loss'),
('2S AH 4H 5S KC', 'AH AC 5H 6H 7S', 'Loss'),
('2S 3H 6H 7S 9C', '7H 3C TH 6H 9S', 'Loss'),
('4S 5H 6H TS AC', '3S 5H 6H TS AC', 'Win'),
('2S AH 4H 5S 6C', 'AD 4C 5H 6H 2C', 'Tie'),
('AS AH 3H AD AC', 'AS AH 2H AD AC', 'Win'),
('AH AC 5H 5C QS', 'AH AC 5H 5C KS', 'Loss'),
('AH AC 5H 5C QS', 'KH KC 5H 5C QS', 'Win'),
('7C 7S KH 2H 7H', '3C 3S AH 2H 3H', 'Win'),
('3C 3S AH 2H 3H', '7C 7S KH 2H 7H', 'Loss'),
('6H 5H 4H 3H 2H', '5H 4H 3H 2H AH', 'Win'),
('5H 4H 3H 2H AH', '5H 4H 3H 2H AH', 'Tie'),
('5H 4H 3H 2H AH', '6H 5H 4H 3H 2H', 'Loss'),
('AH AD KS KC AC', 'AH KD KH AC KC', 'Win'),
('2H 4D 3C AS 5S', '2H 4D 3C 6S 5S', 'Loss'),
('2H 3S 3C 3H 2S', '3S 3C 2S 2H 2D', 'Win'),
('4D 6D 5D 2D JH', '3S 8S 3H TC KH', 'Loss'),
('4S 6C 8S 3S 7S', 'AD KS 2D 7D 7C', 'Loss'),
('6S 4C 7H 8C 3H', '5H JC AH 9D 9C', 'Loss'),
('9D 9H JH TC QH', '3C 2S JS 5C 7H', 'Win'),
('2H TC 8S AD 9S', '4H TS 7H 2C 5C', 'Win'),
('9D 3S 2C 7S 7C', 'JC TD 3C TC 9H', 'Loss'),
)
TEST_FLUSH = (
('2H 3H 4H 5H 6H', True),
('AS AH 2H AD AC', False),
('2H 3H 5H 6H 7H', True),
('KS AS TS QS JS', True),
('8H 9H QS JS TH', False),
('AS 3S 4S 8S 2S', True),
)
TEST_STRAIGHT = (
('2H 3H 4H 5H 6H', True),
('AS AH 2H AD AC', False),
('2H 3H 5H 6H 7H', False),
('KS AS TS QS JS', True),
('8H 9H QS JS TH', True),
)
TEST_FIVE_HIGH_STRAIGHT = (
('2H 4D 3C AS 5S', True, [5, 4, 3, 2, 14]),
('2H 5D 3C AS 5S', False, [14, 5, 5, 3, 2]),
('JH QD KC AS TS', False, [14, 13, 12, 11, 10]),
('9D 3S 2C 7S 7C', False, [9, 7, 7, 3, 2]),
)
TEST_KIND = (
('JH AH TH KH QH', 0),
('JH 9H TH KH QH', 0),
('JC KH JS JD JH', 7),
('KH KC 3S 3H 3D', 6),
('8C 9C 5C 3C TC', 0),
('JS QS 9H TS KH', 0),
('7C 7S KH 2H 7H', 3),
('3C KH 5D 5S KH', 2),
('QH 8H KD JH 8S', 1),
('2D 6D 9D TH 7D', 0),
)
TEST_TYPES = (
('JH AH TH KH QH', 23),
('JH 9H TH KH QH', 22),
('JC KH JS JD JH', 21),
('KH KC 3S 3H 3D', 20),
('8C 9C 5C 3C TC', 19),
('JS QS 9H TS KH', 18),
('7C 7S KH 2H 7H', 17),
('3C KH 5D 5S KH', 16),
('QH 8H KD JH 8S', 15),
('2D 6D 9D TH 7D', 14),
)
def generate_random_hand():
    """Generate a random hand, an opposing hand, and the expected outcome."""
    play, oppo = randrange(len(SORTED_HANDS)), randrange(len(SORTED_HANDS))
    expected = ['''Loss''', '''Tie''', '''Win'''][(play >= oppo) + (play > oppo)]
    hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected


def generate_random_hands(number_of_hands: int = 100):
    """Generate `number_of_hands` random (hand, other, expected) triples."""
    return (generate_random_hand() for _ in range(number_of_hands))


@pytest.mark.parametrize('''hand, expected''', TEST_FLUSH)
def test_hand_is_flush(hand, expected):
    assert PokerHand(hand)._is_flush() == expected


@pytest.mark.parametrize('''hand, expected''', TEST_STRAIGHT)
def test_hand_is_straight(hand, expected):
    assert PokerHand(hand)._is_straight() == expected


@pytest.mark.parametrize('''hand, expected, card_values''', TEST_FIVE_HIGH_STRAIGHT)
def test_hand_is_five_high_straight(hand, expected, card_values):
    player = PokerHand(hand)
    assert player._is_five_high_straight() == expected
    assert player._card_values == card_values


@pytest.mark.parametrize('''hand, expected''', TEST_KIND)
def test_hand_is_same_kind(hand, expected):
    assert PokerHand(hand)._is_same_kind() == expected


@pytest.mark.parametrize('''hand, expected''', TEST_TYPES)
def test_hand_values(hand, expected):
    assert PokerHand(hand)._hand_type == expected


@pytest.mark.parametrize('''hand, other, expected''', TEST_COMPARE)
def test_compare_simple(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


@pytest.mark.parametrize('''hand, other, expected''', generate_random_hands())
def test_compare_random(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


def test_hand_sorted():
    poker_hands = [PokerHand(hand) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy)
    user_sorted = chain(sorted(list_copy))
    for index, hand in enumerate(user_sorted):
        assert hand == poker_hands[index]


def test_custom_sort_five_high_straight():
    pokerhands = [PokerHand('''2D AC 3H 4H 5S'''), PokerHand('''2S 3H 4H 5S 6C''')]
    pokerhands.sort(reverse=True)
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"


def test_multiple_calls_five_high_straight():
    pokerhand = PokerHand('''2C 4S AS 3D 5C''')
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
    for _ in range(10):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values


def test_euler_project_answer():
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__))
    poker_hands_path = os.path.join(script_dir, '''poker_hands.txt''')
    with open(poker_hands_path) as file_hand:
        for line in file_hand:
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player, opponent = PokerHand(player_hand), PokerHand(opponent_hand)
            output = player.compare_with(opponent)
            if output == "Win":
                answer += 1
    assert answer == 376
| 2 | 0 |
'''simple docstring'''
import argparse
import os
import re
__a = "src/transformers"
# Pattern that looks at the indentation in a line.
__a = re.compile(R"^(\s*)\S")
# Pattern that matches `"key":" and puts `key` in group 0.
__a = re.compile(R"^\s*\"([^\"]+)\":")
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
__a = re.compile(R"^\s*_import_structure\[\"([^\"]+)\"\]")
# Pattern that matches `"key",` and puts `key` in group 0.
__a = re.compile(R"^\s*\"([^\"]+)\",\s*$")
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
__a = re.compile(R"\[([^\]]+)\]")
def __snake_case( _lowerCAmelCase ) -> List[Any]:
snake_case__ : int = _re_indent.search(_lowerCAmelCase )
return "" if search is None else search.groups()[0]
def __snake_case( _lowerCAmelCase , _lowerCAmelCase="" , _lowerCAmelCase=None , _lowerCAmelCase=None ) -> List[str]:
snake_case__ : str = 0
snake_case__ : Union[str, Any] = code.split("""\n""" )
if start_prompt is not None:
while not lines[index].startswith(_lowerCAmelCase ):
index += 1
snake_case__ : Tuple = ["""\n""".join(lines[:index] )]
else:
snake_case__ : List[str] = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
snake_case__ : Optional[int] = [lines[index]]
index += 1
while index < len(_lowerCAmelCase ) and (end_prompt is None or not lines[index].startswith(_lowerCAmelCase )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(_lowerCAmelCase ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + """ """ ):
current_block.append(lines[index] )
blocks.append("""\n""".join(_lowerCAmelCase ) )
if index < len(_lowerCAmelCase ) - 1:
snake_case__ : str = [lines[index + 1]]
index += 1
else:
snake_case__ : int = []
else:
blocks.append("""\n""".join(_lowerCAmelCase ) )
snake_case__ : Optional[Any] = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(_lowerCAmelCase ) > 0:
blocks.append("""\n""".join(_lowerCAmelCase ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(_lowerCAmelCase ):
blocks.append("""\n""".join(lines[index:] ) )
return blocks
def __snake_case( _lowerCAmelCase ) -> Tuple:
def _inner(_lowerCAmelCase ):
return key(_lowerCAmelCase ).lower().replace("""_""" , """""" )
return _inner
def __snake_case( _lowerCAmelCase , _lowerCAmelCase=None ) -> List[Any]:
# If no key is provided, we use a noop.
def noop(_lowerCAmelCase ):
return x
if key is None:
snake_case__ : Optional[int] = noop
# Constants are all uppercase, they go first.
snake_case__ : Optional[int] = [obj for obj in objects if key(_lowerCAmelCase ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
snake_case__ : int = [obj for obj in objects if key(_lowerCAmelCase )[0].isupper() and not key(_lowerCAmelCase ).isupper()]
# Functions begin with a lowercase, they go last.
snake_case__ : str = [obj for obj in objects if not key(_lowerCAmelCase )[0].isupper()]
snake_case__ : List[str] = ignore_underscore(_lowerCAmelCase )
return sorted(_lowerCAmelCase , key=_lowerCAmelCase ) + sorted(_lowerCAmelCase , key=_lowerCAmelCase ) + sorted(_lowerCAmelCase , key=_lowerCAmelCase )
def __snake_case( _lowerCAmelCase ) -> int:
# This inner function sort imports between [ ].
def _replace(_lowerCAmelCase ):
snake_case__ : Union[str, Any] = match.groups()[0]
if "," not in imports:
return f"[{imports}]"
snake_case__ : int = [part.strip().replace("""\"""" , """""" ) for part in imports.split(""",""" )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
snake_case__ : List[str] = keys[:-1]
return "[" + ", ".join([f"\"{k}\"" for k in sort_objects(_lowerCAmelCase )] ) + "]"
snake_case__ : str = import_statement.split("""\n""" )
if len(_lowerCAmelCase ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
snake_case__ : Dict = 2 if lines[1].strip() == """[""" else 1
snake_case__ : str = [(i, _re_strip_line.search(_lowerCAmelCase ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
snake_case__ : str = sort_objects(_lowerCAmelCase , key=lambda _lowerCAmelCase : x[1] )
snake_case__ : Union[str, Any] = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(_lowerCAmelCase ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
snake_case__ : Union[str, Any] = _re_bracket_content.sub(_replace , lines[1] )
else:
snake_case__ : List[Any] = [part.strip().replace("""\"""" , """""" ) for part in lines[1].split(""",""" )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
snake_case__ : List[str] = keys[:-1]
snake_case__ : int = get_indent(lines[1] ) + """, """.join([f"\"{k}\"" for k in sort_objects(_lowerCAmelCase )] )
return "\n".join(_lowerCAmelCase )
else:
# Finally we have to deal with imports fitting on one line
snake_case__ : Optional[Any] = _re_bracket_content.sub(_replace , _lowerCAmelCase )
return import_statement
def __snake_case( _lowerCAmelCase , _lowerCAmelCase=True ) -> Dict:
with open(_lowerCAmelCase , encoding="""utf-8""" ) as f:
snake_case__ : Optional[int] = f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
snake_case__ : Optional[int] = split_code_in_indented_blocks(
_lowerCAmelCase , start_prompt="""_import_structure = {""" , end_prompt="""if TYPE_CHECKING:""" )
# We ignore block 0 (everything untils start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 , len(_lowerCAmelCase ) - 1 ):
# Check if the block contains some `_import_structure`s thingy to sort.
snake_case__ : Optional[Any] = main_blocks[block_idx]
snake_case__ : Dict = block.split("""\n""" )
# Get to the start of the imports.
snake_case__ : Dict = 0
while line_idx < len(_lowerCAmelCase ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
snake_case__ : Union[str, Any] = len(_lowerCAmelCase )
else:
line_idx += 1
if line_idx >= len(_lowerCAmelCase ):
continue
# Ignore beginning and last line: they don't contain anything.
snake_case__ : List[str] = """\n""".join(block_lines[line_idx:-1] )
snake_case__ : str = get_indent(block_lines[1] )
# Slit the internal block into blocks of indent level 1.
snake_case__ : Optional[int] = split_code_in_indented_blocks(_lowerCAmelCase , indent_level=_lowerCAmelCase )
# We have two categories of import key: list or _import_structure[key].append/extend
snake_case__ : Tuple = _re_direct_key if """_import_structure = {""" in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
snake_case__ : Optional[Any] = [(pattern.search(_lowerCAmelCase ).groups()[0] if pattern.search(_lowerCAmelCase ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
snake_case__ : Dict = [(i, key) for i, key in enumerate(_lowerCAmelCase ) if key is not None]
snake_case__ : Union[str, Any] = [x[0] for x in sorted(_lowerCAmelCase , key=lambda _lowerCAmelCase : x[1] )]
# We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
snake_case__ : List[Any] = 0
snake_case__ : Optional[Any] = []
for i in range(len(_lowerCAmelCase ) ):
if keys[i] is None:
reorderded_blocks.append(internal_blocks[i] )
else:
snake_case__ : Optional[Any] = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reorderded_blocks.append(_lowerCAmelCase )
count += 1
# And we put our main block back together with its first and last line.
snake_case__ : Dict = """\n""".join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]] )
if code != "\n".join(_lowerCAmelCase ):
if check_only:
return True
else:
print(f"Overwriting {file}." )
with open(_lowerCAmelCase , """w""" , encoding="""utf-8""" ) as f:
f.write("""\n""".join(_lowerCAmelCase ) )
def __snake_case( _lowerCAmelCase=True ) -> Tuple:
snake_case__ : str = []
for root, _, files in os.walk(_lowerCAmelCase ):
if "__init__.py" in files:
snake_case__ : Union[str, Any] = sort_imports(os.path.join(_lowerCAmelCase , """__init__.py""" ) , check_only=_lowerCAmelCase )
if result:
snake_case__ : Union[str, Any] = [os.path.join(_lowerCAmelCase , """__init__.py""" )]
if len(_lowerCAmelCase ) > 0:
raise ValueError(f"Would overwrite {len(_lowerCAmelCase )} files, run `make style`." )
if __name__ == "__main__":
__a = argparse.ArgumentParser()
parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
__a = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
| 35 |
'''simple docstring'''
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
description=(
'Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned'
' Distillation'
)
)
parser.add_argument('--model_type', default='bert', choices=['bert'])
parser.add_argument('--model_name', default='bert-base-uncased', type=str)
parser.add_argument('--dump_checkpoint', default='serialization_dir/tf_bert-base-uncased_0247911.pth', type=str)
parser.add_argument('--vocab_transform', action='store_true')
    args = parser.parse_args()
if args.model_type == "bert":
        model = BertForMaskedLM.from_pretrained(args.model_name)
        prefix = 'bert'
else:
raise ValueError('args.model_type should be "bert".')
    state_dict = model.state_dict()
    compressed_sd = {}

    for w in ["word_embeddings", "position_embeddings"]:
        compressed_sd[f"""distilbert.embeddings.{w}.weight"""] = state_dict[f"""{prefix}.embeddings.{w}.weight"""]
    for w in ["weight", "bias"]:
        compressed_sd[f"""distilbert.embeddings.LayerNorm.{w}"""] = state_dict[f"""{prefix}.embeddings.LayerNorm.{w}"""]

    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        for w in ["weight", "bias"]:
            compressed_sd[f"""distilbert.transformer.layer.{std_idx}.attention.q_lin.{w}"""] = state_dict[
                f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"""
            ]
            compressed_sd[f"""distilbert.transformer.layer.{std_idx}.attention.k_lin.{w}"""] = state_dict[
                f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"""
            ]
            compressed_sd[f"""distilbert.transformer.layer.{std_idx}.attention.v_lin.{w}"""] = state_dict[
                f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"""
            ]
            compressed_sd[f"""distilbert.transformer.layer.{std_idx}.attention.out_lin.{w}"""] = state_dict[
                f"""{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"""
            ]
            compressed_sd[f"""distilbert.transformer.layer.{std_idx}.sa_layer_norm.{w}"""] = state_dict[
                f"""{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"""
            ]
            compressed_sd[f"""distilbert.transformer.layer.{std_idx}.ffn.lin1.{w}"""] = state_dict[
                f"""{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"""
            ]
            compressed_sd[f"""distilbert.transformer.layer.{std_idx}.ffn.lin2.{w}"""] = state_dict[
                f"""{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"""
            ]
            compressed_sd[f"""distilbert.transformer.layer.{std_idx}.output_layer_norm.{w}"""] = state_dict[
                f"""{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"""
            ]
        std_idx += 1

    compressed_sd['vocab_projector.weight'] = state_dict['cls.predictions.decoder.weight']
    compressed_sd['vocab_projector.bias'] = state_dict['cls.predictions.bias']
    if args.vocab_transform:
        for w in ["weight", "bias"]:
            compressed_sd[f"""vocab_transform.{w}"""] = state_dict[f"""cls.predictions.transform.dense.{w}"""]
            compressed_sd[f"""vocab_layer_norm.{w}"""] = state_dict[f"""cls.predictions.transform.LayerNorm.{w}"""]
print(f"""N layers selected for distillation: {std_idx}""")
print(f"""Number of params transferred for distillation: {len(compressed_sd.keys())}""")
print(f"""Save transferred checkpoint to {args.dump_checkpoint}.""")
torch.save(compressed_sd, args.dump_checkpoint)
| 2 | 0 |
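# --- Added example ---------------------------------------------------------
# The extraction above copies teacher layers [0, 2, 4, 7, 9, 11] into
# consecutive student slots 0..5. A hedged sketch of that key-renaming
# pattern; the helper itself is hypothetical, not part of the script.
def map_layer_key(key, prefix="bert", teacher_layers=(0, 2, 4, 7, 9, 11)):
    for std_idx, teacher_idx in enumerate(teacher_layers):
        old = f"{prefix}.encoder.layer.{teacher_idx}."
        if key.startswith(old):
            return key.replace(old, f"{prefix}.encoder.layer.{std_idx}.", 1)
    return key

print(map_layer_key("bert.encoder.layer.7.output.dense.weight"))
# -> bert.encoder.layer.3.output.dense.weight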
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 36 |
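# --- Added example ---------------------------------------------------------
# Typical usage of the pipeline exported above; "kakaobrain/karlo-v1-alpha"
# is the public unCLIP checkpoint (treat the repo name as an assumption if
# it has since moved).
import torch
from diffusers import UnCLIPPipeline

pipe = UnCLIPPipeline.from_pretrained("kakaobrain/karlo-v1-alpha", torch_dtype=torch.float16)
pipe = pipe.to("cuda")
image = pipe("a photo of a red panda").images[0]
image.save("red_panda.png")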
'''simple docstring'''
from ....utils import logging
lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
class __lowerCAmelCase (lowercase_ ):
'''simple docstring'''
def __init__(self : Optional[int] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[Any]=None , UpperCamelCase : int=2048 ):
'''simple docstring'''
lowercase__ = config.__dict__
lowercase__ = modal_hidden_size
if num_labels:
lowercase__ = num_labels
| 2 | 0 |
'''simple docstring'''
from __future__ import annotations
import math
def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
if depth < 0:
raise ValueError("""Depth cannot be less than 0""" )
if not scores:
raise ValueError("""Scores cannot be empty""" )
if depth == height:
return scores[node_index]
return (
max(
minimax(depth + 1 , node_index * 2 , UpperCamelCase , UpperCamelCase , UpperCamelCase ) , minimax(depth + 1 , node_index * 2 + 1 , UpperCamelCase , UpperCamelCase , UpperCamelCase ) , )
if is_max
else min(
minimax(depth + 1 , node_index * 2 , UpperCamelCase , UpperCamelCase , UpperCamelCase ) , minimax(depth + 1 , node_index * 2 + 1 , UpperCamelCase , UpperCamelCase , UpperCamelCase ) , )
)
def _SCREAMING_SNAKE_CASE ( ):
"""simple docstring"""
lowerCAmelCase__ : Tuple = [90, 23, 6, 33, 21, 65, 123, 34423]
lowerCAmelCase__ : Optional[int] = math.log(len(UpperCamelCase ) , 2 )
print(f"""Optimal value : {minimax(0 , 0 , UpperCamelCase , UpperCamelCase , UpperCamelCase )}""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 37 |
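# --- Added example ---------------------------------------------------------
# Quick usage check, assuming the mangled parameters correspond to
# (depth, node_index, is_max, scores, height), as the recursive calls suggest.
import math

example_scores = [3, 5, 2, 9]  # leaves of a complete binary game tree
example_height = math.log(len(example_scores), 2)  # 2.0
# maximizer moves first: max(min(3, 5), min(2, 9)) == 3
print(minimax(0, 0, True, example_scores, example_height))  # expected: 3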
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : Tuple = logging.get_logger(__name__)
lowerCamelCase : Dict = {
'microsoft/cvt-13': 'https://huggingface.co/microsoft/cvt-13/resolve/main/config.json',
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class __lowerCAmelCase (lowercase_ ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = """cvt"""
def __init__(self : int , UpperCamelCase : List[Any]=3 , UpperCamelCase : int=[7, 3, 3] , UpperCamelCase : str=[4, 2, 2] , UpperCamelCase : Dict=[2, 1, 1] , UpperCamelCase : Dict=[64, 192, 384] , UpperCamelCase : Dict=[1, 3, 6] , UpperCamelCase : Dict=[1, 2, 10] , UpperCamelCase : Any=[4.0, 4.0, 4.0] , UpperCamelCase : Optional[int]=[0.0, 0.0, 0.0] , UpperCamelCase : Optional[int]=[0.0, 0.0, 0.0] , UpperCamelCase : int=[0.0, 0.0, 0.1] , UpperCamelCase : Any=[True, True, True] , UpperCamelCase : int=[False, False, True] , UpperCamelCase : Union[str, Any]=["dw_bn", "dw_bn", "dw_bn"] , UpperCamelCase : Optional[int]=[3, 3, 3] , UpperCamelCase : Tuple=[1, 1, 1] , UpperCamelCase : Any=[2, 2, 2] , UpperCamelCase : Dict=[1, 1, 1] , UpperCamelCase : List[str]=[1, 1, 1] , UpperCamelCase : str=0.02 , UpperCamelCase : int=1E-12 , **UpperCamelCase : Union[str, Any] , ):
'''simple docstring'''
super().__init__(**UpperCamelCase )
lowercase__ = num_channels
lowercase__ = patch_sizes
lowercase__ = patch_stride
lowercase__ = patch_padding
lowercase__ = embed_dim
lowercase__ = num_heads
lowercase__ = depth
lowercase__ = mlp_ratio
lowercase__ = attention_drop_rate
lowercase__ = drop_rate
lowercase__ = drop_path_rate
lowercase__ = qkv_bias
lowercase__ = cls_token
lowercase__ = qkv_projection_method
lowercase__ = kernel_qkv
lowercase__ = padding_kv
lowercase__ = stride_kv
lowercase__ = padding_q
lowercase__ = stride_q
lowercase__ = initializer_range
lowercase__ = layer_norm_eps
| 2 | 0 |
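# --- Added example ---------------------------------------------------------
# Assuming the class above corresponds to transformers' CvtConfig (its
# defaults match the CvT-13 layout), instantiation is straightforward:
from transformers import CvtConfig, CvtModel

config = CvtConfig()       # defaults mirror microsoft/cvt-13
model = CvtModel(config)   # randomly initialized weights
print(config.embed_dim)    # [64, 192, 384]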
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
UpperCAmelCase_ : str = transforms.Compose(
[
transforms.Resize((2_56, 2_56)),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def SCREAMING_SNAKE_CASE_ ( __magic_name__ : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
if isinstance(__magic_name__ , torch.Tensor ):
return image
elif isinstance(__magic_name__ , PIL.Image.Image ):
UpperCamelCase :Union[str, Any] = [image]
UpperCamelCase :Tuple = [trans(img.convert("""RGB""" ) ) for img in image]
UpperCamelCase :int = torch.stack(__magic_name__ )
return image
class _SCREAMING_SNAKE_CASE ( _a ):
def __init__( self : Any , __lowerCamelCase : List[Any] , __lowerCamelCase : Any ):
super().__init__()
# make sure scheduler can always be converted to DDIM
UpperCamelCase :List[Any] = DDIMScheduler.from_config(scheduler.config )
self.register_modules(unet=__lowerCamelCase , scheduler=__lowerCamelCase )
def _A ( self : Dict , __lowerCamelCase : List[str] ):
if strength < 0 or strength > 1:
raise ValueError(F"""The value of strength should in [0.0, 1.0] but is {strength}""" )
def _A ( self : Optional[Any] , __lowerCamelCase : Any , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[Any] ):
# get the original timestep using init_timestep
UpperCamelCase :Optional[Any] = min(int(num_inference_steps * strength ) , __lowerCamelCase )
UpperCamelCase :Dict = max(num_inference_steps - init_timestep , 0 )
UpperCamelCase :str = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def _A ( self : List[str] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : str , __lowerCamelCase : Optional[int] , __lowerCamelCase : str , __lowerCamelCase : Dict , __lowerCamelCase : Optional[int]=None ):
if not isinstance(__lowerCamelCase , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
F"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(__lowerCamelCase )}""" )
UpperCamelCase :Tuple = image.to(device=__lowerCamelCase , dtype=__lowerCamelCase )
if isinstance(__lowerCamelCase , __lowerCamelCase ) and len(__lowerCamelCase ) != batch_size:
raise ValueError(
F"""You have passed a list of generators of length {len(__lowerCamelCase )}, but requested an effective batch"""
F""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
UpperCamelCase :Tuple = init_latents.shape
UpperCamelCase :List[str] = randn_tensor(__lowerCamelCase , generator=__lowerCamelCase , device=__lowerCamelCase , dtype=__lowerCamelCase )
# get latents
print("""add noise to latents at timestep""" , __lowerCamelCase )
UpperCamelCase :List[Any] = self.scheduler.add_noise(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
UpperCamelCase :Tuple = init_latents
return latents
@torch.no_grad()
def __call__( self : Tuple , __lowerCamelCase : Union[torch.FloatTensor, PIL.Image.Image] = None , __lowerCamelCase : float = 0.8 , __lowerCamelCase : int = 1 , __lowerCamelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __lowerCamelCase : float = 0.0 , __lowerCamelCase : int = 50 , __lowerCamelCase : Optional[bool] = None , __lowerCamelCase : Optional[str] = "pil" , __lowerCamelCase : bool = True , ):
self.check_inputs(__lowerCamelCase )
# 2. Preprocess image
UpperCamelCase :Any = preprocess(__lowerCamelCase )
# 3. set timesteps
self.scheduler.set_timesteps(__lowerCamelCase , device=self.device )
UpperCamelCase , UpperCamelCase :Dict = self.get_timesteps(__lowerCamelCase , __lowerCamelCase , self.device )
UpperCamelCase :List[Any] = timesteps[:1].repeat(__lowerCamelCase )
# 4. Prepare latent variables
UpperCamelCase :str = self.prepare_latents(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , self.unet.dtype , self.device , __lowerCamelCase )
UpperCamelCase :List[str] = latents
# 5. Denoising loop
for t in self.progress_bar(__lowerCamelCase ):
# 1. predict noise model_output
UpperCamelCase :List[str] = self.unet(__lowerCamelCase , __lowerCamelCase ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
UpperCamelCase :Union[str, Any] = self.scheduler.step(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , eta=__lowerCamelCase , use_clipped_model_output=__lowerCamelCase , generator=__lowerCamelCase , ).prev_sample
UpperCamelCase :Dict = (image / 2 + 0.5).clamp(0 , 1 )
UpperCamelCase :Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
UpperCamelCase :List[str] = self.numpy_to_pil(__lowerCamelCase )
if not return_dict:
return (image, latent_timestep.item())
return ImagePipelineOutput(images=__lowerCamelCase )
| 38 |
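# --- Added usage sketch ----------------------------------------------------
# Hedged usage of the custom img2img pipeline above. Assumptions: any 256x256
# unconditional UNet2DModel checkpoint works (google/ddpm-celebahq-256 is used
# here), and _SCREAMING_SNAKE_CASE is the pipeline class defined above.
import PIL.Image
from diffusers import DDIMScheduler, UNet2DModel

unet = UNet2DModel.from_pretrained("google/ddpm-celebahq-256")
scheduler = DDIMScheduler(num_train_timesteps=1000)
pipe = _SCREAMING_SNAKE_CASE(unet, scheduler)

init_image = PIL.Image.open("face.png")  # hypothetical input file
result = pipe(image=init_image, strength=0.6, num_inference_steps=50)
result.images[0].save("variation.png")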
'''simple docstring'''
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
# Initialising the CNN
# (Sequential- Building the model layer by layer)
lowerCamelCase : Any = models.Sequential()
# Step 1 - Convolution
# Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
# (3,3) is the kernel size (filter matrix)
classifier.add(
layers.ConvaD(32, (3, 3), input_shape=(64, 64, 3), activation='relu')
)
# Step 2 - Pooling
classifier.add(layers.MaxPoolingaD(pool_size=(2, 2)))
# Adding a second convolutional layer
classifier.add(layers.ConvaD(32, (3, 3), activation='relu'))
classifier.add(layers.MaxPoolingaD(pool_size=(2, 2)))
# Step 3 - Flattening
classifier.add(layers.Flatten())
# Step 4 - Full connection
classifier.add(layers.Dense(units=128, activation='relu'))
classifier.add(layers.Dense(units=1, activation='sigmoid'))
# Compiling the CNN
classifier.compile(
optimizer='adam', loss='binary_crossentropy', metrics=['accuracy']
)
# Part 2 - Fitting the CNN to the images
# Load Trained model weights
# from keras.models import load_model
# regressor=load_model('cnn.h5')
lowerCamelCase : Optional[Any] = tf.keras.preprocessing.image.ImageDataGenerator(
rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
)
lowerCamelCase : Any = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)
lowerCamelCase : List[Any] = train_datagen.flow_from_directory(
'dataset/training_set', target_size=(64, 64), batch_size=32, class_mode='binary'
)
lowerCamelCase : List[str] = test_datagen.flow_from_directory(
'dataset/test_set', target_size=(64, 64), batch_size=32, class_mode='binary'
)
classifier.fit_generator(
training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
)
classifier.save('cnn.h5')
# Part 3 - Making new predictions
lowerCamelCase : List[str] = tf.keras.preprocessing.image.load_img(
'dataset/single_prediction/image.png', target_size=(64, 64)
)
lowerCamelCase : Optional[int] = tf.keras.preprocessing.image.img_to_array(test_image)
lowerCamelCase : str = np.expand_dims(test_image, axis=0)
lowerCamelCase : List[str] = classifier.predict(test_image)
# training_set.class_indices
# the sigmoid output is a probability in [0, 1], so threshold at 0.5
if result[0][0] > 0.5:
    lowerCamelCase : Any = 'Abnormality detected'
else:
    lowerCamelCase : Any = 'Normal'
| 2 | 0 |
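# --- Added example ---------------------------------------------------------
# Reloading the saved model for inference; note the /255.0 rescale to match
# the training generators above (the prediction path above skips it).
import numpy as np
import tensorflow as tf

model = tf.keras.models.load_model('cnn.h5')
img = tf.keras.preprocessing.image.load_img(
    'dataset/single_prediction/image.png', target_size=(64, 64)
)
x = np.expand_dims(tf.keras.preprocessing.image.img_to_array(img), axis=0) / 255.0
print('Abnormality detected' if model.predict(x)[0][0] > 0.5 else 'Normal')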
def __A ( __lowerCAmelCase = 1_000 )-> int:
"""simple docstring"""
_UpperCAmelCase = 2**power
_UpperCAmelCase = str(__lowerCAmelCase )
_UpperCAmelCase = list(__lowerCAmelCase )
_UpperCAmelCase = 0
for i in list_num:
sum_of_num += int(__lowerCAmelCase )
return sum_of_num
if __name__ == "__main__":
_a = int(input('''Enter the power of 2: ''').strip())
print('''2 ^ ''', power, ''' = ''', 2**power)
_a = solution(power)
print('''Sum of the digits is: ''', result)
| 39 |
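# --- Added example ---------------------------------------------------------
# Sanity check: 2^15 = 32768, whose digits sum to 26. The generator
# expression is the idiomatic one-liner the loop above is equivalent to.
print(solution(15))                       # 26
print(sum(int(d) for d in str(2**15)))    # 26, same computation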
'''simple docstring'''
class __lowerCAmelCase : # Public class to implement a graph
'''simple docstring'''
def __init__(self : int , UpperCamelCase : int , UpperCamelCase : int , UpperCamelCase : list[list[bool]] ):
'''simple docstring'''
lowercase__ = row
lowercase__ = col
lowercase__ = graph
def UpperCamelCase__ (self : Optional[int] , UpperCamelCase : int , UpperCamelCase : int , UpperCamelCase : list[list[bool]] ):
'''simple docstring'''
return (
0 <= i < self.ROW
and 0 <= j < self.COL
and not visited[i][j]
and self.graph[i][j]
)
def UpperCamelCase__ (self : int , UpperCamelCase : int , UpperCamelCase : int , UpperCamelCase : list[list[bool]] ):
'''simple docstring'''
lowercase__ = [-1, -1, -1, 0, 0, 1, 1, 1] # Coordinate order
lowercase__ = [-1, 0, 1, -1, 1, -1, 0, 1]
lowercase__ = True # Make those cells visited
for k in range(8 ):
if self.is_safe(i + row_nbr[k] , j + col_nbr[k] , UpperCamelCase ):
self.diffs(i + row_nbr[k] , j + col_nbr[k] , UpperCamelCase )
def UpperCamelCase__ (self : Dict ): # And finally, count all islands.
'''simple docstring'''
lowercase__ = [[False for j in range(self.COL )] for i in range(self.ROW )]
lowercase__ = 0
for i in range(self.ROW ):
for j in range(self.COL ):
if visited[i][j] is False and self.graph[i][j] == 1:
self.diffs(UpperCamelCase , UpperCamelCase , UpperCamelCase )
count += 1
return count
| 2 | 0 |
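# --- Added example ---------------------------------------------------------
# Usage sketch on the classic 5x5 grid. Assumption: the class and its three
# identically-mangled methods correspond to Graph.is_safe, a DFS helper, and
# Graph.count_islands, so the original names are used here.
example_graph = [
    [1, 1, 0, 0, 0],
    [0, 1, 0, 0, 1],
    [1, 0, 0, 1, 1],
    [0, 0, 0, 0, 0],
    [1, 0, 1, 0, 1],
]
g = Graph(len(example_graph), len(example_graph[0]), example_graph)
print(g.count_islands())  # 5 islands under 8-directional connectivity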
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__lowercase = {
"""configuration_ctrl""": ["""CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CTRLConfig"""],
"""tokenization_ctrl""": ["""CTRLTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = [
"""CTRL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CTRLForSequenceClassification""",
"""CTRLLMHeadModel""",
"""CTRLModel""",
"""CTRLPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = [
"""TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFCTRLForSequenceClassification""",
"""TFCTRLLMHeadModel""",
"""TFCTRLModel""",
"""TFCTRLPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
__lowercase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 40 |
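# --- Added example ---------------------------------------------------------
# Usage of the CTRL exports above; "ctrl" is the public Salesforce checkpoint
# and "Links" is one of its control codes.
from transformers import CTRLLMHeadModel, CTRLTokenizer

tokenizer = CTRLTokenizer.from_pretrained("ctrl")
model = CTRLLMHeadModel.from_pretrained("ctrl")
inputs = tokenizer("Links Hello, my dog is cute", return_tensors="pt")
outputs = model.generate(**inputs, max_length=20)
print(tokenizer.decode(outputs[0]))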
'''simple docstring'''
import unittest
from transformers import DonutProcessor
lowerCamelCase : Tuple = 'naver-clova-ix/donut-base'
class __lowerCAmelCase (unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ (self : int ):
'''simple docstring'''
lowercase__ = DonutProcessor.from_pretrained(UpperCamelCase )
def UpperCamelCase__ (self : Tuple ):
'''simple docstring'''
lowercase__ = {
'''name''': '''John Doe''',
'''age''': '''99''',
'''city''': '''Atlanta''',
'''state''': '''GA''',
'''zip''': '''30301''',
'''phone''': '''123-4567''',
'''nicknames''': [{'''nickname''': '''Johnny'''}, {'''nickname''': '''JD'''}],
}
lowercase__ = (
'''<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>'''
'''<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>'''
'''<s_nicknames><s_nickname>Johnny</s_nickname>'''
'''<sep/><s_nickname>JD</s_nickname></s_nicknames>'''
)
lowercase__ = self.processor.tokenajson(UpperCamelCase )
self.assertDictEqual(UpperCamelCase , UpperCamelCase )
| 2 | 0 |
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import SpeechaTextConfig, SpeechaTextForConditionalGeneration
def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> str:
lowerCamelCase__ : Dict = [
"""encoder.version""",
"""decoder.version""",
"""model.encoder.version""",
"""model.decoder.version""",
"""decoder.output_projection.weight""",
"""_float_tensor""",
"""encoder.embed_positions._float_tensor""",
"""decoder.embed_positions._float_tensor""",
]
for k in ignore_keys:
state_dict.pop(UpperCamelCase , UpperCamelCase )
def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> Optional[Any]:
lowerCamelCase__ : Tuple = list(s_dict.keys() )
for key in keys:
if "transformer_layers" in key:
lowerCamelCase__ : Tuple = s_dict.pop(UpperCamelCase )
elif "subsample" in key:
lowerCamelCase__ : List[Any] = s_dict.pop(UpperCamelCase )
def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> Tuple:
lowerCamelCase__ , lowerCamelCase__ : Tuple = emb.weight.shape
lowerCamelCase__ : int = nn.Linear(UpperCamelCase , UpperCamelCase , bias=UpperCamelCase )
lowerCamelCase__ : Optional[Any] = emb.weight.data
return lin_layer
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase ) -> List[Any]:
lowerCamelCase__ : int = torch.load(UpperCamelCase , map_location="""cpu""" )
lowerCamelCase__ : List[Any] = mam_aaa["""args"""]
lowerCamelCase__ : Union[str, Any] = mam_aaa["""model"""]
lowerCamelCase__ : List[str] = state_dict["""decoder.output_projection.weight"""]
remove_ignore_keys_(UpperCamelCase )
rename_keys(UpperCamelCase )
lowerCamelCase__ : List[Any] = state_dict["""decoder.embed_tokens.weight"""].shape[0]
lowerCamelCase__ : int = args.share_decoder_input_output_embed
lowerCamelCase__ : Optional[int] = [int(UpperCamelCase ) for i in args.conv_kernel_sizes.split(""",""" )]
lowerCamelCase__ : List[Any] = SpeechaTextConfig(
vocab_size=UpperCamelCase , max_source_positions=args.max_source_positions , max_target_positions=args.max_target_positions , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="""relu""" , num_conv_layers=len(UpperCamelCase ) , conv_channels=args.conv_channels , conv_kernel_sizes=UpperCamelCase , input_feat_per_channel=args.input_feat_per_channel , input_channels=args.input_channels , tie_word_embeddings=UpperCamelCase , num_beams=5 , max_length=200 , use_cache=UpperCamelCase , decoder_start_token_id=2 , early_stopping=UpperCamelCase , )
lowerCamelCase__ : List[Any] = SpeechaTextForConditionalGeneration(UpperCamelCase )
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = model.model.load_state_dict(UpperCamelCase , strict=UpperCamelCase )
if len(UpperCamelCase ) > 0 and not set(UpperCamelCase ) <= {
"encoder.embed_positions.weights",
"decoder.embed_positions.weights",
}:
raise ValueError(
"""Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"""
f''' but all the following weights are missing {missing}''' )
if tie_embeds:
lowerCamelCase__ : Union[str, Any] = make_linear_from_emb(model.model.decoder.embed_tokens )
else:
lowerCamelCase__ : Optional[Any] = lm_head_weights
model.save_pretrained(UpperCamelCase )
if __name__ == "__main__":
_A : Optional[Any] =argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--fairseq_path''', type=str, help='''Path to the fairseq model (.pt) file.''')
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
_A : str =parser.parse_args()
convert_fairseq_sat_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
| 41 |
'''simple docstring'''
from __future__ import annotations
def _SCREAMING_SNAKE_CASE (A ) -> bool:
"""simple docstring"""
return len(set(A ) ) == len(A )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 2 | 0 |
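# --- Added example ---------------------------------------------------------
# The one-liner above reports whether all elements of a sequence are
# distinct; calling it by its mangled name from this dump:
print(_SCREAMING_SNAKE_CASE([1, 2, 3]))  # True: all elements distinct
print(_SCREAMING_SNAKE_CASE("AAB"))      # False: 'A' repeats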
'''simple docstring'''
from __future__ import annotations
lowercase : Any = 8.988E9 # units = N * m^s * C^-2
def SCREAMING_SNAKE_CASE__ ( __A , __A , __A , __A ) -> dict[str, float]:
_snake_case = abs(chargea * chargea )
if (force, chargea, chargea, distance).count(0 ) != 1:
raise ValueError('One and only one argument must be 0' )
if distance < 0:
raise ValueError('Distance cannot be negative' )
if force == 0:
_snake_case = COULOMBS_CONSTANT * charge_product / (distance**2)
return {"force": force}
elif chargea == 0:
_snake_case = abs(__A ) * (distance**2) / (COULOMBS_CONSTANT * chargea)
return {"charge1": chargea}
elif chargea == 0:
_snake_case = abs(__A ) * (distance**2) / (COULOMBS_CONSTANT * chargea)
return {"charge2": chargea}
elif distance == 0:
_snake_case = (COULOMBS_CONSTANT * charge_product / abs(__A )) ** 0.5
return {"distance": distance}
raise ValueError('Exactly one argument must be 0' )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 42 |
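# --- Added example ---------------------------------------------------------
# Check against a hand computation: with charge1 = 3 C, charge2 = 5 C and
# distance = 2000 m, F = k * 15 / 2000^2 = 33705 N. Assumption: the mangled
# arguments are (force, charge1, charge2, distance), with the unknown as 0.
print(SCREAMING_SNAKE_CASE__(0, 3, 5, 2000))     # {'force': 33705.0}
print(SCREAMING_SNAKE_CASE__(33705.0, 3, 5, 0))  # {'distance': 2000.0}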
'''simple docstring'''
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_ta import TaTokenizer
else:
lowerCamelCase : Any = None
lowerCamelCase : Dict = logging.get_logger(__name__)
lowerCamelCase : Optional[int] = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
lowerCamelCase : List[str] = {
'vocab_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/spiece.model',
't5-base': 'https://huggingface.co/t5-base/resolve/main/spiece.model',
't5-large': 'https://huggingface.co/t5-large/resolve/main/spiece.model',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/spiece.model',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/spiece.model',
},
'tokenizer_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/tokenizer.json',
't5-base': 'https://huggingface.co/t5-base/resolve/main/tokenizer.json',
't5-large': 'https://huggingface.co/t5-large/resolve/main/tokenizer.json',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/tokenizer.json',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/tokenizer.json',
},
}
# TODO(PVP) - this should be removed in Transformers v5
lowerCamelCase : Any = {
't5-small': 512,
't5-base': 512,
't5-large': 512,
't5-3b': 512,
't5-11b': 512,
}
class __lowerCAmelCase (lowercase_ ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = VOCAB_FILES_NAMES
lowerCAmelCase__ : str = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ : int = ["""input_ids""", """attention_mask"""]
lowerCAmelCase__ : Optional[int] = TaTokenizer
lowerCAmelCase__ : List[int] = []
def __init__(self : Dict , UpperCamelCase : str=None , UpperCamelCase : Optional[Any]=None , UpperCamelCase : Any="</s>" , UpperCamelCase : str="<unk>" , UpperCamelCase : List[str]="<pad>" , UpperCamelCase : List[str]=100 , UpperCamelCase : Tuple=None , **UpperCamelCase : List[str] , ):
'''simple docstring'''
if extra_ids > 0 and additional_special_tokens is None:
lowercase__ = [f"<extra_id_{i}>" for i in range(UpperCamelCase )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra special tokens
lowercase__ = len(set(filter(lambda UpperCamelCase : bool('''extra_id_''' in str(UpperCamelCase ) ) , UpperCamelCase ) ) )
if extra_tokens != extra_ids:
raise ValueError(
f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
''' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'''
''' tokens''' )
super().__init__(
UpperCamelCase , tokenizer_file=UpperCamelCase , eos_token=UpperCamelCase , unk_token=UpperCamelCase , pad_token=UpperCamelCase , extra_ids=UpperCamelCase , additional_special_tokens=UpperCamelCase , **UpperCamelCase , )
lowercase__ = vocab_file
lowercase__ = False if not self.vocab_file else True
lowercase__ = extra_ids
@staticmethod
def UpperCamelCase__ (UpperCamelCase : List[Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[Any] ):
'''simple docstring'''
if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
lowercase__ = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
'''This tokenizer was incorrectly instantiated with a model max length of'''
f" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
''' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'''
''' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'''
f" {pretrained_model_name_or_path} automatically truncating your input to"
f" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
f" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
''' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'''
''' instantiate this tokenizer with `model_max_length` set to your preferred value.''' , UpperCamelCase , )
return max_model_length
def UpperCamelCase__ (self : Any , UpperCamelCase : str , UpperCamelCase : Optional[str] = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(UpperCamelCase ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
lowercase__ = os.path.join(
UpperCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase ):
copyfile(self.vocab_file , UpperCamelCase )
logger.info(f"Copy vocab file to {out_vocab_file}" )
return (out_vocab_file,)
def UpperCamelCase__ (self : Any , UpperCamelCase : List[int] , UpperCamelCase : Optional[List[int]] = None ):
'''simple docstring'''
lowercase__ = token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return self.prefix_tokens + token_ids_a
else:
lowercase__ = token_ids_a + [self.eos_token_id]
return self.prefix_tokens + token_ids_a + token_ids_a
def UpperCamelCase__ (self : Optional[Any] , UpperCamelCase : List[int] , UpperCamelCase : Optional[List[int]] = None ):
'''simple docstring'''
lowercase__ = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def UpperCamelCase__ (self : List[Any] ):
'''simple docstring'''
return list(
set(filter(lambda UpperCamelCase : bool(re.search(R'''<extra_id_\d+>''' , UpperCamelCase ) ) is not None , self.additional_special_tokens ) ) )
def UpperCamelCase__ (self : Union[str, Any] ):
'''simple docstring'''
return [self.convert_tokens_to_ids(UpperCamelCase ) for token in self.get_sentinel_tokens()]
| 2 | 0 |
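# --- Added example ---------------------------------------------------------
# The extra_ids handled above become the <extra_id_N> sentinel tokens used
# for span corruption; a short check with the public t5-small checkpoint:
from transformers import T5TokenizerFast

tokenizer = T5TokenizerFast.from_pretrained("t5-small")
ids = tokenizer("The <extra_id_0> walks in <extra_id_1> park").input_ids
print(tokenizer.convert_ids_to_tokens(ids))
print(tokenizer.convert_tokens_to_ids("<extra_id_0>"))  # 32099, top of the vocab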
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__lowercase = {
'''configuration_biogpt''': ['''BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BioGptConfig'''],
'''tokenization_biogpt''': ['''BioGptTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = [
'''BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BioGptForCausalLM''',
'''BioGptForTokenClassification''',
'''BioGptForSequenceClassification''',
'''BioGptModel''',
'''BioGptPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
__lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 43 |
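# --- Added example ---------------------------------------------------------
# Usage of the BioGPT exports above; microsoft/biogpt is the public checkpoint.
from transformers import BioGptForCausalLM, BioGptTokenizer

tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
inputs = tokenizer("COVID-19 is", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=20)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))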
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class __lowerCAmelCase (lowercase_ , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ : Dict = ShapEImgaImgPipeline
lowerCAmelCase__ : List[str] = ["""image"""]
lowerCAmelCase__ : Any = ["""image"""]
lowerCAmelCase__ : Any = [
"""num_images_per_prompt""",
"""num_inference_steps""",
"""generator""",
"""latents""",
"""guidance_scale""",
"""frame_size""",
"""output_type""",
"""return_dict""",
]
lowerCAmelCase__ : Tuple = False
@property
def UpperCamelCase__ (self : List[Any] ):
'''simple docstring'''
return 32
@property
def UpperCamelCase__ (self : str ):
'''simple docstring'''
return 32
@property
def UpperCamelCase__ (self : Union[str, Any] ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def UpperCamelCase__ (self : int ):
'''simple docstring'''
return 8
@property
def UpperCamelCase__ (self : Any ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase__ = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , )
lowercase__ = CLIPVisionModel(UpperCamelCase )
return model
@property
def UpperCamelCase__ (self : List[Any] ):
'''simple docstring'''
lowercase__ = CLIPImageProcessor(
crop_size=224 , do_center_crop=UpperCamelCase , do_normalize=UpperCamelCase , do_resize=UpperCamelCase , image_mean=[0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73] , image_std=[0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11] , resample=3 , size=224 , )
return image_processor
@property
def UpperCamelCase__ (self : str ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase__ = {
'''num_attention_heads''': 2,
'''attention_head_dim''': 16,
'''embedding_dim''': self.time_input_dim,
'''num_embeddings''': 32,
'''embedding_proj_dim''': self.text_embedder_hidden_size,
'''time_embed_dim''': self.time_embed_dim,
'''num_layers''': 1,
'''clip_embed_dim''': self.time_input_dim * 2,
'''additional_embeddings''': 0,
'''time_embed_act_fn''': '''gelu''',
'''norm_in_type''': '''layer''',
'''embedding_proj_norm_type''': '''layer''',
'''encoder_hid_proj_type''': None,
'''added_emb_type''': None,
}
lowercase__ = PriorTransformer(**UpperCamelCase )
return model
@property
def UpperCamelCase__ (self : int ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase__ = {
'''param_shapes''': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'''d_latent''': self.time_input_dim,
'''d_hidden''': self.renderer_dim,
'''n_output''': 12,
'''background''': (
0.1,
0.1,
0.1,
),
}
lowercase__ = ShapERenderer(**UpperCamelCase )
return model
def UpperCamelCase__ (self : Union[str, Any] ):
'''simple docstring'''
lowercase__ = self.dummy_prior
lowercase__ = self.dummy_image_encoder
lowercase__ = self.dummy_image_processor
lowercase__ = self.dummy_renderer
lowercase__ = HeunDiscreteScheduler(
beta_schedule='''exp''' , num_train_timesteps=1024 , prediction_type='''sample''' , use_karras_sigmas=UpperCamelCase , clip_sample=UpperCamelCase , clip_sample_range=1.0 , )
lowercase__ = {
'''prior''': prior,
'''image_encoder''': image_encoder,
'''image_processor''': image_processor,
'''renderer''': renderer,
'''scheduler''': scheduler,
}
return components
def UpperCamelCase__ (self : Optional[int] , UpperCamelCase : Optional[int] , UpperCamelCase : str=0 ):
'''simple docstring'''
lowercase__ = floats_tensor((1, 3, 64, 64) , rng=random.Random(UpperCamelCase ) ).to(UpperCamelCase )
if str(UpperCamelCase ).startswith('''mps''' ):
lowercase__ = torch.manual_seed(UpperCamelCase )
else:
lowercase__ = torch.Generator(device=UpperCamelCase ).manual_seed(UpperCamelCase )
lowercase__ = {
'''image''': input_image,
'''generator''': generator,
'''num_inference_steps''': 1,
'''frame_size''': 32,
'''output_type''': '''np''',
}
return inputs
def UpperCamelCase__ (self : Tuple ):
'''simple docstring'''
lowercase__ = '''cpu'''
lowercase__ = self.get_dummy_components()
lowercase__ = self.pipeline_class(**UpperCamelCase )
lowercase__ = pipe.to(UpperCamelCase )
pipe.set_progress_bar_config(disable=UpperCamelCase )
lowercase__ = pipe(**self.get_dummy_inputs(UpperCamelCase ) )
lowercase__ = output.images[0]
lowercase__ = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
lowercase__ = np.array(
[
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCamelCase__ (self : Optional[Any] ):
'''simple docstring'''
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def UpperCamelCase__ (self : int ):
'''simple docstring'''
lowercase__ = torch_device == '''cpu'''
lowercase__ = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=UpperCamelCase , relax_max_difference=UpperCamelCase , )
def UpperCamelCase__ (self : int ):
'''simple docstring'''
lowercase__ = self.get_dummy_components()
lowercase__ = self.pipeline_class(**UpperCamelCase )
lowercase__ = pipe.to(UpperCamelCase )
pipe.set_progress_bar_config(disable=UpperCamelCase )
lowercase__ = 1
lowercase__ = 2
lowercase__ = self.get_dummy_inputs(UpperCamelCase )
for key in inputs.keys():
if key in self.batch_params:
lowercase__ = batch_size * [inputs[key]]
lowercase__ = pipe(**UpperCamelCase , num_images_per_prompt=UpperCamelCase )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class __lowerCAmelCase (unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ (self : Optional[Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase__ (self : Union[str, Any] ):
'''simple docstring'''
lowercase__ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/corgi.png''' )
lowercase__ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/shap_e/test_shap_e_img2img_out.npy''' )
lowercase__ = ShapEImgaImgPipeline.from_pretrained('''openai/shap-e-img2img''' )
lowercase__ = pipe.to(UpperCamelCase )
pipe.set_progress_bar_config(disable=UpperCamelCase )
lowercase__ = torch.Generator(device=UpperCamelCase ).manual_seed(0 )
lowercase__ = pipe(
UpperCamelCase , generator=UpperCamelCase , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type='''np''' , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(UpperCamelCase , UpperCamelCase )
| 2 | 0 |
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : list ) -> list:
for i in range(len(_lowerCamelCase ) - 1 ,0 ,-1 ):
_lowerCAmelCase : Dict = False
for j in range(_lowerCamelCase ,0 ,-1 ):
if unsorted[j] < unsorted[j - 1]:
_lowerCAmelCase , _lowerCAmelCase : List[str] = unsorted[j - 1], unsorted[j]
_lowerCAmelCase : int = True
for j in range(_lowerCamelCase ):
if unsorted[j] > unsorted[j + 1]:
_lowerCAmelCase , _lowerCAmelCase : Any = unsorted[j + 1], unsorted[j]
_lowerCAmelCase : int = True
if not swapped:
break
return unsorted
if __name__ == "__main__":
import doctest
doctest.testmod()
_a : Tuple = input('Enter numbers separated by a comma:\n').strip()
_a : Optional[Any] = [int(item) for item in user_input.split(',')]
print(F"""{cocktail_shaker_sort(unsorted) = }""")
| 44 |
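# --- Added example ---------------------------------------------------------
# Quick checks of the bidirectional passes above (assumption: the mangled
# name stands for cocktail_shaker_sort, as the __main__ block suggests).
print(cocktail_shaker_sort([4, 5, 2, 1, 2]))  # [1, 2, 2, 4, 5]
print(cocktail_shaker_sort([-4, 0, 8, -3]))   # [-4, -3, 0, 8]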
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
lowerCamelCase : str = {
'configuration_rag': ['RagConfig'],
'retrieval_rag': ['RagRetriever'],
'tokenization_rag': ['RagTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Dict = [
'RagModel',
'RagPreTrainedModel',
'RagSequenceForGeneration',
'RagTokenForGeneration',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Optional[int] = [
'TFRagModel',
'TFRagPreTrainedModel',
'TFRagSequenceForGeneration',
'TFRagTokenForGeneration',
]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
lowerCamelCase : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 2 | 0 |
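# --- Added example ---------------------------------------------------------
# Typical usage of the RAG exports above; facebook/rag-token-nq is the public
# checkpoint (use_dummy_dataset avoids downloading the full wiki index).
from transformers import RagRetriever, RagTokenForGeneration, RagTokenizer

tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
retriever = RagRetriever.from_pretrained(
    "facebook/rag-token-nq", index_name="exact", use_dummy_dataset=True
)
model = RagTokenForGeneration.from_pretrained("facebook/rag-token-nq", retriever=retriever)
inputs = tokenizer("who holds the record in 100m freestyle", return_tensors="pt")
generated = model.generate(input_ids=inputs["input_ids"])
print(tokenizer.batch_decode(generated, skip_special_tokens=True))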
"""simple docstring"""
import argparse
import torch
from transformers import (
SpeechTaConfig,
SpeechTaFeatureExtractor,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaProcessor,
SpeechTaTokenizer,
logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
lowercase_ = logging.get_logger("transformers.models.speecht5")
lowercase_ = {
"speech_encoder_prenet.layer_norm": "speecht5.encoder.prenet.feature_projection.layer_norm",
"speech_encoder_prenet.post_extract_proj": "speecht5.encoder.prenet.feature_projection.projection",
"speech_encoder_prenet.pos_conv.0": "speecht5.encoder.prenet.pos_conv_embed.conv",
"speech_encoder_prenet.mask_emb": "speecht5.encoder.prenet.masked_spec_embed",
}
lowercase_ = {
"text_encoder_prenet.encoder_prenet.0": "speecht5.encoder.prenet.embed_tokens",
"text_encoder_prenet.encoder_prenet.1.alpha": "speecht5.encoder.prenet.encode_positions.alpha",
}
lowercase_ = {
"speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0": "speecht5.decoder.prenet.layers.0",
"speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0": "speecht5.decoder.prenet.layers.1",
"speech_decoder_prenet.decoder_prenet.0.1": "speecht5.decoder.prenet.final_layer",
"speech_decoder_prenet.decoder_prenet.1.alpha": "speecht5.decoder.prenet.encode_positions.alpha",
"speech_decoder_prenet.spkembs_layer.0": "speecht5.decoder.prenet.speaker_embeds_layer",
}
lowercase_ = {
"speech_decoder_postnet.feat_out": "speech_decoder_postnet.feat_out",
"speech_decoder_postnet.prob_out": "speech_decoder_postnet.prob_out",
"speech_decoder_postnet.postnet.postnet.0.0": "speech_decoder_postnet.layers.0.conv",
"speech_decoder_postnet.postnet.postnet.0.1": "speech_decoder_postnet.layers.0.batch_norm",
"speech_decoder_postnet.postnet.postnet.1.0": "speech_decoder_postnet.layers.1.conv",
"speech_decoder_postnet.postnet.postnet.1.1": "speech_decoder_postnet.layers.1.batch_norm",
"speech_decoder_postnet.postnet.postnet.2.0": "speech_decoder_postnet.layers.2.conv",
"speech_decoder_postnet.postnet.postnet.2.1": "speech_decoder_postnet.layers.2.batch_norm",
"speech_decoder_postnet.postnet.postnet.3.0": "speech_decoder_postnet.layers.3.conv",
"speech_decoder_postnet.postnet.postnet.3.1": "speech_decoder_postnet.layers.3.batch_norm",
"speech_decoder_postnet.postnet.postnet.4.0": "speech_decoder_postnet.layers.4.conv",
"speech_decoder_postnet.postnet.postnet.4.1": "speech_decoder_postnet.layers.4.batch_norm",
}
lowercase_ = {
"text_decoder_prenet.embed_tokens": "speecht5.decoder.prenet.embed_tokens",
}
lowercase_ = {
"text_decoder_postnet.output_projection": "text_decoder_postnet.lm_head",
}
lowercase_ = {
"encoder.layers.*.self_attn.k_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj",
"encoder.layers.*.self_attn.v_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj",
"encoder.layers.*.self_attn.q_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj",
"encoder.layers.*.self_attn.out_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj",
"encoder.layers.*.self_attn_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.layer_norm",
"encoder.layers.*.fc1": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense",
"encoder.layers.*.fc2": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense",
"encoder.layers.*.final_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "speecht5.encoder.wrapped_encoder.layer_norm",
"encoder.pos_emb.pe_k": "speecht5.encoder.wrapped_encoder.embed_positions.pe_k",
}
lowercase_ = {
"decoder.layers.*.self_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj",
"decoder.layers.*.self_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj",
"decoder.layers.*.self_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj",
"decoder.layers.*.self_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj",
"decoder.layers.*.self_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm",
"decoder.layers.*.encoder_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj",
"decoder.layers.*.encoder_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj",
"decoder.layers.*.encoder_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj",
"decoder.layers.*.encoder_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj",
"decoder.layers.*.encoder_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm",
"decoder.layers.*.fc1": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense",
"decoder.layers.*.fc2": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense",
"decoder.layers.*.final_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm",
}
lowercase_ = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
lowercase_ = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
lowercase_ = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
lowercase_ = []
lowercase_ = [
"encoder.version",
"encoder.layers.*.norm_k.weight",
"encoder.layers.*.norm_k.bias",
"decoder.version",
"decoder.layers.*.norm_k.weight",
"decoder.layers.*.norm_k.bias",
"decoder.pos_emb.pe_k",
"speech_encoder_prenet.embed_positions._float_tensor",
"text_decoder_prenet.embed_positions._float_tensor",
]
lowercase_ = IGNORE_KEYS + [
"encoder.proj",
"text_encoder_prenet.*",
"speech_decoder_prenet.*",
"speech_decoder_postnet.*",
]
lowercase_ = IGNORE_KEYS + [
"encoder.proj",
"speech_encoder_prenet.*",
"text_decoder_prenet.*",
"text_decoder_postnet.*",
]
lowercase_ = IGNORE_KEYS + [
"encoder.proj",
"text_encoder_prenet.*",
"text_decoder_prenet.*",
"text_decoder_postnet.*",
]
def lowercase ( lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : int , lowerCAmelCase__ : Any ) -> Tuple:
for attribute in key.split('''.''' ):
__a = getattr(lowerCAmelCase__ , lowerCAmelCase__ )
if weight_type is not None:
__a = getattr(lowerCAmelCase__ , lowerCAmelCase__ ).shape
else:
__a = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}''' )
if weight_type == "weight":
__a = value
elif weight_type == "weight_g":
__a = value
elif weight_type == "weight_v":
__a = value
elif weight_type == "bias":
__a = value
elif weight_type == "running_mean":
__a = value
elif weight_type == "running_var":
__a = value
elif weight_type == "num_batches_tracked":
__a = value
else:
__a = value
logger.info(f'''{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.''' )
def lowercase ( lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Any ) -> Optional[Any]:
for key in ignore_keys:
if key.endswith('''.*''' ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
__a , __a = key.split('''.*.''' )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
def lowercase ( lowerCAmelCase__ : Tuple , lowerCAmelCase__ : str , lowerCAmelCase__ : List[str] ) -> int:
__a = []
if task == "s2t":
__a = hf_model.speechta.encoder.prenet.feature_encoder
__a = MAPPING_S2T
__a = IGNORE_KEYS_S2T
elif task == "t2s":
__a = None
__a = MAPPING_T2S
__a = IGNORE_KEYS_T2S
elif task == "s2s":
__a = hf_model.speechta.encoder.prenet.feature_encoder
__a = MAPPING_S2S
__a = IGNORE_KEYS_S2S
else:
raise ValueError(f'''Unsupported task: {task}''' )
for name, value in fairseq_dict.items():
if should_ignore(lowerCAmelCase__ , lowerCAmelCase__ ):
logger.info(f'''{name} was ignored''' )
continue
__a = False
if "conv_layers" in name:
load_conv_layer(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , hf_model.config.feat_extract_norm == '''group''' , )
__a = True
else:
for key, mapped_key in MAPPING.items():
# mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if "*" in key:
__a , __a = key.split('''.*.''' )
if prefix in name and suffix in name:
__a = suffix
# if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
if key in name:
__a = True
if "*" in mapped_key:
__a = name.split(lowerCAmelCase__ )[0].split('''.''' )[-2]
__a = mapped_key.replace('''*''' , lowerCAmelCase__ )
if "weight_g" in name:
__a = '''weight_g'''
elif "weight_v" in name:
__a = '''weight_v'''
elif "bias" in name:
__a = '''bias'''
elif "weight" in name:
__a = '''weight'''
elif "running_mean" in name:
__a = '''running_mean'''
elif "running_var" in name:
__a = '''running_var'''
elif "num_batches_tracked" in name:
__a = '''num_batches_tracked'''
else:
__a = None
set_recursively(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
continue
if not is_used:
unused_weights.append(lowerCAmelCase__ )
logger.warning(f'''Unused weights: {unused_weights}''' )
def lowercase ( lowerCAmelCase__ : Any , lowerCAmelCase__ : str , lowerCAmelCase__ : Any , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Optional[int] ) -> Optional[int]:
__a = full_name.split('''conv_layers.''' )[-1]
__a = name.split('''.''' )
__a = int(items[0] )
__a = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
__a = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
__a = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' )
__a = value
            logger.info(f'''Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' )
__a = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(lowerCAmelCase__ )
@torch.no_grad()
def lowercase ( lowerCAmelCase__ : Any , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Tuple=None , lowerCAmelCase__ : List[Any]=None , lowerCAmelCase__ : List[str]=None , ) -> int:
if config_path is not None:
__a = SpeechTaConfig.from_pretrained(lowerCAmelCase__ )
else:
__a = SpeechTaConfig()
if task == "s2t":
__a = config.max_text_positions
__a = SpeechTaForSpeechToText(lowerCAmelCase__ )
elif task == "t2s":
__a = 1876
__a = 600
__a = config.max_speech_positions
__a = SpeechTaForTextToSpeech(lowerCAmelCase__ )
elif task == "s2s":
__a = 1876
__a = config.max_speech_positions
__a = SpeechTaForSpeechToSpeech(lowerCAmelCase__ )
else:
raise ValueError(f'''Unknown task name: {task}''' )
if vocab_path:
__a = SpeechTaTokenizer(lowerCAmelCase__ , model_max_length=config.max_text_positions )
# Mask token behaves like a normal word, i.e. include the space before it
__a = AddedToken('''<mask>''' , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ )
__a = mask_token
tokenizer.add_special_tokens({'''mask_token''': mask_token} )
tokenizer.add_tokens(['''<ctc_blank>'''] )
__a = SpeechTaFeatureExtractor()
__a = SpeechTaProcessor(tokenizer=lowerCAmelCase__ , feature_extractor=lowerCAmelCase__ )
processor.save_pretrained(lowerCAmelCase__ )
__a = torch.load(lowerCAmelCase__ )
recursively_load_weights(fairseq_checkpoint['''model'''] , lowerCAmelCase__ , lowerCAmelCase__ )
model.save_pretrained(lowerCAmelCase__ )
if repo_id:
print('''Pushing to the hub...''' )
processor.push_to_hub(lowerCAmelCase__ )
model.push_to_hub(lowerCAmelCase__ )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
parser.add_argument(
"--task",
default="s2t",
type=str,
help="Type of the SpeechT5 model you'd like to convert. Should be one of 's2t', 't2s', 's2s'.",
)
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--vocab_path", default=None, type=str, help="Path to SentencePiece model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
lowercase_ = parser.parse_args()
convert_speechta_checkpoint(
args.task,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.vocab_path,
args.push_to_hub,
)
| 45 |
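# --- Added example ---------------------------------------------------------
# The ignore lists above use two wildcard forms: a trailing ".*" (prefix
# match) and an embedded ".*." (prefix plus suffix match). A self-contained
# sketch mirroring should_ignore; the helper name is illustrative.
def matches_ignore(name, ignore_keys):
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False

print(matches_ignore("text_decoder_prenet.embed_tokens.weight", ["text_decoder_prenet.*"]))  # True
print(matches_ignore("encoder.layers.3.norm_k.bias", ["encoder.layers.*.norm_k.bias"]))      # True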
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : int = logging.get_logger(__name__)
lowerCamelCase : List[Any] = {
'google/realm-cc-news-pretrained-embedder': (
'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-encoder': (
'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-scorer': (
'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-openqa': (
'https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json'
),
'google/realm-orqa-nq-openqa': 'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json',
'google/realm-orqa-nq-reader': 'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json',
'google/realm-orqa-wq-openqa': 'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json',
'google/realm-orqa-wq-reader': 'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json',
# See all REALM models at https://huggingface.co/models?filter=realm
}
class __lowerCAmelCase (lowercase_ ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = """realm"""
def __init__(self : str , UpperCamelCase : List[Any]=30522 , UpperCamelCase : List[Any]=768 , UpperCamelCase : int=128 , UpperCamelCase : Any=12 , UpperCamelCase : Tuple=12 , UpperCamelCase : List[Any]=8 , UpperCamelCase : Union[str, Any]=3072 , UpperCamelCase : List[str]="gelu_new" , UpperCamelCase : Any=0.1 , UpperCamelCase : List[str]=0.1 , UpperCamelCase : Dict=512 , UpperCamelCase : Dict=2 , UpperCamelCase : List[Any]=0.02 , UpperCamelCase : List[Any]=1E-12 , UpperCamelCase : Dict=256 , UpperCamelCase : Union[str, Any]=10 , UpperCamelCase : Optional[int]=1E-3 , UpperCamelCase : Tuple=5 , UpperCamelCase : Optional[int]=320 , UpperCamelCase : List[str]=13353718 , UpperCamelCase : Optional[Any]=5000 , UpperCamelCase : str=1 , UpperCamelCase : Union[str, Any]=0 , UpperCamelCase : List[Any]=2 , **UpperCamelCase : int , ):
'''simple docstring'''
super().__init__(pad_token_id=UpperCamelCase , bos_token_id=UpperCamelCase , eos_token_id=UpperCamelCase , **UpperCamelCase )
# Common config
lowercase__ = vocab_size
lowercase__ = max_position_embeddings
lowercase__ = hidden_size
lowercase__ = retriever_proj_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = num_candidates
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = initializer_range
lowercase__ = type_vocab_size
lowercase__ = layer_norm_eps
# Reader config
lowercase__ = span_hidden_size
lowercase__ = max_span_width
lowercase__ = reader_layer_norm_eps
lowercase__ = reader_beam_size
lowercase__ = reader_seq_len
# Retrieval config
lowercase__ = num_block_records
lowercase__ = searcher_beam_size
| 2 | 0 |
"""simple docstring"""
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : Union[str, Any] ):
'''simple docstring'''
for param in module.parameters():
lowerCAmelCase = False
def UpperCAmelCase__ ( ):
'''simple docstring'''
lowerCAmelCase = """cuda""" if torch.cuda.is_available() else """cpu"""
if torch.backends.mps.is_available() and torch.backends.mps.is_built():
lowerCAmelCase = """mps"""
if device == "mps":
print(
"""WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"""
""" errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"""
""" with generations.""" )
return device
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : Dict ):
'''simple docstring'''
lowerCAmelCase = plt.imshow(SCREAMING_SNAKE_CASE )
fig.axes.get_xaxis().set_visible(SCREAMING_SNAKE_CASE )
fig.axes.get_yaxis().set_visible(SCREAMING_SNAKE_CASE )
plt.show()
def UpperCAmelCase__ ( ):
'''simple docstring'''
lowerCAmelCase = datetime.now()
lowerCAmelCase = current_time.strftime("""%H:%M:%S""" )
return timestamp
| 46 |
'''simple docstring'''
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MVP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json',
}
class MvpConfig(PretrainedConfig ):
    '''simple docstring'''
    model_type = "mvp"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(self, vocab_size=50267, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16, encoder_layerdrop=0.0, decoder_layerdrop=0.0, activation_function="gelu", d_model=1024, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, classifier_dropout=0.0, scale_embedding=False, use_cache=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, is_encoder_decoder=True, decoder_start_token_id=2, forced_eos_token_id=2, use_prompt=False, prompt_length=100, prompt_mid_dim=800, **kwargs, ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , forced_eos_token_id=forced_eos_token_id , **kwargs , )
        if self.forced_bos_token_id is None and kwargs.get('force_bos_token_to_be_generated' , False ):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                'The config can simply be saved and uploaded again to be fixed.' )
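if __name__ == "__main__":
    # Minimal usage sketch (illustrative, not part of the original file): the
    # derived attributes follow the encoder settings, and `hidden_size` is
    # resolved to `d_model` through the attribute map.
    config = MvpConfig(encoder_layers=6, decoder_layers=6)
    print(config.num_hidden_layers, config.hidden_size)  # 6 1024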
| 2 | 0 |
'''simple docstring'''
def solution(n: int = 1000 ) -> int:
    """
    Count how many of the first `n` expansions of the continued fraction for
    sqrt(2) have a numerator with more digits than the denominator
    (Project Euler problem 57).
    """
    prev_numerator, prev_denominator = 1, 1
    result = []
    for i in range(1 , n + 1 ):
        numerator = prev_numerator + 2 * prev_denominator
        denominator = prev_numerator + prev_denominator
        if len(str(numerator ) ) > len(str(denominator ) ):
            result.append(i )
        prev_numerator = numerator
        prev_denominator = denominator
    return len(result )
if __name__ == "__main__":
print(f'''{solution() = }''')
| 47 |
'''simple docstring'''
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/spiece.model')
@require_sentencepiece
@require_tokenizers
class DebertaVaTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    tokenizer_class = DebertaVaTokenizer
    rust_tokenizer_class = DebertaVaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self ):
        '''simple docstring'''
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB , unk_token='<unk>' )
        tokenizer.save_pretrained(self.tmpdirname )

    def get_input_output_texts(self , tokenizer ):
        '''simple docstring'''
        input_text = 'this is a test'
        output_text = 'this is a test'
        return input_text, output_text

    def test_convert_token_and_id(self ):
        '''simple docstring'''
        token = '<pad>'
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )

    def test_get_vocab(self ):
        '''simple docstring'''
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , '<pad>' )
        self.assertEqual(vocab_keys[1] , '<unk>' )
        self.assertEqual(vocab_keys[-1] , '[PAD]' )
        self.assertEqual(len(vocab_keys ) , 30001 )

    def test_vocab_size(self ):
        '''simple docstring'''
        self.assertEqual(self.get_tokenizer().vocab_size , 30000 )
def UpperCamelCase__ (self : Optional[Any] ):
'''simple docstring'''
lowercase__ = ''' \tHeLLo!how \n Are yoU? '''
lowercase__ = ['''▁hello''', '''!''', '''how''', '''▁are''', '''▁you''', '''?''']
# fmt: on
lowercase__ = DebertaVaTokenizer(UpperCamelCase , do_lower_case=UpperCamelCase )
lowercase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = DebertaVaTokenizerFast(UpperCamelCase , do_lower_case=UpperCamelCase )
lowercase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
@unittest.skip('''There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.''' )
def UpperCamelCase__ (self : List[Any] ):
'''simple docstring'''
pass
@unittest.skip('''There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.''' )
def UpperCamelCase__ (self : List[str] ):
'''simple docstring'''
pass
def UpperCamelCase__ (self : Optional[int] ):
'''simple docstring'''
lowercase__ = '''I was born in 92000, and this is falsé.'''
lowercase__ = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ]
# fmt: on
lowercase__ = DebertaVaTokenizer(UpperCamelCase , split_by_punct=UpperCamelCase )
lowercase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = DebertaVaTokenizerFast(UpperCamelCase , split_by_punct=UpperCamelCase )
lowercase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
def UpperCamelCase__ (self : int ):
'''simple docstring'''
lowercase__ = '''I was born in 92000, and this is falsé.'''
lowercase__ = ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ]
# fmt: on
lowercase__ = DebertaVaTokenizer(UpperCamelCase , do_lower_case=UpperCamelCase , split_by_punct=UpperCamelCase )
lowercase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = DebertaVaTokenizerFast(UpperCamelCase , do_lower_case=UpperCamelCase , split_by_punct=UpperCamelCase )
lowercase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
def UpperCamelCase__ (self : Optional[Any] ):
'''simple docstring'''
lowercase__ = '''I was born in 92000, and this is falsé.'''
lowercase__ = ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.''', ]
# fmt: on
lowercase__ = DebertaVaTokenizer(UpperCamelCase , do_lower_case=UpperCamelCase , split_by_punct=UpperCamelCase )
lowercase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = DebertaVaTokenizerFast(UpperCamelCase , do_lower_case=UpperCamelCase , split_by_punct=UpperCamelCase )
lowercase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
def UpperCamelCase__ (self : Union[str, Any] ):
'''simple docstring'''
lowercase__ = '''I was born in 92000, and this is falsé.'''
lowercase__ = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ]
# fmt: on
lowercase__ = DebertaVaTokenizer(UpperCamelCase , do_lower_case=UpperCamelCase , split_by_punct=UpperCamelCase )
lowercase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = DebertaVaTokenizerFast(UpperCamelCase , do_lower_case=UpperCamelCase , split_by_punct=UpperCamelCase )
lowercase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
def UpperCamelCase__ (self : Union[str, Any] ):
'''simple docstring'''
lowercase__ = ''' \tHeLLo!how \n Are yoU? '''
lowercase__ = ['''▁''', '''<unk>''', '''e''', '''<unk>''', '''o''', '''!''', '''how''', '''▁''', '''<unk>''', '''re''', '''▁yo''', '''<unk>''', '''?''']
# fmt: on
lowercase__ = DebertaVaTokenizer(UpperCamelCase , do_lower_case=UpperCamelCase , split_by_punct=UpperCamelCase )
lowercase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = DebertaVaTokenizerFast(UpperCamelCase , do_lower_case=UpperCamelCase , split_by_punct=UpperCamelCase )
lowercase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
def UpperCamelCase__ (self : Union[str, Any] ):
'''simple docstring'''
lowercase__ = self.get_tokenizer()
lowercase__ = self.get_rust_tokenizer()
lowercase__ = '''I was born in 92000, and this is falsé.'''
lowercase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) )
lowercase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase )
lowercase__ = rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = self.get_rust_tokenizer()
lowercase__ = tokenizer.encode(UpperCamelCase )
lowercase__ = rust_tokenizer.encode(UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
def UpperCamelCase__ (self : int ):
'''simple docstring'''
lowercase__ = '''This is a test'''
lowercase__ = [13, 1, 4398, 25, 21, 1289]
lowercase__ = ['''▁''', '''T''', '''his''', '''▁is''', '''▁a''', '''▁test''']
lowercase__ = ['''▁''', '''<unk>''', '''his''', '''▁is''', '''▁a''', '''▁test''']
lowercase__ = DebertaVaTokenizer(UpperCamelCase , keep_accents=UpperCamelCase )
lowercase__ = DebertaVaTokenizerFast(UpperCamelCase , keep_accents=UpperCamelCase )
lowercase__ = tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = tokenizer.tokenize(UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = tokenizer.convert_ids_to_tokens(UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = rust_tokenizer.tokenize(UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = rust_tokenizer.convert_ids_to_tokens(UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
# fmt: off
lowercase__ = '''I was born in 92000, and this is falsé.'''
lowercase__ = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9]
lowercase__ = ['''▁''', '''I''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''é''', '''.''', ]
lowercase__ = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.''', ]
# fmt: on
lowercase__ = tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = tokenizer.tokenize(UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = tokenizer.convert_ids_to_tokens(UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = rust_tokenizer.tokenize(UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = rust_tokenizer.convert_ids_to_tokens(UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
def UpperCamelCase__ (self : int ):
'''simple docstring'''
lowercase__ = DebertaVaTokenizer(UpperCamelCase )
lowercase__ = tokenizer.encode('''sequence builders''' )
lowercase__ = tokenizer.encode('''multi-sequence build''' )
lowercase__ = tokenizer.build_inputs_with_special_tokens(UpperCamelCase )
lowercase__ = tokenizer.build_inputs_with_special_tokens(UpperCamelCase , UpperCamelCase )
self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , UpperCamelCase )
self.assertEqual(
[tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , UpperCamelCase , )
@slow
def UpperCamelCase__ (self : int ):
'''simple docstring'''
lowercase__ = {'''input_ids''': [[1, 39867, 36, 19390, 486, 27, 35052, 81436, 18, 60685, 1225, 7, 35052, 81436, 18, 9367, 16899, 18, 15937, 53, 594, 773, 18, 16287, 30465, 36, 15937, 6, 41139, 38, 36979, 60763, 191, 6, 34132, 99, 6, 50538, 390, 43230, 6, 34132, 2779, 20850, 14, 699, 1072, 1194, 36, 382, 10901, 53, 7, 699, 1072, 2084, 36, 20422, 630, 53, 19, 105, 3049, 1896, 1053, 16899, 1506, 11, 37978, 4243, 7, 1237, 31869, 200, 16566, 654, 6, 35052, 81436, 7, 55630, 13593, 4, 2], [1, 26, 15011, 13, 667, 8, 1053, 18, 23611, 1237, 72356, 12820, 34, 104134, 1209, 35, 13313, 6627, 21, 202, 347, 7, 164, 2399, 11, 46, 4485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1232, 2864, 15785, 14951, 105, 5, 8581, 1250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCamelCase , model_name='''microsoft/deberta-v2-xlarge''' , revision='''ad6e42c1532ddf3a15c39246b63f5559d558b670''' , )
| 2 | 0 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)

DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'microsoft/deberta-v2-xlarge': 'https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json',
'microsoft/deberta-v2-xxlarge': 'https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json',
'microsoft/deberta-v2-xlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'
),
'microsoft/deberta-v2-xxlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'
),
}
class DebertaVaConfig(PretrainedConfig ):
    '''simple docstring'''
    model_type = "deberta-v2"

    def __init__( self , vocab_size=12_8100 , hidden_size=1536 , num_hidden_layers=24 , num_attention_heads=24 , intermediate_size=6144 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=0 , initializer_range=0.02 , layer_norm_eps=1e-7 , relative_attention=False , max_relative_positions=-1 , pad_token_id=0 , position_biased_input=True , pos_att_type=None , pooler_dropout=0 , pooler_hidden_act="gelu" , **kwargs , ):
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input
        # Backwards compatibility
        if type(pos_att_type ) == str:
            pos_att_type = [x.strip() for x in pos_att_type.lower().split("|" )]
        self.pos_att_type = pos_att_type
        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.pooler_hidden_size = kwargs.get("pooler_hidden_size" , hidden_size )
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act
class DebertaVaOnnxConfig(OnnxConfig ):
    '''simple docstring'''
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)] )
        else:
            return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)] )

    @property
    def default_onnx_opset( self ) -> int:
        return 12

    def generate_dummy_inputs( self , preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , batch_size: int = -1 , seq_length: int = -1 , num_choices: int = -1 , is_pair: bool = False , framework: Optional["TensorType"] = None , num_channels: int = 3 , image_width: int = 40 , image_height: int = 40 , tokenizer: "PreTrainedTokenizerBase" = None , ) -> Mapping[str, Any]:
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor , framework=framework )
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs
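if __name__ == "__main__":
    # Minimal usage sketch (illustrative, not part of the original file):
    # pair the model config with its ONNX export description.
    config = DebertaVaConfig()
    onnx_config = DebertaVaOnnxConfig(config)
    print(list(onnx_config.inputs))  # type_vocab_size defaults to 0, so no token_type_ids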
| 48 |
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def remove_ignore_keys_(state_dict ) -> None:
    """Drop fairseq bookkeeping keys that have no HF equivalent."""
    ignore_keys = [
        '''encoder.version''',
        '''decoder.version''',
        '''model.encoder.version''',
        '''model.decoder.version''',
        '''_float_tensor''',
        '''decoder.output_projection.weight''',
    ]
    for k in ignore_keys:
        state_dict.pop(k , None )


def make_linear_from_emb(emb ) -> nn.Linear:
    """Build a tied lm_head linear layer from an embedding matrix."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_mbart_checkpoint_from_disk(checkpoint_path , hf_config_path="facebook/mbart-large-en-ro" , finetuned=False , mbart_50=False ) -> MBartForConditionalGeneration:
    """Load a fairseq mBART checkpoint and convert it to a HF model."""
    state_dict = torch.load(checkpoint_path , map_location='''cpu''' )['''model''']
    remove_ignore_keys_(state_dict )
    vocab_size = state_dict['''encoder.embed_tokens.weight'''].shape[0]

    mbart_config = MBartConfig.from_pretrained(hf_config_path , vocab_size=vocab_size )
    if mbart_50 and finetuned:
        mbart_config.activation_function = '''relu'''

    state_dict['''shared.weight'''] = state_dict['''decoder.embed_tokens.weight''']
    model = MBartForConditionalGeneration(mbart_config )
    model.model.load_state_dict(state_dict )

    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared )

    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        'fairseq_path', type=str, help='bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'
    )
    parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument(
        '--hf_config',
        default='facebook/mbart-large-cc25',
        type=str,
        help='Which huggingface architecture to use: mbart-large',
    )
    parser.add_argument('--mbart_50', action='store_true', help='whether the model is a mBART-50 checkpoint')
    parser.add_argument('--finetuned', action='store_true', help='whether the model is a fine-tuned checkpoint')
    args = parser.parse_args()
    model = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_50=args.mbart_50
    )
model.save_pretrained(args.pytorch_dump_folder_path)
| 2 | 0 |
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class PNDMPipelineFastTests(unittest.TestCase ):
    @property
    def dummy_uncond_unet(self ):
        '''simple docstring'''
        torch.manual_seed(0)
        model = UNetaDModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , )
        return model

    def test_inference(self ):
        '''simple docstring'''
        unet = self.dummy_uncond_unet
        scheduler = PNDMScheduler()

        pndm = PNDMPipeline(unet=unet , scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pndm(generator=generator , num_inference_steps=20 , output_type='''numpy''').images

        generator = torch.manual_seed(0)
        image_from_tuple = pndm(generator=generator , num_inference_steps=20 , output_type='''numpy''' , return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2


@slow
@require_torch
class PNDMPipelineIntegrationTests(unittest.TestCase ):
    def test_inference_cifar10(self ):
        '''simple docstring'''
        model_id = '''google/ddpm-cifar10-32'''

        unet = UNetaDModel.from_pretrained(model_id)
        scheduler = PNDMScheduler()

        pndm = PNDMPipeline(unet=unet , scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pndm(generator=generator , output_type='''numpy''').images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.15_64, 0.1_46_45, 0.14_06, 0.1_47_15, 0.1_24_25, 0.1_40_45, 0.1_31_15, 0.1_21_75, 0.1_25])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
| 49 |
'''simple docstring'''
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
logger = logging.getLogger(__name__)
class NER(TokenClassificationTask ):
    '''simple docstring'''

    def __init__(self , label_idx=-1 ):
        '''simple docstring'''
        # In NER datasets, the last column is usually reserved for the label.
        self.label_idx = label_idx

    def read_examples_from_file(self , data_dir , mode: Union[Split, str] ):
        '''simple docstring'''
        if isinstance(mode , Split ):
            mode = mode.value
        file_path = os.path.join(data_dir , f"{mode}.txt" )
        guid_index = 1
        examples = []
        with open(file_path , encoding='''utf-8''' ) as f:
            words = []
            labels = []
            for line in f:
                if line.startswith('''-DOCSTART-''' ) or line == "" or line == "\n":
                    if words:
                        examples.append(InputExample(guid=f"{mode}-{guid_index}" , words=words , labels=labels ) )
                        guid_index += 1
                        words = []
                        labels = []
                else:
                    splits = line.split(''' ''' )
                    words.append(splits[0] )
                    if len(splits ) > 1:
                        labels.append(splits[self.label_idx].replace('''\n''' , '''''' ) )
                    else:
                        # Examples could have no label for mode = "test"
                        labels.append('''O''' )
            if words:
                examples.append(InputExample(guid=f"{mode}-{guid_index}" , words=words , labels=labels ) )
        return examples

    def write_predictions_to_file(self , writer: TextIO , test_input_reader: TextIO , preds_list: List ):
        '''simple docstring'''
        example_id = 0
        for line in test_input_reader:
            if line.startswith('''-DOCSTART-''' ) or line == "" or line == "\n":
                writer.write(line )
                if not preds_list[example_id]:
                    example_id += 1
            elif preds_list[example_id]:
                output_line = line.split()[0] + ''' ''' + preds_list[example_id].pop(0 ) + '''\n'''
                writer.write(output_line )
            else:
                logger.warning('''Maximum sequence length exceeded: No prediction for \'%s\'.''' , line.split()[0] )

    def get_labels(self , path: str ):
        '''simple docstring'''
        if path:
            with open(path , '''r''' ) as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ['''O'''] + labels
            return labels
        else:
            return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class Chunk(NER ):
    '''simple docstring'''

    def __init__(self ):
        '''simple docstring'''
        # In CoNLL-2003 the chunk label is the second-to-last column.
        super().__init__(label_idx=-2 )

    def get_labels(self , path: str ):
        '''simple docstring'''
        if path:
            with open(path , '''r''' ) as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ['''O'''] + labels
            return labels
else:
return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class POS(TokenClassificationTask ):
    '''simple docstring'''

    def read_examples_from_file(self , data_dir , mode: Union[Split, str] ):
        '''simple docstring'''
        if isinstance(mode , Split ):
            mode = mode.value
        file_path = os.path.join(data_dir , f"{mode}.txt" )
        guid_index = 1
        examples = []
        with open(file_path , encoding='''utf-8''' ) as f:
            for sentence in parse_incr(f ):
                words = []
                labels = []
                for token in sentence:
                    words.append(token['''form'''] )
                    labels.append(token['''upos'''] )
                assert len(words ) == len(labels )
                if words:
                    examples.append(InputExample(guid=f"{mode}-{guid_index}" , words=words , labels=labels ) )
                    guid_index += 1
        return examples

    def write_predictions_to_file(self , writer: TextIO , test_input_reader: TextIO , preds_list: List ):
        '''simple docstring'''
        example_id = 0
        for sentence in parse_incr(test_input_reader ):
            s_p = preds_list[example_id]
            out = ''''''
            for token in sentence:
                out += f"{token['form']} ({token['upos']}|{s_p.pop(0 )}) "
            out += "\n"
            writer.write(out )
            example_id += 1

    def get_labels(self , path: str ):
        '''simple docstring'''
        if path:
            with open(path , '''r''' ) as f:
                return f.read().splitlines()
        else:
            return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
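if __name__ == "__main__":
    # Minimal usage sketch (illustrative, not part of the original file): the
    # three tasks differ mainly in which column the label is read from and in
    # their default label sets.
    print(NER().get_labels(None))
    print(len(Chunk().get_labels(None)), len(POS().get_labels(None)))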
| 2 | 0 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class UNetaDOutput(BaseOutput ):
    sample: torch.FloatTensor


class UNetaDModel(ModelMixin , ConfigMixin ):
    @register_to_config
    def __init__( self , sample_size: int = 65536 , sample_rate: Optional[int] = None , in_channels: int = 2 , out_channels: int = 2 , extra_in_channels: int = 0 , time_embedding_type: str = "fourier" , flip_sin_to_cos: bool = True , use_timestep_embedding: bool = False , freq_shift: float = 0.0 , down_block_types: Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , up_block_types: Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , mid_block_type: Tuple[str] = "UNetMidBlock1D" , out_block_type: str = None , block_out_channels: Tuple[int] = (32, 32, 64) , act_fn: str = None , norm_num_groups: int = 8 , layers_per_block: int = 1 , downsample_each_block: bool = False , ) -> None:
        super().__init__()
        self.sample_size = sample_size

        # time
        if time_embedding_type == "fourier":
            self.time_proj = GaussianFourierProjection(
                embedding_size=8 , set_W_to_weight=False , log=False , flip_sin_to_cos=flip_sin_to_cos )
            timestep_input_dim = 2 * block_out_channels[0]
        elif time_embedding_type == "positional":
            self.time_proj = Timesteps(
                block_out_channels[0] , flip_sin_to_cos=flip_sin_to_cos , downscale_freq_shift=freq_shift )
            timestep_input_dim = block_out_channels[0]

        if use_timestep_embedding:
            time_embed_dim = block_out_channels[0] * 4
            self.time_mlp = TimestepEmbedding(
                in_channels=timestep_input_dim , time_embed_dim=time_embed_dim , act_fn=act_fn , out_dim=block_out_channels[0] , )

        self.down_blocks = nn.ModuleList([] )
        self.mid_block = None
        self.up_blocks = nn.ModuleList([] )
        self.out_block = None

        # down
        output_channel = in_channels
        for i, down_block_type in enumerate(down_block_types ):
            input_channel = output_channel
            output_channel = block_out_channels[i]

            if i == 0:
                input_channel += extra_in_channels

            is_final_block = i == len(block_out_channels ) - 1

            down_block = get_down_block(
                down_block_type , num_layers=layers_per_block , in_channels=input_channel , out_channels=output_channel , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , )
            self.down_blocks.append(down_block )

        # mid
        self.mid_block = get_mid_block(
            mid_block_type , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=layers_per_block , add_downsample=downsample_each_block , )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels ) )
        output_channel = reversed_block_out_channels[0]
        if out_block_type is None:
            final_upsample_channels = out_channels
        else:
            final_upsample_channels = block_out_channels[0]

        for i, up_block_type in enumerate(up_block_types ):
            prev_output_channel = output_channel
            output_channel = (
                reversed_block_out_channels[i + 1] if i < len(up_block_types ) - 1 else final_upsample_channels
            )

            is_final_block = i == len(block_out_channels ) - 1

            up_block = get_up_block(
                up_block_type , num_layers=layers_per_block , in_channels=prev_output_channel , out_channels=output_channel , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , )
            self.up_blocks.append(up_block )
            prev_output_channel = output_channel

        # out
        num_groups_out = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32 )
        self.out_block = get_out_block(
            out_block_type=out_block_type , num_groups_out=num_groups_out , embed_dim=block_out_channels[0] , out_channels=out_channels , act_fn=act_fn , fc_dim=block_out_channels[-1] // 4 , )

    def forward( self , sample: torch.FloatTensor , timestep: Union[torch.Tensor, float, int] , return_dict: bool = True , ) -> Union[UNetaDOutput, Tuple]:
        timesteps = timestep
        if not torch.is_tensor(timesteps ):
            timesteps = torch.tensor([timesteps] , dtype=torch.long , device=sample.device )
        elif torch.is_tensor(timesteps ) and len(timesteps.shape ) == 0:
            timesteps = timesteps[None].to(sample.device )

        timestep_embed = self.time_proj(timesteps )
        if self.config.use_timestep_embedding:
            timestep_embed = self.time_mlp(timestep_embed )
        else:
            timestep_embed = timestep_embed[..., None]
            timestep_embed = timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype )
            timestep_embed = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) )

        # 2. down
        down_block_res_samples = ()
        for downsample_block in self.down_blocks:
            sample, res_samples = downsample_block(hidden_states=sample , temb=timestep_embed )
            down_block_res_samples += res_samples

        # 3. mid
        if self.mid_block:
            sample = self.mid_block(sample , timestep_embed )

        # 4. up
        for i, upsample_block in enumerate(self.up_blocks ):
            res_samples = down_block_res_samples[-1:]
            down_block_res_samples = down_block_res_samples[:-1]
            sample = upsample_block(sample , res_hidden_states_tuple=res_samples , temb=timestep_embed )

        # 5. post-process
        if self.out_block:
            sample = self.out_block(sample , timestep_embed )

        if not return_dict:
            return (sample,)

        return UNetaDOutput(sample=sample )
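if __name__ == "__main__":
    # Minimal usage sketch (illustrative, not part of the original file):
    # build the model with its defaults and count parameters. This assumes the
    # default block configuration constructs cleanly; no forward pass is run.
    model = UNetaDModel()
    print(sum(p.numel() for p in model.parameters()))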
| 50 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class MegatronBertConfig(PretrainedConfig ):
    '''simple docstring'''
    model_type = "megatron-bert"

    def __init__(self , vocab_size=29056 , hidden_size=1024 , num_hidden_layers=24 , num_attention_heads=16 , intermediate_size=4096 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=0 , position_embedding_type="absolute" , use_cache=True , **kwargs , ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id , **kwargs )

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
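if __name__ == "__main__":
    # Minimal usage sketch (illustrative, not part of the original file).
    config = MegatronBertConfig(num_hidden_layers=2)
    print(config.model_type, config.num_hidden_layers)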
| 2 | 0 |
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
data = datasets.load_iris()

X = np.array(data["data"])
y = np.array(data["target"])
classes = data["target_names"]
X_train, X_test, y_train, y_test = train_test_split(X, y)


def euclidean_distance(a , b ) -> float:
    """simple docstring"""
    return np.linalg.norm(np.array(a ) - np.array(b ) )


def classifier(train_data , train_target , classes , point , k=5 ) -> str:
    """simple docstring"""
    data = zip(train_data , train_target )
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0] , point )
        distances.append((distance, data_point[1]) )
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances )[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes ).most_common(1 )[0][0]
    return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
| 51 |
'''simple docstring'''
# Lint as: python3
import itertools
import os
import re
_uppercase_uppercase_re = re.compile(R'([A-Z]+)([A-Z][a-z])')
_lowercase_uppercase_re = re.compile(R'([a-z\d])([A-Z])')

_single_underscore_re = re.compile(R'(?<!_)_(?!_)')
_multiple_underscores_re = re.compile(R'(_{2,})')

_split_re = R'^\w+(\.\w+)*$'

INVALID_WINDOWS_CHARACTERS_IN_PATH = R'<>:/\|?*'


def camelcase_to_snakecase(name ):
    """Convert a camel-case string to snake-case."""
    name = _uppercase_uppercase_re.sub(R'''\1_\2''' , name )
    name = _lowercase_uppercase_re.sub(R'''\1_\2''' , name )
    return name.lower()


def snakecase_to_camelcase(name ):
    """Convert a snake-case string to camel-case."""
    name = _single_underscore_re.split(name )
    name = [_multiple_underscores_re.split(n ) for n in name]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(name ) if n != '''''' )


def filename_prefix_for_name(name ):
    """simple docstring"""
    if os.path.basename(name ) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}" )
    return camelcase_to_snakecase(name )


def filename_prefix_for_split(name , split ):
    """simple docstring"""
    if os.path.basename(name ) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}" )
    if not re.match(_split_re , split ):
        raise ValueError(f"Split name should match '{_split_re}'' but got '{split}'." )
    return f"{filename_prefix_for_name(name )}-{split}"


def filepattern_for_dataset_split(dataset_name , split , data_dir , filetype_suffix=None ):
    """simple docstring"""
    prefix = filename_prefix_for_split(dataset_name , split )
    if filetype_suffix:
        prefix += f".{filetype_suffix}"
    filepath = os.path.join(data_dir , prefix )
    return f"{filepath}*"


def filenames_for_dataset_split(path , dataset_name , split , filetype_suffix=None , shard_lengths=None ):
    """simple docstring"""
    prefix = filename_prefix_for_split(dataset_name , split )
    prefix = os.path.join(path , prefix )

    if shard_lengths:
        num_shards = len(shard_lengths )
        filenames = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(num_shards )]
        if filetype_suffix:
            filenames = [filename + f".{filetype_suffix}" for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += f".{filetype_suffix}"
        return [filename]
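if __name__ == "__main__":
    # Minimal usage sketch (illustrative, not part of the original file).
    print(camelcase_to_snakecase("SomeDatasetName"))    # some_dataset_name
    print(filename_prefix_for_split("squad", "train"))  # squad-train
    print(filenames_for_dataset_split("/data", "squad", "train", filetype_suffix="arrow", shard_lengths=[100, 100]))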
| 2 | 0 |
from scipy.stats import spearmanr
import datasets
_DESCRIPTION = """
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
"""
_KWARGS_DESCRIPTION = """
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{'spearmanr': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results['spearmanr'])
-0.7
>>> print(round(results['spearmanr_pvalue'], 2))
0.19
"""
_CITATION = r"""\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Spearmanr(datasets.Metric ):
    def _info( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("float" ),
"references": datasets.Value("float" ),
} ) , reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"] , )
    def _compute( self , predictions , references , return_pvalue=False ):
        '''simple docstring'''
        results = spearmanr(references , predictions )
if return_pvalue:
return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
else:
return {"spearmanr": results[0]}
| 52 |
'''simple docstring'''
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class TrOCRStandaloneDecoderModelTester:
    def __init__(self , parent , vocab_size=99 , batch_size=13 , d_model=16 , decoder_seq_length=7 , is_training=True , is_decoder=True , use_attention_mask=True , use_cache=False , use_labels=True , decoder_start_token_id=2 , decoder_ffn_dim=32 , decoder_layers=4 , decoder_attention_heads=4 , max_position_embeddings=30 , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels

        self.vocab_size = vocab_size
        self.d_model = d_model
        self.hidden_size = d_model
        self.decoder_layers = decoder_layers
        self.num_hidden_layers = decoder_layers
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_attention_heads = decoder_attention_heads
        self.num_attention_heads = decoder_attention_heads
        self.eos_token_id = eos_token_id
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.use_cache = use_cache
        self.max_position_embeddings = max_position_embeddings

        self.scope = None
        self.decoder_key_length = decoder_seq_length
        self.base_model_out_len = 2
        self.decoder_attention_idx = 1

    def prepare_config_and_inputs(self ):
        input_ids = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 )

        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )

        config = TrOCRConfig(
            vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , )

        return (config, input_ids, attention_mask, lm_labels)

    def create_and_check_decoder_model_past(self , config , input_ids , attention_mask , lm_labels , ):
        config.use_cache = True
        model = TrOCRDecoder(config=config ).to(torch_device ).eval()
        input_ids = input_ids[:2]

        input_ids[input_ids == 0] += 1
        # first forward pass
        outputs = model(input_ids , use_cache=True )
        outputs_use_cache_conf = model(input_ids )
        outputs_no_past = model(input_ids , use_cache=False )

        self.parent.assertTrue(len(outputs ) == len(outputs_use_cache_conf ) )
        self.parent.assertTrue(len(outputs ) == len(outputs_no_past ) + 1 )

        past_key_values = outputs['''past_key_values''']

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((2, 1) , config.vocab_size - 1 ) + 1

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens] , dim=-1 )

        output_from_no_past = model(next_input_ids )['''last_hidden_state''']
        output_from_past = model(next_tokens , past_key_values=past_key_values )['''last_hidden_state''']

        # select random slice
        random_slice_idx = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        assert torch.allclose(output_from_past_slice , output_from_no_past_slice , atol=1E-3 )

    def prepare_config_and_inputs_for_common(self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask, lm_labels = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': attention_mask}
        return config, inputs_dict


@require_torch
class TrOCRStandaloneDecoderModelTest(ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (TrOCRForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = {'''text-generation''': TrOCRForCausalLM} if is_torch_available() else {}
    fx_compatible = True
    test_pruning = False

    def setUp(self ):
        self.model_tester = TrOCRStandaloneDecoderModelTester(self , is_training=False )
        self.config_tester = ConfigTester(self , config_class=TrOCRConfig )

    # not implemented currently
    def test_inputs_embeds(self ):
        pass

    def test_save_load_fast_init_from_base(self ):
        pass

    def test_save_load_fast_init_to_base(self ):
        pass

    def test_config(self ):
        self.config_tester.run_common_tests()

    def test_decoder_model_past(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past(*config_and_inputs )

    # decoder cannot keep gradients
    def test_retain_grad_hidden_states_attentions(self ):
        return

    @unittest.skip('''The model doesn\'t support left padding''' )  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self ):
        pass
| 2 | 0 |
'''simple docstring'''
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)


class VisionEncoderDecoderConfig(PretrainedConfig ):
    """simple docstring"""
    model_type = "vision-encoder-decoder"
    is_composition = True

    def __init__( self , **kwargs ):
        super().__init__(**kwargs )
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError(
                f'''A configuration of type {self.model_type} cannot be instantiated because '''
                f'''not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}''' )

        encoder_config = kwargs.pop('encoder' )
        encoder_model_type = encoder_config.pop('model_type' )
        decoder_config = kwargs.pop('decoder' )
        decoder_model_type = decoder_config.pop('model_type' )

        self.encoder = AutoConfig.for_model(encoder_model_type , **encoder_config )
        self.decoder = AutoConfig.for_model(decoder_model_type , **decoder_config )
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs( cls , encoder_config: PretrainedConfig , decoder_config: PretrainedConfig , **kwargs ):
        logger.info('Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config' )
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **kwargs )

    def to_dict( self ):
        output = copy.deepcopy(self.__dict__ )
        output['encoder'] = self.encoder.to_dict()
        output['decoder'] = self.decoder.to_dict()
        output['model_type'] = self.__class__.model_type
        return output


class VisionEncoderDecoderEncoderOnnxConfig(OnnxConfig ):
    """simple docstring"""
    torch_onnx_minimum_version = version.parse("1.11" )

    @property
    def inputs( self ):
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ] )

    @property
    def atol_for_validation( self ):
        return 1e-4

    @property
    def outputs( self ):
        return OrderedDict({'last_hidden_state': {0: 'batch', 1: 'encoder_sequence'}} )


class VisionEncoderDecoderDecoderOnnxConfig(OnnxConfig ):
    """simple docstring"""
    @property
    def inputs( self ):
        common_inputs = OrderedDict()
        common_inputs['input_ids'] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
        common_inputs['attention_mask'] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
        common_inputs['encoder_hidden_states'] = {0: 'batch', 1: 'encoder_sequence'}
        return common_inputs

    def generate_dummy_inputs( self , tokenizer: "PreTrainedTokenizerBase" , batch_size: int = -1 , seq_length: int = -1 , is_pair: bool = False , framework: Optional["TensorType"] = None , ):
        import torch

        common_inputs = OrderedDict()
        dummy_input = super().generate_dummy_inputs(
            tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )

        batch, encoder_sequence = dummy_input['input_ids'].shape
        encoder_hidden_states_shape = (batch, encoder_sequence, self._config.encoder_hidden_size)
        common_inputs['input_ids'] = dummy_input.pop('input_ids' )
        common_inputs['attention_mask'] = dummy_input.pop('attention_mask' )
        common_inputs['encoder_hidden_states'] = torch.zeros(encoder_hidden_states_shape )

        return common_inputs


class VisionEncoderDecoderOnnxConfig(OnnxConfig ):
    """simple docstring"""
    @property
    def inputs( self ):
        pass

    def get_encoder_config( self , encoder_config: PretrainedConfig ):
        return VisionEncoderDecoderEncoderOnnxConfig(encoder_config )

    def get_decoder_config( self , encoder_config: PretrainedConfig , decoder_config: PretrainedConfig , feature: str = "default" ):
        return VisionEncoderDecoderDecoderOnnxConfig(decoder_config , feature )
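if __name__ == "__main__":
    # Minimal usage sketch (illustrative, not part of the original file):
    # compose a config from two sub-configs; assumes ViT and BERT configs are
    # importable from an installed `transformers`.
    from transformers import BertConfig, ViTConfig

    config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(ViTConfig(), BertConfig())
    print(config.model_type, config.decoder.is_decoder)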
| 53 |
'''simple docstring'''
def remove_digit(num: int ) -> int:
    """
    Returns the biggest possible result that can be achieved by removing
    one digit from the given number.

    >>> remove_digit(152)
    52
    >>> remove_digit(6385)
    685
    >>> remove_digit(-11)
    1
    """
    if not isinstance(num , int ):
        raise TypeError('''only integers accepted as input''' )
    else:
        num_str = str(abs(num ) )
        num_transpositions = [list(num_str ) for char in range(len(num_str ) )]
        for index in range(len(num_str ) ):
            num_transpositions[index].pop(index )
        return max(
            int(''''''.join(list(transposition ) ) ) for transposition in num_transpositions )
if __name__ == "__main__":
__import__('doctest').testmod()
| 2 | 0 |
"""simple docstring"""
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
PATTERNS = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
['''memory_attention''', '''encoder_attn'''],
['''attention''', '''attn'''],
['''/''', '''.'''],
['''.LayerNorm.gamma''', '''_layer_norm.weight'''],
['''.LayerNorm.beta''', '''_layer_norm.bias'''],
['''r.layer_''', '''r.layers.'''],
['''output_proj''', '''out_proj'''],
['''ffn.dense_1.''', '''fc2.'''],
['''ffn.dense.''', '''fc1.'''],
['''ffn_layer_norm''', '''final_layer_norm'''],
['''kernel''', '''weight'''],
['''encoder_layer_norm.''', '''encoder.layer_norm.'''],
['''decoder_layer_norm.''', '''decoder.layer_norm.'''],
['''embeddings.weights''', '''shared.weight'''],
]
def UpperCAmelCase__ (lowerCAmelCase_ ):
'''simple docstring'''
for pegasus_name, hf_name in PATTERNS:
__SCREAMING_SNAKE_CASE = k.replace(lowerCAmelCase_ , lowerCAmelCase_ )
return k
def convert_pegasus (tf_weights , cfg_updates ):
    '''simple docstring'''
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates )
    cfg = PegasusConfig(**cfg_kwargs )
    torch_model = PegasusForConditionalGeneration(cfg )
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k )
        if new_k not in sd:
            raise ValueError(f"""could not find new key {new_k} in state dict. (converted from {k})""" )
        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v , dtype=sd[new_k].dtype )
        assert v.shape == sd[new_k].shape, f"""{new_k}, {k}, {v.shape}, {sd[new_k].shape}"""
    # make sure embedding.padding_idx is respected
    mapping["shared.weight"][cfg.pad_token_id] = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1] )
    mapping["encoder.embed_tokens.weight"] = mapping["shared.weight"]
    mapping["decoder.embed_tokens.weight"] = mapping["shared.weight"]
    empty_biases = {k: torch.zeros_like(v ) for k, v in sd.items() if k.endswith("bias" ) and k not in mapping}
    mapping.update(**empty_biases )
    missing, extra = torch_model.model.load_state_dict(mapping , strict=False )
    unexpected_missing = [
        k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
    ]
    assert unexpected_missing == [], f"""no matches found for the following torch keys {unexpected_missing}"""
    assert extra == [], f"""no matches found for the following tf keys {extra}"""
    return torch_model
def UpperCAmelCase__ (lowerCAmelCase_="./ckpt/aeslc/model.ckpt-32000" ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = tf.train.list_variables(lowerCAmelCase_ )
__SCREAMING_SNAKE_CASE = {}
__SCREAMING_SNAKE_CASE = ["Adafactor", "global_step"]
for name, shape in tqdm(lowerCAmelCase_ , desc="converting tf checkpoint to dict" ):
__SCREAMING_SNAKE_CASE = any(pat in name for pat in ignore_name )
if skip_key:
continue
__SCREAMING_SNAKE_CASE = tf.train.load_variable(lowerCAmelCase_ , lowerCAmelCase_ )
__SCREAMING_SNAKE_CASE = array
return tf_weights
def convert_pegasus_ckpt_to_pytorch (ckpt_path , save_dir ):
    '''simple docstring'''
    dataset = Path(ckpt_path ).parent.name
    desired_max_model_length = task_specific_params[f"""summarization_{dataset}"""]["max_position_embeddings"]
    tok = PegasusTokenizer.from_pretrained("sshleifer/pegasus" , model_max_length=desired_max_model_length )
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir )
    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path )
    cfg_updates = task_specific_params[f"""summarization_{dataset}"""]
    if dataset == "large":
        cfg_updates["task_specific_params"] = task_specific_params
    torch_model = convert_pegasus(tf_weights , cfg_updates )
    torch_model.save_pretrained(save_dir )
    sd = torch_model.state_dict()
    sd.pop("model.decoder.embed_positions.weight" )
    sd.pop("model.encoder.embed_positions.weight" )
    torch.save(sd , Path(save_dir ) / "pytorch_model.bin" )
if __name__ == "__main__":
a__ : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''tf_ckpt_path''', type=str, help='''passed to tf.train.list_variables''')
parser.add_argument('''save_dir''', default=None, type=str, help='''Path to the output PyTorch model.''')
a__ : List[str] = parser.parse_args()
if args.save_dir is None:
a__ : int = Path(args.tf_ckpt_path).parent.name
a__ : Tuple = os.path.join('''pegasus''', dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
| 54 |
'''simple docstring'''
import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
import numpy as np
from . import residue_constants
FeatureDict = Mapping[str, np.ndarray]
ModelOutput = Mapping[str, Any]  # Is a nested dict.
PICO_TO_ANGSTROM = 0.01
@dataclasses.dataclass(frozen=True )
class Protein:
    '''simple docstring'''
    atom_positions: np.ndarray  # [num_res, num_atom_type, 3]
    # Amino-acid type for each residue represented as an integer between 0 and
    # 20, where 20 is 'X'.
    aatype: np.ndarray  # [num_res]
    # Binary float mask to indicate presence of a particular atom. 1.0 if an atom
    # is present and 0.0 if not. This should be used for loss masking.
    atom_mask: np.ndarray  # [num_res, num_atom_type]
    # Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
    residue_index: np.ndarray  # [num_res]
    # B-factors, or temperature factors, of each residue (in sq. angstroms units),
    # representing the displacement of the residue from its ground truth mean
    # value.
    b_factors: np.ndarray  # [num_res, num_atom_type]
    # Chain indices for multi-chain predictions
    chain_index: Optional[np.ndarray] = None
    # Optional remark about the protein. Included as a comment in output PDB
    # files
    remark: Optional[str] = None
    # Templates used to generate this protein (prediction-only)
    parents: Optional[Sequence[str]] = None
    # Chain corresponding to each parent
    parents_chain_index: Optional[Sequence[int]] = None
def from_proteinnet_string (proteinnet_str: str ) -> Protein:
    """simple docstring"""
    tag_re = R'''(\[[A-Z]+\]\n)'''
    tags = [tag.strip() for tag in re.split(tag_re , proteinnet_str ) if len(tag ) > 0]
    groups = zip(tags[0::2] , [l.split('''\n''' ) for l in tags[1::2]] )
    atoms = ["N", "CA", "C"]
    aatype = None
    atom_positions = None
    atom_mask = None
    for g in groups:
        if "[PRIMARY]" == g[0]:
            seq = g[1][0].strip()
            for i in range(len(seq ) ):
                if seq[i] not in residue_constants.restypes:
                    seq[i] = '''X'''  # FIXME: strings are immutable
            aatype = np.array(
                [residue_constants.restype_order.get(res_symbol , residue_constants.restype_num ) for res_symbol in seq] )
        elif "[TERTIARY]" == g[0]:
            tertiary = []
            for axis in range(3 ):
                tertiary.append(list(map(float , g[1][axis].split() ) ) )
            tertiary_np = np.array(tertiary )
            atom_positions = np.zeros((len(tertiary[0] ) // 3, residue_constants.atom_type_num, 3) ).astype(np.float32 )
            for i, atom in enumerate(atoms ):
                atom_positions[:, residue_constants.atom_order[atom]] = np.transpose(tertiary_np[:, i::3] )
            atom_positions *= PICO_TO_ANGSTROM
        elif "[MASK]" == g[0]:
            mask = np.array(list(map({'''-''': 0, '''+''': 1}.get , g[1][0].strip() ) ) )
            atom_mask = np.zeros(
                (
                    len(mask ),
                    residue_constants.atom_type_num,
                ) ).astype(np.float32 )
            for i, atom in enumerate(atoms ):
                atom_mask[:, residue_constants.atom_order[atom]] = 1
            atom_mask *= mask[..., None]
    assert aatype is not None
    return Protein(
        atom_positions=atom_positions , atom_mask=atom_mask , aatype=aatype , residue_index=np.arange(len(aatype ) ) , b_factors=None , )
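# Sketch of the ProteinNet text layout this parser expects (values truncated
# for illustration):
#
#   [PRIMARY]
#   MKTAYIAK...
#   [TERTIARY]
#   <x coordinates, whitespace separated, three atoms (N, CA, C) per residue>
#   <y coordinates ...>
#   <z coordinates ...>
#   [MASK]
#   ++++----++++...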
def get_pdb_headers (prot: Protein , chain_id: int = 0 ) -> List[str]:
    """simple docstring"""
    pdb_headers = []
    remark = prot.remark
    if remark is not None:
        pdb_headers.append(f"REMARK {remark}" )
    parents = prot.parents
    parents_chain_index = prot.parents_chain_index
    if parents is not None and parents_chain_index is not None:
        parents = [p for i, p in zip(parents_chain_index , parents ) if i == chain_id]
    if parents is None or len(parents ) == 0:
        parents = ['''N/A''']
    pdb_headers.append(f"PARENT {' '.join(parents )}" )
    return pdb_headers
def add_pdb_headers (prot: Protein , pdb_str: str ) -> str:
    """simple docstring"""
    out_pdb_lines = []
    lines = pdb_str.split('''\n''' )
    remark = prot.remark
    if remark is not None:
        out_pdb_lines.append(f"REMARK {remark}" )
    parents_per_chain: List[List[str]]
    if prot.parents is not None and len(prot.parents ) > 0:
        parents_per_chain = []
        if prot.parents_chain_index is not None:
            parent_dict: Dict[str, List[str]] = {}
            for p, i in zip(prot.parents , prot.parents_chain_index ):
                parent_dict.setdefault(str(i ) , [] )
                parent_dict[str(i )].append(p )
            max_idx = max([int(chain_idx ) for chain_idx in parent_dict] )
            for i in range(max_idx + 1 ):
                chain_parents = parent_dict.get(str(i ) , ['''N/A'''] )
                parents_per_chain.append(chain_parents )
        else:
            parents_per_chain.append(list(prot.parents ) )
    else:
        parents_per_chain = [['''N/A''']]
    def make_parent_line(chain_parents ) -> str:
        return f"PARENT {' '.join(chain_parents )}"
    out_pdb_lines.append(make_parent_line(parents_per_chain[0] ) )
    chain_counter = 0
    for i, l in enumerate(lines ):
        if "PARENT" not in l and "REMARK" not in l:
            out_pdb_lines.append(l )
        if "TER" in l and "END" not in lines[i + 1]:
            chain_counter += 1
            if not chain_counter >= len(parents_per_chain ):
                chain_parents = parents_per_chain[chain_counter]
            else:
                chain_parents = ['''N/A''']
            out_pdb_lines.append(make_parent_line(chain_parents ) )
    return "\n".join(out_pdb_lines )
def to_pdb (prot: Protein ) -> str:
    """simple docstring"""
    restypes = residue_constants.restypes + ['''X''']
    def res_1to3(r: int ) -> str:
        return residue_constants.restype_1to3.get(restypes[r] , '''UNK''' )
    atom_types = residue_constants.atom_types
    pdb_lines = []
    atom_mask = prot.atom_mask
    aatype = prot.aatype
    atom_positions = prot.atom_positions
    residue_index = prot.residue_index.astype(np.int32 )
    b_factors = prot.b_factors
    chain_index = prot.chain_index
    if np.any(aatype > residue_constants.restype_num ):
        raise ValueError('''Invalid aatypes.''' )
    headers = get_pdb_headers(prot )
    if len(headers ) > 0:
        pdb_lines.extend(headers )
    n = aatype.shape[0]
    atom_index = 1
    prev_chain_index = 0
    chain_tags = string.ascii_uppercase
    chain_tag = None
    # Add all atom sites.
    for i in range(n ):
        res_name_3 = res_1to3(aatype[i] )
        for atom_name, pos, mask, b_factor in zip(atom_types , atom_positions[i] , atom_mask[i] , b_factors[i] ):
            if mask < 0.5:
                continue
            record_type = '''ATOM'''
            name = atom_name if len(atom_name ) == 4 else f" {atom_name}"
            alt_loc = ''''''
            insertion_code = ''''''
            occupancy = 1.00
            element = atom_name[0]  # Protein supports only C, N, O, S, this works.
            charge = ''''''
            chain_tag = '''A'''
            if chain_index is not None:
                chain_tag = chain_tags[chain_index[i]]
            # PDB is a columnar format, every space matters here!
            atom_line = (
                f"{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}"
                f"{res_name_3:>3} {chain_tag:>1}"
                f"{residue_index[i]:>4}{insertion_code:>1}   "
                f"{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}"
                f"{occupancy:>6.2f}{b_factor:>6.2f}          "
                f"{element:>2}{charge:>2}"
            )
            pdb_lines.append(atom_line )
            atom_index += 1
        should_terminate = i == n - 1
        if chain_index is not None:
            if i != n - 1 and chain_index[i + 1] != prev_chain_index:
                should_terminate = True
                prev_chain_index = chain_index[i + 1]
        if should_terminate:
            # Close the chain.
            chain_end = '''TER'''
            chain_termination_line = (
                f"{chain_end:<6}{atom_index:>5}      {res_1to3(aatype[i] ):>3} {chain_tag:>1}{residue_index[i]:>4}"
            )
            pdb_lines.append(chain_termination_line )
            atom_index += 1
            if i != n - 1:
                # "prev" is a misnomer here. This happens at the beginning of
                # each new chain.
                pdb_lines.extend(get_pdb_headers(prot , prev_chain_index ) )
    pdb_lines.append('''END''' )
    pdb_lines.append('''''' )
    return "\n".join(pdb_lines )
def ideal_atom_mask (prot: Protein ) -> np.ndarray:
    """simple docstring"""
    return residue_constants.STANDARD_ATOM_MASK[prot.aatype]
def from_prediction (features: FeatureDict , result: ModelOutput , b_factors = None , chain_index = None , remark = None , parents = None , parents_chain_index = None , ) -> Protein:
    """simple docstring"""
    return Protein(
        aatype=features['''aatype'''] , atom_positions=result['''final_atom_positions'''] , atom_mask=result['''final_atom_mask'''] , residue_index=features['''residue_index'''] + 1 , b_factors=b_factors if b_factors is not None else np.zeros_like(result['''final_atom_mask'''] ) , chain_index=chain_index , remark=remark , parents=parents , parents_chain_index=parents_chain_index , )
| 2 | 0 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
SEWConfig,
SEWForCTC,
SEWModel,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"""post_extract_proj""": """feature_projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.upsample.0""": """encoder.upsample.projection""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """layer_norm""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
def set_recursively ( hf_pointer , key , value , full_name , weight_type ):
    for attribute in key.split("." ):
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
        f''' {value.shape} for {full_name}'''
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def recursively_load_weights ( fairseq_model , hf_model , is_finetuned ):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == "group" , )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "sew." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key
                if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split("." )[-2]
                        mapped_key = mapped_key.replace("*" , layer_index )
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type )
                    continue
        if not is_used:
            unused_weights.append(name )
    logger.warning(f'''Unused weights: {unused_weights}''' )
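# Illustrative mapping produced by the wildcard logic above (the key is
# hypothetical): a fairseq weight named
#   "encoder.layers.3.fc1.weight"
# matches the "fc1" pattern, so the "*" in
#   "encoder.layers.*.feed_forward.intermediate_dense"
# is replaced by "3" before set_recursively() assigns the tensor.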
def load_conv_layer ( full_name , value , feature_extractor , unused_weights , use_group_norm ):
    name = full_name.split("conv_layers." )[-1]
    items = name.split("." )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f'''{full_name} has size {value.shape}, but'''
                f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f'''{full_name} has size {value.shape}, but'''
                f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f'''{full_name} has size {value.shape}, but'''
                f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
    else:
        unused_weights.append(full_name )
def convert_config ( model , is_finetuned ):
    config = SEWConfig()
    if is_finetuned:
        fs_config = model.w2v_encoder.w2v_model.cfg
    else:
        fs_config = model.cfg
    config.conv_bias = fs_config.conv_bias
    conv_layers = eval(fs_config.conv_feature_layers )
    config.conv_dim = [x[0] for x in conv_layers]
    config.conv_kernel = [x[1] for x in conv_layers]
    config.conv_stride = [x[2] for x in conv_layers]
    config.feat_extract_activation = "gelu"
    config.feat_extract_norm = "layer" if fs_config.extractor_mode == "layer_norm" else "group"
    config.final_dropout = 0.0
    config.hidden_act = fs_config.activation_fn.name
    config.hidden_size = fs_config.encoder_embed_dim
    config.initializer_range = 0.02
    config.intermediate_size = fs_config.encoder_ffn_embed_dim
    config.layer_norm_eps = 1E-5
    config.layerdrop = fs_config.encoder_layerdrop
    config.num_attention_heads = fs_config.encoder_attention_heads
    config.num_conv_pos_embedding_groups = fs_config.conv_pos_groups
    config.num_conv_pos_embeddings = fs_config.conv_pos
    config.num_feat_extract_layers = len(conv_layers )
    config.num_hidden_layers = fs_config.encoder_layers
    config.squeeze_factor = fs_config.squeeze_factor
    # take care of any params that are overridden by the Wav2VecCtc model
    if is_finetuned:
        fs_config = model.cfg
        config.final_dropout = fs_config.final_dropout
        config.layerdrop = fs_config.layerdrop
    config.activation_dropout = fs_config.activation_dropout
    config.apply_spec_augment = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
    config.attention_dropout = fs_config.attention_dropout
    config.feat_proj_dropout = fs_config.dropout_input
    config.hidden_dropout = fs_config.dropout
    config.mask_feature_length = fs_config.mask_channel_length
    config.mask_feature_prob = fs_config.mask_channel_prob
    config.mask_time_length = fs_config.mask_length
    config.mask_time_prob = fs_config.mask_prob
    config.feature_extractor_type = "Wav2Vec2FeatureExtractor"
    config.tokenizer_class = "Wav2Vec2CTCTokenizer"
    return config
@torch.no_grad()
def convert_sew_checkpoint ( checkpoint_path , pytorch_dump_folder_path , config_path=None , dict_path=None , is_finetuned=True ):
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
    if config_path is not None:
        config = SEWConfig.from_pretrained(config_path )
    else:
        config = convert_config(model[0] , is_finetuned )
    model = model[0].eval()
    return_attention_mask = True if config.feat_extract_norm == "layer" else False
    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=True , return_attention_mask=return_attention_mask , )
    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path )
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            target_dict.indices[target_dict.bos_word] = target_dict.pad_index
            target_dict.indices[target_dict.pad_word] = target_dict.bos_index
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols )
            vocab_path = os.path.join(pytorch_dump_folder_path , "vocab.json" )
            if not os.path.isdir(pytorch_dump_folder_path ):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path ) )
                return
            os.makedirs(pytorch_dump_folder_path , exist_ok=True )
            with open(vocab_path , "w" , encoding="utf-8" ) as vocab_handle:
                json.dump(target_dict.indices , vocab_handle )
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=False , )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor , tokenizer=tokenizer )
            processor.save_pretrained(pytorch_dump_folder_path )
        hf_model = SEWForCTC(config )
    else:
        hf_model = SEWModel(config )
        feature_extractor.save_pretrained(pytorch_dump_folder_path )
    recursively_load_weights(model , hf_model , is_finetuned )
    hf_model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--is_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
    args = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
)
| 55 |
'''simple docstring'''
from __future__ import annotations
def generate_all_combinations (n: int , k: int ) -> list[list[int]]:
    """simple docstring"""
    result: list[list[int]] = []
    create_all_state(1 , n , k , [] , result )
    return result
def create_all_state (increment: int , total_number: int , level: int , current_list: list[int] , total_list: list[list[int]] , ) -> None:
    """simple docstring"""
    if level == 0:
        total_list.append(current_list[:] )
        return
    for i in range(increment , total_number - level + 2 ):
        current_list.append(i )
        create_all_state(i + 1 , total_number , level - 1 , current_list , total_list )
        current_list.pop()
def print_all_state (total_list: list[list[int]] ) -> None:
    """simple docstring"""
    for i in total_list:
        print(*i )
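# Quick sanity check: choosing 2 items out of 1..4 yields the 6 combinations
# [1, 2], [1, 3], [1, 4], [2, 3], [2, 4], [3, 4].
assert generate_all_combinations(4 , 2 ) == [[1, 2], [1, 3], [1, 4], [2, 3], [2, 4], [3, 4]]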
if __name__ == "__main__":
    n = 4
    k = 2
    total_list = generate_all_combinations(n, k)
print_all_state(total_list)
| 2 | 0 |
'''simple docstring'''
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
parser.add_argument(
'--original_config_file',
type=str,
required=True,
help='The YAML config file corresponding to the original architecture.',
)
parser.add_argument(
'--num_in_channels',
default=None,
type=int,
help='The number of input channels. If `None` number of input channels will be automatically inferred.',
)
parser.add_argument(
'--image_size',
default=512,
type=int,
help=(
            'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2'
' Base. Use 768 for Stable Diffusion v2.'
),
)
parser.add_argument(
'--extract_ema',
action='store_true',
help=(
'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'
' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'
' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'
),
)
parser.add_argument(
'--upcast_attention',
action='store_true',
help=(
'Whether the attention computation should always be upcasted. This is necessary when running stable'
' diffusion 2.1.'
),
)
parser.add_argument(
'--from_safetensors',
action='store_true',
help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.',
)
parser.add_argument(
'--to_safetensors',
action='store_true',
help='Whether to store pipeline in safetensors format or not.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
    def parse_bool ( string ) -> bool:
        '''simple docstring'''
        if string == "True":
            return True
        elif string == "False":
            return False
        else:
            raise ValueError(F"could not parse string as bool {string}" )
parser.add_argument(
'--use_linear_projection', help='Override for use linear projection', required=False, type=parse_bool
)
parser.add_argument('--cross_attention_dim', help='Override for cross attention_dim', required=False, type=int)
    args = parser.parse_args()
    controlnet = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 56 |
'''simple docstring'''
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sol1 import PokerHand
SORTED_HANDS = (
'4S 3H 2C 7S 5H',
'9D 8H 2C 6S 7H',
'2D 6D 9D TH 7D',
'TC 8C 2S JH 6C',
'JH 8S TH AH QH',
'TS KS 5S 9S AC',
'KD 6S 9D TH AD',
'KS 8D 4D 9S 4S', # pair
'8C 4S KH JS 4D', # pair
'QH 8H KD JH 8S', # pair
'KC 4H KS 2H 8D', # pair
'KD 4S KC 3H 8S', # pair
'AH 8S AS KC JH', # pair
'3H 4C 4H 3S 2H', # 2 pairs
'5S 5D 2C KH KH', # 2 pairs
'3C KH 5D 5S KH', # 2 pairs
'AS 3C KH AD KH', # 2 pairs
'7C 7S 3S 7H 5S', # 3 of a kind
'7C 7S KH 2H 7H', # 3 of a kind
'AC KH QH AH AS', # 3 of a kind
'2H 4D 3C AS 5S', # straight (low ace)
'3C 5C 4C 2C 6H', # straight
'6S 8S 7S 5H 9H', # straight
'JS QS 9H TS KH', # straight
'QC KH TS JS AH', # straight (high ace)
'8C 9C 5C 3C TC', # flush
'3S 8S 9S 5S KS', # flush
'4C 5C 9C 8C KC', # flush
'JH 8H AH KH QH', # flush
'3D 2H 3H 2C 2D', # full house
'2H 2C 3S 3H 3D', # full house
'KH KC 3S 3H 3D', # full house
'JC 6H JS JD JH', # 4 of a kind
'JC 7H JS JD JH', # 4 of a kind
'JC KH JS JD JH', # 4 of a kind
'2S AS 4S 5S 3S', # straight flush (low ace)
'2D 6D 3D 4D 5D', # straight flush
'5C 6C 3C 7C 4C', # straight flush
'JH 9H TH KH QH', # straight flush
'JH AH TH KH QH', # royal flush (high ace straight flush)
)
TEST_COMPARE = (
('2H 3H 4H 5H 6H', 'KS AS TS QS JS', 'Loss'),
('2H 3H 4H 5H 6H', 'AS AD AC AH JD', 'Win'),
('AS AH 2H AD AC', 'JS JD JC JH 3D', 'Win'),
('2S AH 2H AS AC', 'JS JD JC JH AD', 'Loss'),
('2S AH 2H AS AC', '2H 3H 5H 6H 7H', 'Win'),
('AS 3S 4S 8S 2S', '2H 3H 5H 6H 7H', 'Win'),
('2H 3H 5H 6H 7H', '2S 3H 4H 5S 6C', 'Win'),
('2S 3H 4H 5S 6C', '3D 4C 5H 6H 2S', 'Tie'),
('2S 3H 4H 5S 6C', 'AH AC 5H 6H AS', 'Win'),
('2S 2H 4H 5S 4C', 'AH AC 5H 6H AS', 'Loss'),
('2S 2H 4H 5S 4C', 'AH AC 5H 6H 7S', 'Win'),
('6S AD 7H 4S AS', 'AH AC 5H 6H 7S', 'Loss'),
('2S AH 4H 5S KC', 'AH AC 5H 6H 7S', 'Loss'),
('2S 3H 6H 7S 9C', '7H 3C TH 6H 9S', 'Loss'),
('4S 5H 6H TS AC', '3S 5H 6H TS AC', 'Win'),
('2S AH 4H 5S 6C', 'AD 4C 5H 6H 2C', 'Tie'),
('AS AH 3H AD AC', 'AS AH 2H AD AC', 'Win'),
('AH AC 5H 5C QS', 'AH AC 5H 5C KS', 'Loss'),
('AH AC 5H 5C QS', 'KH KC 5H 5C QS', 'Win'),
('7C 7S KH 2H 7H', '3C 3S AH 2H 3H', 'Win'),
('3C 3S AH 2H 3H', '7C 7S KH 2H 7H', 'Loss'),
('6H 5H 4H 3H 2H', '5H 4H 3H 2H AH', 'Win'),
('5H 4H 3H 2H AH', '5H 4H 3H 2H AH', 'Tie'),
('5H 4H 3H 2H AH', '6H 5H 4H 3H 2H', 'Loss'),
('AH AD KS KC AC', 'AH KD KH AC KC', 'Win'),
('2H 4D 3C AS 5S', '2H 4D 3C 6S 5S', 'Loss'),
('2H 3S 3C 3H 2S', '3S 3C 2S 2H 2D', 'Win'),
('4D 6D 5D 2D JH', '3S 8S 3H TC KH', 'Loss'),
('4S 6C 8S 3S 7S', 'AD KS 2D 7D 7C', 'Loss'),
('6S 4C 7H 8C 3H', '5H JC AH 9D 9C', 'Loss'),
('9D 9H JH TC QH', '3C 2S JS 5C 7H', 'Win'),
('2H TC 8S AD 9S', '4H TS 7H 2C 5C', 'Win'),
('9D 3S 2C 7S 7C', 'JC TD 3C TC 9H', 'Loss'),
)
TEST_FLUSH = (
('2H 3H 4H 5H 6H', True),
('AS AH 2H AD AC', False),
('2H 3H 5H 6H 7H', True),
('KS AS TS QS JS', True),
('8H 9H QS JS TH', False),
('AS 3S 4S 8S 2S', True),
)
TEST_STRAIGHT = (
('2H 3H 4H 5H 6H', True),
('AS AH 2H AD AC', False),
('2H 3H 5H 6H 7H', False),
('KS AS TS QS JS', True),
('8H 9H QS JS TH', True),
)
TEST_FIVE_HIGH_STRAIGHT = (
('2H 4D 3C AS 5S', True, [5, 4, 3, 2, 14]),
('2H 5D 3C AS 5S', False, [14, 5, 5, 3, 2]),
('JH QD KC AS TS', False, [14, 13, 12, 11, 10]),
('9D 3S 2C 7S 7C', False, [9, 7, 7, 3, 2]),
)
TEST_KIND = (
('JH AH TH KH QH', 0),
('JH 9H TH KH QH', 0),
('JC KH JS JD JH', 7),
('KH KC 3S 3H 3D', 6),
('8C 9C 5C 3C TC', 0),
('JS QS 9H TS KH', 0),
('7C 7S KH 2H 7H', 3),
('3C KH 5D 5S KH', 2),
('QH 8H KD JH 8S', 1),
('2D 6D 9D TH 7D', 0),
)
TEST_TYPES = (
('JH AH TH KH QH', 23),
('JH 9H TH KH QH', 22),
('JC KH JS JD JH', 21),
('KH KC 3S 3H 3D', 20),
('8C 9C 5C 3C TC', 19),
('JS QS 9H TS KH', 18),
('7C 7S KH 2H 7H', 17),
('3C KH 5D 5S KH', 16),
('QH 8H KD JH 8S', 15),
('2D 6D 9D TH 7D', 14),
)
def generate_random_hand ():
    """simple docstring"""
    play , oppo = randrange(len(SORTED_HANDS ) ), randrange(len(SORTED_HANDS ) )
    expected = ['''Loss''', '''Tie''', '''Win'''][(play >= oppo) + (play > oppo)]
    hand , other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected
def generate_random_hands (number_of_hands: int = 100 ):
    """simple docstring"""
    return (generate_random_hand() for _ in range(number_of_hands ))
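# Each generated triple looks like
#   ("2H 3H 4H 5H 6H", "KS AS TS QS JS", "Loss")
# because SORTED_HANDS is ordered weakest-to-strongest, so the expected
# outcome can be read off the two indices alone.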
@pytest.mark.parametrize('''hand, expected''' , TEST_FLUSH )
def test_hand_is_flush (hand , expected ):
    """simple docstring"""
    assert PokerHand(hand )._is_flush() == expected
@pytest.mark.parametrize('''hand, expected''' , TEST_STRAIGHT )
def test_hand_is_straight (hand , expected ):
    """simple docstring"""
    assert PokerHand(hand )._is_straight() == expected
@pytest.mark.parametrize('''hand, expected, card_values''' , TEST_FIVE_HIGH_STRAIGHT )
def test_hand_is_five_high_straight (hand , expected , card_values ):
    """simple docstring"""
    player = PokerHand(hand )
    assert player._is_five_high_straight() == expected
    assert player._card_values == card_values
@pytest.mark.parametrize('''hand, expected''' , TEST_KIND )
def test_hand_is_same_kind (hand , expected ):
    """simple docstring"""
    assert PokerHand(hand )._is_same_kind() == expected
@pytest.mark.parametrize('''hand, expected''' , TEST_TYPES )
def test_hand_values (hand , expected ):
    """simple docstring"""
    assert PokerHand(hand )._hand_type == expected
@pytest.mark.parametrize('''hand, other, expected''' , TEST_COMPARE )
def test_compare_simple (hand , other , expected ):
    """simple docstring"""
    assert PokerHand(hand ).compare_with(PokerHand(other ) ) == expected
@pytest.mark.parametrize('''hand, other, expected''' , generate_random_hands() )
def test_compare_random (hand , other , expected ):
    """simple docstring"""
    assert PokerHand(hand ).compare_with(PokerHand(other ) ) == expected
def test_hand_sorted ():
    """simple docstring"""
    poker_hands = [PokerHand(hand ) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy )
    user_sorted = chain(sorted(list_copy ) )
    for index, hand in enumerate(user_sorted ):
        assert hand == poker_hands[index]
def test_custom_sort_five_high_straight ():
    """simple docstring"""
    pokerhands = [PokerHand('''2D AC 3H 4H 5S''' ), PokerHand('''2S 3H 4H 5S 6C''' )]
    pokerhands.sort(reverse=True )
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def test_multiple_calls_five_high_straight ():
    """simple docstring"""
    pokerhand = PokerHand('''2C 4S AS 3D 5C''' )
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
    for _ in range(10 ):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values
def test_euler_project ():
    """simple docstring"""
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__ ) )
    poker_hands_path = os.path.join(script_dir , '''poker_hands.txt''' )
    with open(poker_hands_path ) as file_hand:
        for line in file_hand:
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player , opponent = PokerHand(player_hand ), PokerHand(opponent_hand )
            output = player.compare_with(opponent )
            if output == "Win":
                answer += 1
    assert answer == 376
| 2 | 0 |
"""simple docstring"""
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def main ( ):
    '''simple docstring'''
    parser = ArgumentParser("Transformers CLI tool" , usage="transformers-cli <command> [<args>]" )
    commands_parser = parser.add_subparsers(help="transformers-cli command helpers" )
    # Register commands
    ConvertCommand.register_subcommand(commands_parser )
    DownloadCommand.register_subcommand(commands_parser )
    EnvironmentCommand.register_subcommand(commands_parser )
    RunCommand.register_subcommand(commands_parser )
    ServeCommand.register_subcommand(commands_parser )
    UserCommands.register_subcommand(commands_parser )
    AddNewModelCommand.register_subcommand(commands_parser )
    AddNewModelLikeCommand.register_subcommand(commands_parser )
    LfsCommands.register_subcommand(commands_parser )
    PTtoTFCommand.register_subcommand(commands_parser )
    # Let's go
    args = parser.parse_args()
    if not hasattr(args , "func" ):
        parser.print_help()
        exit(1 )
    # Run
    service = args.func(args )
    service.run()
if __name__ == "__main__":
main()
| 57 |
'''simple docstring'''
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
description=(
'Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned'
' Distillation'
)
)
parser.add_argument('--model_type', default='bert', choices=['bert'])
parser.add_argument('--model_name', default='bert-base-uncased', type=str)
parser.add_argument('--dump_checkpoint', default='serialization_dir/tf_bert-base-uncased_0247911.pth', type=str)
parser.add_argument('--vocab_transform', action='store_true')
    args = parser.parse_args()
    if args.model_type == "bert":
        model = BertForMaskedLM.from_pretrained(args.model_name)
        prefix = 'bert'
    else:
        raise ValueError('args.model_type should be "bert".')
    state_dict = model.state_dict()
    compressed_sd = {}
    for w in ["word_embeddings", "position_embeddings"]:
        compressed_sd[f"""distilbert.embeddings.{w}.weight"""] = state_dict[f"""{prefix}.embeddings.{w}.weight"""]
    for w in ["weight", "bias"]:
        compressed_sd[f"""distilbert.embeddings.LayerNorm.{w}"""] = state_dict[f"""{prefix}.embeddings.LayerNorm.{w}"""]
    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        for w in ["weight", "bias"]:
            compressed_sd[f"""distilbert.transformer.layer.{std_idx}.attention.q_lin.{w}"""] = state_dict[
                f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"""
            ]
            compressed_sd[f"""distilbert.transformer.layer.{std_idx}.attention.k_lin.{w}"""] = state_dict[
                f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"""
            ]
            compressed_sd[f"""distilbert.transformer.layer.{std_idx}.attention.v_lin.{w}"""] = state_dict[
                f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"""
            ]
            compressed_sd[f"""distilbert.transformer.layer.{std_idx}.attention.out_lin.{w}"""] = state_dict[
                f"""{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"""
            ]
            compressed_sd[f"""distilbert.transformer.layer.{std_idx}.sa_layer_norm.{w}"""] = state_dict[
                f"""{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"""
            ]
            compressed_sd[f"""distilbert.transformer.layer.{std_idx}.ffn.lin1.{w}"""] = state_dict[
                f"""{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"""
            ]
            compressed_sd[f"""distilbert.transformer.layer.{std_idx}.ffn.lin2.{w}"""] = state_dict[
                f"""{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"""
            ]
            compressed_sd[f"""distilbert.transformer.layer.{std_idx}.output_layer_norm.{w}"""] = state_dict[
                f"""{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"""
            ]
        std_idx += 1
    compressed_sd["vocab_projector.weight"] = state_dict['cls.predictions.decoder.weight']
    compressed_sd["vocab_projector.bias"] = state_dict['cls.predictions.bias']
    if args.vocab_transform:
        for w in ["weight", "bias"]:
            compressed_sd[f"""vocab_transform.{w}"""] = state_dict[f"""cls.predictions.transform.dense.{w}"""]
            compressed_sd[f"""vocab_layer_norm.{w}"""] = state_dict[f"""cls.predictions.transform.LayerNorm.{w}"""]
    print(f"""N layers selected for distillation: {std_idx}""")
    print(f"""Number of params transferred for distillation: {len(compressed_sd.keys())}""")
    print(f"""Save transferred checkpoint to {args.dump_checkpoint}.""")
    torch.save(compressed_sd, args.dump_checkpoint)
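    # The loop above keeps six of the twelve teacher layers: BERT layers
    # [0, 2, 4, 7, 9, 11] become student layers [0, 1, 2, 3, 4, 5] via std_idx.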
| 2 | 0 |
'''simple docstring'''
import operator as op
lowercase_ = """scaler.pt"""
lowercase_ = """pytorch_model"""
lowercase_ = """random_states"""
lowercase_ = """optimizer"""
lowercase_ = """scheduler"""
lowercase_ = """pytorch_model.bin"""
lowercase_ = """pytorch_model.bin.index.json"""
lowercase_ = """model.safetensors"""
lowercase_ = """model.safetensors.index.json"""
lowercase_ = """1.10.2"""
lowercase_ = """py38"""
lowercase_ = """4.17.0"""
lowercase_ = ["""ml.p3.16xlarge""", """ml.p3dn.24xlarge""", """ml.p4dn.24xlarge"""]
lowercase_ = ["""FULL_SHARD""", """SHARD_GRAD_OP""", """NO_SHARD""", """HYBRID_SHARD""", """HYBRID_SHARD_ZERO2"""]
lowercase_ = ["""TRANSFORMER_BASED_WRAP""", """SIZE_BASED_WRAP""", """NO_WRAP"""]
lowercase_ = ["""BACKWARD_PRE""", """BACKWARD_POST""", """NO_PREFETCH"""]
lowercase_ = ["""FULL_STATE_DICT""", """LOCAL_STATE_DICT""", """SHARDED_STATE_DICT"""]
lowercase_ = """2.0.1"""
lowercase_ = ["""pdsh""", """standard""", """openmpi""", """mvapich"""]
lowercase_ = ["""default""", """reduce-overhead""", """max-autotune"""]
lowercase_ = {""">""": op.gt, """>=""": op.ge, """==""": op.eq, """!=""": op.ne, """<=""": op.le, """<""": op.lt}
# These are the args for `torch.distributed.launch` for pytorch < 1.9
lowercase_ = [
"""nnodes""",
"""nproc_per_node""",
"""rdzv_backend""",
"""rdzv_endpoint""",
"""rdzv_id""",
"""rdzv_conf""",
"""standalone""",
"""max_restarts""",
"""monitor_interval""",
"""start_method""",
"""role""",
"""module""",
"""m""",
"""no_python""",
"""run_path""",
"""log_dir""",
"""r""",
"""redirects""",
"""t""",
"""tee""",
"""node_rank""",
"""master_addr""",
"""master_port""",
]
lowercase_ = ["""DEEPSPEED""", """MULTI_GPU""", """FSDP""", """MEGATRON_LM"""]
lowercase_ = ["""DEEPSPEED""", """MULTI_XPU""", """FSDP"""]
| 58 |
'''simple docstring'''
from ....utils import logging
logger = logging.get_logger(__name__)
class MMBTConfig:
    '''simple docstring'''
    def __init__(self : Optional[int] , config : Union[str, Any] , num_labels : Optional[Any] = None , modal_hidden_size : int = 2048 ):
        '''simple docstring'''
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels
| 2 | 0 |
import os
def UpperCamelCase ( __lowerCamelCase : str = "input.txt" ):
with open(os.path.join(os.path.dirname(__lowerCamelCase ) , __lowerCamelCase ) ) as input_file:
snake_case : Dict = [
[int(__lowerCamelCase ) for element in line.split("," )]
for line in input_file.readlines()
]
snake_case : Tuple = len(__lowerCamelCase )
snake_case : Optional[int] = len(matrix[0] )
snake_case : Tuple = [[-1 for _ in range(__lowerCamelCase )] for _ in range(__lowerCamelCase )]
for i in range(__lowerCamelCase ):
snake_case : List[str] = matrix[i][0]
for j in range(1 , __lowerCamelCase ):
for i in range(__lowerCamelCase ):
snake_case : Any = minimal_path_sums[i][j - 1] + matrix[i][j]
for i in range(1 , __lowerCamelCase ):
snake_case : Optional[int] = min(
minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] )
for i in range(rows - 2 , -1 , -1 ):
snake_case : Tuple = min(
minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] )
return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums )
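# Worked 2x2 example of the column-by-column relaxation above (no file
# needed): for matrix = [[1, 9], [5, 1]] the rightward pass gives
# minimal_path_sums = [[1, 10], [5, 6]]; the downward and upward passes
# cannot improve either entry, so the answer is min(10, 6) = 6.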
if __name__ == "__main__":
print(F'{solution() = }')
| 59 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
CVT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'microsoft/cvt-13': 'https://huggingface.co/microsoft/cvt-13/resolve/main/config.json',
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class CvtConfig (PretrainedConfig ):
    '''simple docstring'''
    model_type = """cvt"""
    def __init__(self : int , num_channels=3 , patch_sizes=[7, 3, 3] , patch_stride=[4, 2, 2] , patch_padding=[2, 1, 1] , embed_dim=[64, 192, 384] , num_heads=[1, 3, 6] , depth=[1, 2, 10] , mlp_ratio=[4.0, 4.0, 4.0] , attention_drop_rate=[0.0, 0.0, 0.0] , drop_rate=[0.0, 0.0, 0.0] , drop_path_rate=[0.0, 0.0, 0.1] , qkv_bias=[True, True, True] , cls_token=[False, False, True] , qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"] , kernel_qkv=[3, 3, 3] , padding_kv=[1, 1, 1] , stride_kv=[2, 2, 2] , padding_q=[1, 1, 1] , stride_q=[1, 1, 1] , initializer_range=0.02 , layer_norm_eps=1E-12 , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
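# Usage sketch: the defaults above encode the three-stage CvT-13 layout, so
# `CvtConfig()` yields embed_dim=[64, 192, 384] and depth=[1, 2, 10];
# per-stage values can be overridden, e.g. `CvtConfig(embed_dim=[64, 192, 768])`.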
| 2 | 0 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys ( config , base_model=False ):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''vit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
('''cls_token''', '''vit.embeddings.cls_token'''),
('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
('''pos_embed''', '''vit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
('''pre_logits.fc.weight''', '''pooler.dense.weight'''),
('''pre_logits.fc.bias''', '''pooler.dense.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
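# Illustrative pairs emitted by create_rename_keys (layer 0, classification
# variant; not exhaustive):
#   ("blocks.0.norm1.weight", "vit.encoder.layer.0.layernorm_before.weight")
#   ("cls_token", "vit.embeddings.cls_token")
# For base_model=True the "vit." prefix is stripped from the target names.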
def read_in_q_k_v ( state_dict , config , base_model=False ):
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = ''''''
        else:
            prefix = '''vit.'''
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''' )
        in_proj_bias = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.query.weight'''] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.query.bias'''] = in_proj_bias[: config.hidden_size]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.key.weight'''] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.key.bias'''] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.value.weight'''] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.value.bias'''] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_ ( state_dict ):
    ignore_keys = ['''head.weight''', '''head.bias''']
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key ( dct , old , new ):
    val = dct.pop(old )
    dct[new] = val
def prepare_img ():
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_vit_checkpoint ( vit_name , pytorch_dump_folder_path ):
    config = ViTConfig()
    base_model = False
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    if vit_name[-5:] == "in21k":
        base_model = True
        config.patch_size = int(vit_name[-12:-10] )
        config.image_size = int(vit_name[-9:-6] )
    else:
        config.num_labels = 1000
        repo_id = '''huggingface/label-files'''
        filename = '''imagenet-1k-id2label.json'''
        id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
        id2label = {int(k ): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        config.patch_size = int(vit_name[-6:-4] )
        config.image_size = int(vit_name[-3:] )
    # size of the architecture
    if "deit" in vit_name:
        if vit_name[9:].startswith('''tiny''' ):
            config.hidden_size = 192
            config.intermediate_size = 768
            config.num_hidden_layers = 12
            config.num_attention_heads = 3
        elif vit_name[9:].startswith('''small''' ):
            config.hidden_size = 384
            config.intermediate_size = 1536
            config.num_hidden_layers = 12
            config.num_attention_heads = 6
        else:
            pass
    else:
        if vit_name[4:].startswith('''small''' ):
            config.hidden_size = 768
            config.intermediate_size = 2304
            config.num_hidden_layers = 8
            config.num_attention_heads = 8
        elif vit_name[4:].startswith('''base''' ):
            pass
        elif vit_name[4:].startswith('''large''' ):
            config.hidden_size = 1024
            config.intermediate_size = 4096
            config.num_hidden_layers = 24
            config.num_attention_heads = 16
        elif vit_name[4:].startswith('''huge''' ):
            config.hidden_size = 1280
            config.intermediate_size = 5120
            config.num_hidden_layers = 32
            config.num_attention_heads = 16
    # load original model from timm
    timm_model = timm.create_model(vit_name , pretrained=True )
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict )
    rename_keys = create_rename_keys(config , base_model )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config , base_model )
    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTModel(config ).eval()
    else:
        model = ViTForImageClassification(config ).eval()
    model.load_state_dict(state_dict )
    # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
    if "deit" in vit_name:
        image_processor = DeiTImageProcessor(size=config.image_size )
    else:
        image_processor = ViTImageProcessor(size=config.image_size )
    encoding = image_processor(images=prepare_img() , return_tensors='''pt''' )
    pixel_values = encoding['''pixel_values''']
    outputs = model(pixel_values )
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values )
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output , outputs.pooler_output , atol=1E-3 )
    else:
        timm_logits = timm_model(pixel_values )
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits , outputs.logits , atol=1E-3 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(f'''Saving model {vit_name} to {pytorch_dump_folder_path}''' )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--vit_name''',
default='''vit_base_patch16_224''',
type=str,
help='''Name of the ViT timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
| 60 |
'''simple docstring'''
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
# Initialising the CNN
# (Sequential- Building the model layer by layer)
    classifier = models.Sequential()
# Step 1 - Convolution
# Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
# (3,3) is the kernel size (filter matrix)
classifier.add(
layers.ConvaD(32, (3, 3), input_shape=(64, 64, 3), activation='relu')
)
# Step 2 - Pooling
classifier.add(layers.MaxPoolingaD(pool_size=(2, 2)))
# Adding a second convolutional layer
classifier.add(layers.ConvaD(32, (3, 3), activation='relu'))
classifier.add(layers.MaxPoolingaD(pool_size=(2, 2)))
# Step 3 - Flattening
classifier.add(layers.Flatten())
# Step 4 - Full connection
classifier.add(layers.Dense(units=128, activation='relu'))
classifier.add(layers.Dense(units=1, activation='sigmoid'))
# Compiling the CNN
classifier.compile(
optimizer='adam', loss='binary_crossentropy', metrics=['accuracy']
)
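    # Shape walk-through for the stack above: 64x64x3 input -> 62x62x32 after
    # the first 3x3 convolution -> 31x31x32 after pooling -> 29x29x32 ->
    # 14x14x32 after the second conv/pool pair -> 6272-long vector after
    # flattening -> 128-unit hidden layer -> single sigmoid output.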
# Part 2 - Fitting the CNN to the images
# Load Trained model weights
# from keras.models import load_model
# regressor=load_model('cnn.h5')
    train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
        rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
    )
    test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)
    training_set = train_datagen.flow_from_directory(
        'dataset/training_set', target_size=(64, 64), batch_size=32, class_mode='binary'
    )
    test_set = test_datagen.flow_from_directory(
        'dataset/test_set', target_size=(64, 64), batch_size=32, class_mode='binary'
    )
classifier.fit_generator(
training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
)
classifier.save('cnn.h5')
# Part 3 - Making new predictions
    test_image = tf.keras.preprocessing.image.load_img(
        'dataset/single_prediction/image.png', target_size=(64, 64)
    )
    test_image = tf.keras.preprocessing.image.img_to_array(test_image)
    test_image = np.expand_dims(test_image, axis=0)
    result = classifier.predict(test_image)
# training_set.class_indices
    if result[0][0] == 0:
        prediction = 'Normal'
    if result[0][0] == 1:
        prediction = 'Abnormality detected'
| 2 | 0 |
"""simple docstring"""
deps = {
'Pillow': 'Pillow<10.0.0',
'accelerate': 'accelerate>=0.20.3',
'av': 'av==9.2.0',
'beautifulsoup4': 'beautifulsoup4',
'black': 'black~=23.1',
'codecarbon': 'codecarbon==1.2.0',
'cookiecutter': 'cookiecutter==1.7.3',
'dataclasses': 'dataclasses',
'datasets': 'datasets!=2.5.0',
'decord': 'decord==0.6.0',
'deepspeed': 'deepspeed>=0.9.3',
'diffusers': 'diffusers',
'dill': 'dill<0.3.5',
'evaluate': 'evaluate>=0.2.0',
'fairscale': 'fairscale>0.3',
'faiss-cpu': 'faiss-cpu',
'fastapi': 'fastapi',
'filelock': 'filelock',
'flax': 'flax>=0.4.1,<=0.7.0',
'ftfy': 'ftfy',
'fugashi': 'fugashi>=1.0',
'GitPython': 'GitPython<3.1.19',
'hf-doc-builder': 'hf-doc-builder>=0.3.0',
'huggingface-hub': 'huggingface-hub>=0.14.1,<1.0',
'importlib_metadata': 'importlib_metadata',
'ipadic': 'ipadic>=1.0.0,<2.0',
'isort': 'isort>=5.5.4',
'jax': 'jax>=0.2.8,!=0.3.2,<=0.4.13',
'jaxlib': 'jaxlib>=0.1.65,<=0.4.13',
'jieba': 'jieba',
'kenlm': 'kenlm',
'keras-nlp': 'keras-nlp>=0.3.1',
'librosa': 'librosa',
'nltk': 'nltk',
'natten': 'natten>=0.14.6',
'numpy': 'numpy>=1.17',
'onnxconverter-common': 'onnxconverter-common',
'onnxruntime-tools': 'onnxruntime-tools>=1.4.2',
'onnxruntime': 'onnxruntime>=1.4.0',
'opencv-python': 'opencv-python',
'optuna': 'optuna',
'optax': 'optax>=0.0.8,<=0.1.4',
'packaging': 'packaging>=20.0',
'parameterized': 'parameterized',
'phonemizer': 'phonemizer',
'protobuf': 'protobuf',
'psutil': 'psutil',
'pyyaml': 'pyyaml>=5.1',
'pydantic': 'pydantic<2',
'pytest': 'pytest>=7.2.0',
'pytest-timeout': 'pytest-timeout',
'pytest-xdist': 'pytest-xdist',
'python': 'python>=3.8.0',
'ray[tune]': 'ray[tune]',
'regex': 'regex!=2019.12.17',
'requests': 'requests',
'rhoknp': 'rhoknp>=1.1.0,<1.3.1',
'rjieba': 'rjieba',
'rouge-score': 'rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1',
'ruff': 'ruff>=0.0.241,<=0.0.259',
'sacrebleu': 'sacrebleu>=1.4.12,<2.0.0',
'sacremoses': 'sacremoses',
'safetensors': 'safetensors>=0.3.1',
'sagemaker': 'sagemaker>=2.31.0',
'scikit-learn': 'scikit-learn',
'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
'sigopt': 'sigopt',
'starlette': 'starlette',
'sudachipy': 'sudachipy>=0.6.6',
'sudachidict_core': 'sudachidict_core>=20220729',
'tensorflow-cpu': 'tensorflow-cpu>=2.6,<2.14',
'tensorflow': 'tensorflow>=2.6,<2.14',
'tensorflow-text': 'tensorflow-text<2.14',
'tf2onnx': 'tf2onnx',
'timeout-decorator': 'timeout-decorator',
'timm': 'timm',
'tokenizers': 'tokenizers>=0.11.1,!=0.11.3,<0.14',
'torch': 'torch>=1.9,!=1.12.0',
'torchaudio': 'torchaudio',
'torchvision': 'torchvision',
'pyctcdecode': 'pyctcdecode>=0.4.0',
'tqdm': 'tqdm>=4.27',
'unidic': 'unidic>=1.0.2',
'unidic_lite': 'unidic_lite>=1.0.7',
'urllib3': 'urllib3<2.0.0',
'uvicorn': 'uvicorn',
}
| 61 |
'''simple docstring'''
class __lowerCAmelCase : # Public class to implement a graph
'''simple docstring'''
    def __init__(self : int , row : int , col : int , graph : list[list[bool]] ):
        '''simple docstring'''
        self.ROW = row
        self.COL = col
        self.graph = graph
    def is_safe (self : Optional[int] , i : int , j : int , visited : list[list[bool]] ):
        '''simple docstring'''
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )
    def diffs (self : int , i : int , j : int , visited : list[list[bool]] ):
        '''simple docstring'''
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make those cells visited
        for k in range(8 ):
            if self.is_safe(i + row_nbr[k] , j + col_nbr[k] , visited ):
                self.diffs(i + row_nbr[k] , j + col_nbr[k] , visited )
def UpperCamelCase__ (self : Dict ): # And finally, count all islands.
'''simple docstring'''
lowercase__ = [[False for j in range(self.COL )] for i in range(self.ROW )]
lowercase__ = 0
for i in range(self.ROW ):
for j in range(self.COL ):
if visited[i][j] is False and self.graph[i][j] == 1:
self.diffs(UpperCamelCase , UpperCamelCase , UpperCamelCase )
count += 1
return count
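# Usage sketch for the island counter above (the class keeps the dump's
# placeholder name __lowerCAmelCase); this 5x5 grid has 5 eight-connected
# islands.
graph = [
    [1, 1, 0, 0, 0],
    [0, 1, 0, 0, 1],
    [1, 0, 0, 1, 1],
    [0, 0, 0, 0, 0],
    [1, 0, 1, 0, 1],
]
g = __lowerCAmelCase(5, 5, graph)
print(g.count_islands())  # -> 5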
| 2 | 0 |
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format='%(message)s')
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : np.ndarray ):
return input_array.reshape((input_array.size, 1) )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : np.ndarray , SCREAMING_SNAKE_CASE__ : np.ndarray , SCREAMING_SNAKE_CASE__ : int ):
__UpperCamelCase =np.nan
for i in range(SCREAMING_SNAKE_CASE__ ):
__UpperCamelCase =features[:, labels == i]
__UpperCamelCase =data.mean(1 )
# Centralize the data of class i
__UpperCamelCase =data - column_reshape(SCREAMING_SNAKE_CASE__ )
if i > 0:
# If covariance_sum is not None
covariance_sum += np.dot(SCREAMING_SNAKE_CASE__ , centered_data.T )
else:
# If covariance_sum is np.nan (i.e. first loop)
__UpperCamelCase =np.dot(SCREAMING_SNAKE_CASE__ , centered_data.T )
return covariance_sum / features.shape[1]
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : np.ndarray , SCREAMING_SNAKE_CASE__ : np.ndarray , SCREAMING_SNAKE_CASE__ : int ):
__UpperCamelCase =features.mean(1 )
__UpperCamelCase =np.nan
for i in range(SCREAMING_SNAKE_CASE__ ):
__UpperCamelCase =features[:, labels == i]
__UpperCamelCase =data.shape[1]
__UpperCamelCase =data.mean(1 )
if i > 0:
# If covariance_sum is not None
covariance_sum += device_data * np.dot(
column_reshape(SCREAMING_SNAKE_CASE__ ) - column_reshape(SCREAMING_SNAKE_CASE__ ) , (column_reshape(SCREAMING_SNAKE_CASE__ ) - column_reshape(SCREAMING_SNAKE_CASE__ )).T , )
else:
# If covariance_sum is np.nan (i.e. first loop)
__UpperCamelCase =device_data * np.dot(
column_reshape(SCREAMING_SNAKE_CASE__ ) - column_reshape(SCREAMING_SNAKE_CASE__ ) , (column_reshape(SCREAMING_SNAKE_CASE__ ) - column_reshape(SCREAMING_SNAKE_CASE__ )).T , )
return covariance_sum / features.shape[1]
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : np.ndarray , SCREAMING_SNAKE_CASE__ : int ):
# Check if the features have been loaded
if features.any():
__UpperCamelCase =features.mean(1 )
# Center the dataset
__UpperCamelCase =features - np.reshape(SCREAMING_SNAKE_CASE__ , (data_mean.size, 1) )
__UpperCamelCase =np.dot(SCREAMING_SNAKE_CASE__ , centered_data.T ) / features.shape[1]
__UpperCamelCase , __UpperCamelCase =np.linalg.eigh(SCREAMING_SNAKE_CASE__ )
# Take all the columns in the reverse order (-1), and then takes only the first
__UpperCamelCase =eigenvectors[:, ::-1][:, 0:dimensions]
# Project the database on the new space
__UpperCamelCase =np.dot(filtered_eigenvectors.T , SCREAMING_SNAKE_CASE__ )
logging.info('Principal Component Analysis computed' )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format='%(message)s' , force=SCREAMING_SNAKE_CASE__ )
logging.error('Dataset empty' )
raise AssertionError
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : np.ndarray , SCREAMING_SNAKE_CASE__ : np.ndarray , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ):
assert classes > dimensions
# Check if features have been already loaded
    if features.any():
__UpperCamelCase , __UpperCamelCase =eigh(
covariance_between_classes(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , covariance_within_classes(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , )
__UpperCamelCase =eigenvectors[:, ::-1][:, :dimensions]
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase =np.linalg.svd(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =svd_matrix[:, 0:dimensions]
__UpperCamelCase =np.dot(filtered_svd_matrix.T , SCREAMING_SNAKE_CASE__ )
logging.info('Linear Discriminant Analysis computed' )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format='%(message)s' , force=SCREAMING_SNAKE_CASE__ )
logging.error('Dataset empty' )
raise AssertionError
def _UpperCAmelCase ( ):
# Create dummy dataset with 2 classes and 3 features
__UpperCamelCase =np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] )
__UpperCamelCase =np.array([0, 0, 0, 1, 1] )
__UpperCamelCase =2
__UpperCamelCase =2
# Assert that the function raises an AssertionError if dimensions > classes
with pytest.raises(SCREAMING_SNAKE_CASE__ ) as error_info:
__UpperCamelCase =linear_discriminant_analysis(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if isinstance(SCREAMING_SNAKE_CASE__ , np.ndarray ):
raise AssertionError(
'Did not raise AssertionError for dimensions > classes' )
assert error_info.type is AssertionError
def _UpperCAmelCase ( ):
__UpperCamelCase =np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] )
__UpperCamelCase =2
__UpperCamelCase =np.array([[6.92820323, 8.66025404, 10.39230485], [3.0, 3.0, 3.0]] )
with pytest.raises(SCREAMING_SNAKE_CASE__ ) as error_info:
__UpperCamelCase =principal_component_analysis(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if not np.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
raise AssertionError
assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod()
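# A minimal, self-contained sketch of the same PCA idea as the routine above
# (the dump's placeholder identifiers make the original hard to call directly);
# rows are features and columns are samples, matching this file's convention.
import numpy as np
def pca_sketch(features: np.ndarray, dimensions: int) -> np.ndarray:
    centered = features - features.mean(axis=1, keepdims=True)
    covariance = centered @ centered.T / features.shape[1]
    _, eigenvectors = np.linalg.eigh(covariance)      # eigenvalues ascending
    top = eigenvectors[:, ::-1][:, :dimensions]       # largest components first
    return top.T @ features
print(pca_sketch(np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]), 2))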
| 62 |
'''simple docstring'''
import unittest
from transformers import DonutProcessor
lowerCamelCase : Tuple = 'naver-clova-ix/donut-base'
class __lowerCAmelCase (unittest.TestCase ):
'''simple docstring'''
    def setUp(self):
        '''simple docstring'''
        self.processor = DonutProcessor.from_pretrained(lowerCamelCase)
    def test_tokenajson(self):
'''simple docstring'''
lowercase__ = {
'''name''': '''John Doe''',
'''age''': '''99''',
'''city''': '''Atlanta''',
'''state''': '''GA''',
'''zip''': '''30301''',
'''phone''': '''123-4567''',
'''nicknames''': [{'''nickname''': '''Johnny'''}, {'''nickname''': '''JD'''}],
}
lowercase__ = (
'''<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>'''
'''<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>'''
'''<s_nicknames><s_nickname>Johnny</s_nickname>'''
'''<sep/><s_nickname>JD</s_nickname></s_nicknames>'''
)
lowercase__ = self.processor.tokenajson(UpperCamelCase )
self.assertDictEqual(UpperCamelCase , UpperCamelCase )
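# The test above exercises DonutProcessor's token-to-JSON conversion (spelled
# `tokenajson` in this dump). As a rough illustration of the idea -- not the
# library's actual implementation -- a tiny recursive tag parser that maps the
# sequence in the test onto the expected dict:
import re
def tags_to_dict(seq: str):
    out = {}
    # <s_key>...</s_key> spans; nested tags recurse, <sep/> separates repeats
    for key, body in re.findall(r"<s_(\w+)>(.*?)</s_\1>", seq, flags=re.DOTALL):
        parts = [p for p in body.split("<sep/>") if p]
        values = [tags_to_dict(p) if "<s_" in p else p for p in parts]
        out[key] = values if len(values) != 1 else values[0]
    return out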
| 2 | 0 |
'''simple docstring'''
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
def __init__( self : List[Any] , __a : int , __a : Optional[Any]=13 , __a : str=7 , __a : Optional[int]=True , __a : List[Any]=True , __a : Any=True , __a : List[str]=True , __a : str=99 , __a : int=32 , __a : Any=5 , __a : Union[str, Any]=4 , __a : Optional[int]=37 , __a : Optional[Any]="gelu" , __a : Any=0.1 , __a : str=0.1 , __a : Any=5_12 , __a : Optional[Any]=16 , __a : Dict=2 , __a : Union[str, Any]=0.02 , __a : Any=False , __a : Optional[int]=True , __a : List[Any]="None" , __a : Optional[int]=3 , __a : Dict=4 , __a : List[str]=None , ):
_a = parent
_a = batch_size
_a = seq_length
_a = is_training
_a = use_input_mask
_a = use_token_type_ids
_a = use_labels
_a = vocab_size
_a = hidden_size
_a = num_hidden_layers
_a = num_attention_heads
_a = intermediate_size
_a = hidden_act
_a = hidden_dropout_prob
_a = attention_probs_dropout_prob
_a = max_position_embeddings
_a = type_vocab_size
_a = type_sequence_label_size
_a = initializer_range
_a = num_labels
_a = num_choices
_a = relative_attention
_a = position_biased_input
_a = pos_att_type
_a = scope
def UpperCamelCase__ ( self : Optional[int] ):
_a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_a = None
if self.use_input_mask:
_a = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
_a = None
if self.use_token_type_ids:
_a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_a = None
_a = None
_a = None
if self.use_labels:
_a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_a = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_a = ids_tensor([self.batch_size] , self.num_choices )
_a = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase__ ( self : Optional[int] ):
return DebertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def UpperCamelCase__ ( self : Any ):
        config = self.get_config()
        config.vocab_size = 3_00
        return config
def UpperCamelCase__ ( self : List[str] , __a : Dict ):
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def UpperCamelCase__ ( self : List[Any] , __a : int , __a : Dict , __a : Tuple , __a : str , __a : Union[str, Any] , __a : List[str] , __a : List[Any] ):
_a = DebertaModel(config=__a )
model.to(__a )
model.eval()
_a = model(__a , attention_mask=__a , token_type_ids=__a )[0]
_a = model(__a , token_type_ids=__a )[0]
_a = model(__a )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def UpperCamelCase__ ( self : str , __a : List[str] , __a : Optional[Any] , __a : Tuple , __a : List[Any] , __a : Dict , __a : Optional[Any] , __a : List[str] ):
_a = DebertaForMaskedLM(config=__a )
model.to(__a )
model.eval()
_a = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase__ ( self : Optional[Any] , __a : int , __a : Optional[int] , __a : Dict , __a : int , __a : Optional[int] , __a : str , __a : Dict ):
_a = self.num_labels
_a = DebertaForSequenceClassification(__a )
model.to(__a )
model.eval()
_a = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(__a )
def UpperCamelCase__ ( self : List[str] , __a : int , __a : Dict , __a : Union[str, Any] , __a : Dict , __a : str , __a : Optional[Any] , __a : List[str] ):
_a = self.num_labels
_a = DebertaForTokenClassification(config=__a )
model.to(__a )
model.eval()
_a = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase__ ( self : Optional[Any] , __a : List[Any] , __a : Dict , __a : List[str] , __a : Optional[int] , __a : Union[str, Any] , __a : int , __a : Optional[Any] ):
_a = DebertaForQuestionAnswering(config=__a )
model.to(__a )
model.eval()
_a = model(
__a , attention_mask=__a , token_type_ids=__a , start_positions=__a , end_positions=__a , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase__ ( self : List[str] ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
"""simple docstring"""
__a =(
(
DebertaModel,
DebertaForMaskedLM,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaForQuestionAnswering,
)
if is_torch_available()
else ()
)
__a =(
{
'feature-extraction': DebertaModel,
'fill-mask': DebertaForMaskedLM,
'question-answering': DebertaForQuestionAnswering,
'text-classification': DebertaForSequenceClassification,
'token-classification': DebertaForTokenClassification,
'zero-shot': DebertaForSequenceClassification,
}
if is_torch_available()
else {}
)
__a =True
__a =False
__a =False
__a =False
__a =False
def UpperCamelCase__ ( self : Tuple ):
_a = DebertaModelTester(self )
_a = ConfigTester(self , config_class=__a , hidden_size=37 )
def UpperCamelCase__ ( self : Optional[int] ):
self.config_tester.run_common_tests()
def UpperCamelCase__ ( self : str ):
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*__a )
def UpperCamelCase__ ( self : Any ):
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*__a )
def UpperCamelCase__ ( self : List[str] ):
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*__a )
def UpperCamelCase__ ( self : List[Any] ):
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*__a )
def UpperCamelCase__ ( self : Any ):
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*__a )
@slow
def UpperCamelCase__ ( self : List[str] ):
for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_a = DebertaModel.from_pretrained(__a )
self.assertIsNotNone(__a )
@require_torch
@require_sentencepiece
@require_tokenizers
class __SCREAMING_SNAKE_CASE (unittest.TestCase ):
"""simple docstring"""
@unittest.skip(reason="Model not available yet" )
def UpperCamelCase__ ( self : Tuple ):
pass
@slow
def UpperCamelCase__ ( self : List[Any] ):
_a = DebertaModel.from_pretrained("microsoft/deberta-base" )
_a = torch.tensor([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] )
_a = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
_a = model(__a , attention_mask=__a )[0]
# compare the actual values for a slice.
_a = torch.tensor(
[[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __a , atol=1e-4 ) , f'{output[:, 1:4, 1:4]}' )
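# Hedged end-to-end sketch of what the integration test above checks, through
# the public transformers API (downloads weights, so it needs network access):
import torch
from transformers import AutoTokenizer, DebertaModel
tokenizer = AutoTokenizer.from_pretrained("microsoft/deberta-base")
model = DebertaModel.from_pretrained("microsoft/deberta-base").eval()
inputs = tokenizer("DeBERTa uses disentangled attention.", return_tensors="pt")
with torch.no_grad():
    hidden = model(**inputs).last_hidden_state  # shape (1, seq_len, 768)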
| 63 |
'''simple docstring'''
from __future__ import annotations
def _SCREAMING_SNAKE_CASE (A ) -> bool:
"""simple docstring"""
return len(set(A ) ) == len(A )
if __name__ == "__main__":
import doctest
doctest.testmod()
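# Quick sanity checks for the all-unique helper above (the dump's placeholder
# name _SCREAMING_SNAKE_CASE is kept):
assert _SCREAMING_SNAKE_CASE([1, 2, 3]) is True
assert _SCREAMING_SNAKE_CASE(["a", "b", "a"]) is False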
| 2 | 0 |
"""simple docstring"""
def equation(x: float) -> float:
    """simple docstring"""
    return 10 - x * x
def bisection(a: float, b: float) -> float:
    """simple docstring"""
    # A root is only bracketed when the function changes sign on [a, b]
    if equation(a) * equation(b) >= 0:
        raise ValueError("""Wrong space!""")
    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
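# Worked check: for equation(x) = 10 - x*x the positive root is sqrt(10), about
# 3.1623. Both calls above bracket it (the function changes sign on [-2, 5] and
# on [0, 6]), and since bisection halves the interval until b - a < 0.01, each
# call takes roughly ceil(log2(7 / 0.01)) = 10 iterations before printing ~3.16.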
| 64 |
'''simple docstring'''
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_ta import TaTokenizer
else:
lowerCamelCase : Any = None
lowerCamelCase : Dict = logging.get_logger(__name__)
lowerCamelCase : Optional[int] = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
lowerCamelCase : List[str] = {
'vocab_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/spiece.model',
't5-base': 'https://huggingface.co/t5-base/resolve/main/spiece.model',
't5-large': 'https://huggingface.co/t5-large/resolve/main/spiece.model',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/spiece.model',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/spiece.model',
},
'tokenizer_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/tokenizer.json',
't5-base': 'https://huggingface.co/t5-base/resolve/main/tokenizer.json',
't5-large': 'https://huggingface.co/t5-large/resolve/main/tokenizer.json',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/tokenizer.json',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/tokenizer.json',
},
}
# TODO(PVP) - this should be removed in Transformers v5
lowerCamelCase : Any = {
't5-small': 512,
't5-base': 512,
't5-large': 512,
't5-3b': 512,
't5-11b': 512,
}
class __lowerCAmelCase (lowercase_ ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = VOCAB_FILES_NAMES
lowerCAmelCase__ : str = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ : int = ["""input_ids""", """attention_mask"""]
lowerCAmelCase__ : Optional[int] = TaTokenizer
lowerCAmelCase__ : List[int] = []
def __init__(self : Dict , UpperCamelCase : str=None , UpperCamelCase : Optional[Any]=None , UpperCamelCase : Any="</s>" , UpperCamelCase : str="<unk>" , UpperCamelCase : List[str]="<pad>" , UpperCamelCase : List[str]=100 , UpperCamelCase : Tuple=None , **UpperCamelCase : List[str] , ):
'''simple docstring'''
if extra_ids > 0 and additional_special_tokens is None:
lowercase__ = [f"<extra_id_{i}>" for i in range(UpperCamelCase )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra special tokens
lowercase__ = len(set(filter(lambda UpperCamelCase : bool('''extra_id_''' in str(UpperCamelCase ) ) , UpperCamelCase ) ) )
if extra_tokens != extra_ids:
raise ValueError(
f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
''' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'''
''' tokens''' )
super().__init__(
UpperCamelCase , tokenizer_file=UpperCamelCase , eos_token=UpperCamelCase , unk_token=UpperCamelCase , pad_token=UpperCamelCase , extra_ids=UpperCamelCase , additional_special_tokens=UpperCamelCase , **UpperCamelCase , )
lowercase__ = vocab_file
lowercase__ = False if not self.vocab_file else True
lowercase__ = extra_ids
@staticmethod
def UpperCamelCase__ (UpperCamelCase : List[Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[Any] ):
'''simple docstring'''
if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
lowercase__ = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
'''This tokenizer was incorrectly instantiated with a model max length of'''
f" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
''' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'''
''' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'''
f" {pretrained_model_name_or_path} automatically truncating your input to"
f" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
f" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
''' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'''
''' instantiate this tokenizer with `model_max_length` set to your preferred value.''' , UpperCamelCase , )
return max_model_length
def UpperCamelCase__ (self : Any , UpperCamelCase : str , UpperCamelCase : Optional[str] = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(UpperCamelCase ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
lowercase__ = os.path.join(
UpperCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase ):
copyfile(self.vocab_file , UpperCamelCase )
logger.info(f"Copy vocab file to {out_vocab_file}" )
return (out_vocab_file,)
def UpperCamelCase__ (self : Any , UpperCamelCase : List[int] , UpperCamelCase : Optional[List[int]] = None ):
'''simple docstring'''
lowercase__ = token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return self.prefix_tokens + token_ids_a
else:
lowercase__ = token_ids_a + [self.eos_token_id]
return self.prefix_tokens + token_ids_a + token_ids_a
def UpperCamelCase__ (self : Optional[Any] , UpperCamelCase : List[int] , UpperCamelCase : Optional[List[int]] = None ):
'''simple docstring'''
lowercase__ = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def UpperCamelCase__ (self : List[Any] ):
'''simple docstring'''
return list(
set(filter(lambda UpperCamelCase : bool(re.search(R'''<extra_id_\d+>''' , UpperCamelCase ) ) is not None , self.additional_special_tokens ) ) )
def UpperCamelCase__ (self : Union[str, Any] ):
'''simple docstring'''
return [self.convert_tokens_to_ids(UpperCamelCase ) for token in self.get_sentinel_tokens()]
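# Hedged usage sketch for the fast T5 tokenizer defined above, via the public
# transformers API (downloads the vocabulary, so it needs network access):
from transformers import T5TokenizerFast
tok = T5TokenizerFast.from_pretrained("t5-small")
enc = tok("translate English to German: hello", return_tensors="pt")
print(tok.decode(enc.input_ids[0]))  # note the trailing </s> appended by the tokenizer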
| 2 | 0 |
import mpmath # for roots of unity
import numpy as np
class A :
def __init__(self : Union[str, Any] , __UpperCAmelCase : Any=None , __UpperCAmelCase : Optional[int]=None ) -> Dict:
"""simple docstring"""
UpperCAmelCase__ = list(poly_a or [0] )[:]
UpperCAmelCase__ = list(poly_b or [0] )[:]
# Remove leading zero coefficients
while self.polyA[-1] == 0:
self.polyA.pop()
        self.len_A = len(self.polyA )
while self.polyB[-1] == 0:
self.polyB.pop()
        self.len_B = len(self.polyB )
# Add 0 to make lengths equal a power of 2
UpperCAmelCase__ = int(
2 ** np.ceil(np.loga(len(self.polyA ) + len(self.polyB ) - 1 ) ) )
while len(self.polyA ) < self.c_max_length:
self.polyA.append(0 )
while len(self.polyB ) < self.c_max_length:
self.polyB.append(0 )
# A complex root used for the fourier transform
UpperCAmelCase__ = complex(mpmath.root(x=1 , n=self.c_max_length , k=1 ) )
# The product
        self.product = self.__multiply()
def lowercase_ (self : Dict , __UpperCAmelCase : str ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase__ = [[x] for x in self.polyA] if which == "A" else [[x] for x in self.polyB]
# Corner case
if len(__UpperCAmelCase ) <= 1:
return dft[0]
#
UpperCAmelCase__ = self.c_max_length // 2
while next_ncol > 0:
UpperCAmelCase__ = [[] for i in range(__UpperCAmelCase )]
UpperCAmelCase__ = self.root**next_ncol
# First half of next step
UpperCAmelCase__ = 1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(__UpperCAmelCase ):
new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j] )
current_root *= root
# Second half of next step
UpperCAmelCase__ = 1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(__UpperCAmelCase ):
new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j] )
current_root *= root
# Update
UpperCAmelCase__ = new_dft
UpperCAmelCase__ = next_ncol // 2
return dft[0]
def lowercase_ (self : str ) -> int:
"""simple docstring"""
UpperCAmelCase__ = self.__dft("A" )
UpperCAmelCase__ = self.__dft("B" )
UpperCAmelCase__ = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length )]]
del dft_a
del dft_b
# Corner Case
if len(inverce_c[0] ) <= 1:
return inverce_c[0]
# Inverse DFT
UpperCAmelCase__ = 2
while next_ncol <= self.c_max_length:
UpperCAmelCase__ = [[] for i in range(__UpperCAmelCase )]
UpperCAmelCase__ = self.root ** (next_ncol // 2)
UpperCAmelCase__ = 1
# First half of next step
for j in range(self.c_max_length // next_ncol ):
for i in range(next_ncol // 2 ):
# Even positions
new_inverse_c[i].append(
(
inverce_c[i][j]
+ inverce_c[i][j + self.c_max_length // next_ncol]
)
/ 2 )
# Odd positions
new_inverse_c[i + next_ncol // 2].append(
(
inverce_c[i][j]
- inverce_c[i][j + self.c_max_length // next_ncol]
)
/ (2 * current_root) )
current_root *= root
# Update
UpperCAmelCase__ = new_inverse_c
next_ncol *= 2
# Unpack
UpperCAmelCase__ = [round(x[0].real , 8 ) + round(x[0].imag , 8 ) * 1j for x in inverce_c]
# Remove leading 0's
while inverce_c[-1] == 0:
inverce_c.pop()
return inverce_c
    def __str__(self) -> str:
        """simple docstring"""
        a = "A = " + " + ".join(
            f"""{coef}*x^{i}""" for i, coef in enumerate(self.polyA[: self.len_A] ) )
        b = "B = " + " + ".join(
            f"""{coef}*x^{i}""" for i, coef in enumerate(self.polyB[: self.len_B] ) )
        c = "A*B = " + " + ".join(
            f"""{coef}*x^{i}""" for i, coef in enumerate(self.product ) )
        return f"""{a}\n{b}\n{c}"""
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
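# Cross-check of the FFT product with numpy's FFT (illustrative helper, not
# part of the class above): (1 + 2x + 3x^2) * (4 + 5x) = 4 + 13x + 22x^2 + 15x^3.
import numpy as np
def fft_poly_multiply(a, b):
    n = len(a) + len(b) - 1                 # length of the linear convolution
    size = 1 << max(n - 1, 1).bit_length()  # next power of two >= n
    fa = np.fft.rfft(a, size)
    fb = np.fft.rfft(b, size)
    return np.round(np.fft.irfft(fa * fb, size)[:n]).astype(int)
print(fft_poly_multiply([1, 2, 3], [4, 5]))  # [ 4 13 22 15]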
| 65 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class __lowerCAmelCase (lowercase_ , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ : Dict = ShapEImgaImgPipeline
lowerCAmelCase__ : List[str] = ["""image"""]
lowerCAmelCase__ : Any = ["""image"""]
lowerCAmelCase__ : Any = [
"""num_images_per_prompt""",
"""num_inference_steps""",
"""generator""",
"""latents""",
"""guidance_scale""",
"""frame_size""",
"""output_type""",
"""return_dict""",
]
lowerCAmelCase__ : Tuple = False
@property
def UpperCamelCase__ (self : List[Any] ):
'''simple docstring'''
return 32
@property
def UpperCamelCase__ (self : str ):
'''simple docstring'''
return 32
@property
def UpperCamelCase__ (self : Union[str, Any] ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def UpperCamelCase__ (self : int ):
'''simple docstring'''
return 8
@property
def UpperCamelCase__ (self : Any ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase__ = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , )
lowercase__ = CLIPVisionModel(UpperCamelCase )
return model
@property
def UpperCamelCase__ (self : List[Any] ):
'''simple docstring'''
lowercase__ = CLIPImageProcessor(
crop_size=224 , do_center_crop=UpperCamelCase , do_normalize=UpperCamelCase , do_resize=UpperCamelCase , image_mean=[0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73] , image_std=[0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11] , resample=3 , size=224 , )
return image_processor
@property
def UpperCamelCase__ (self : str ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase__ = {
'''num_attention_heads''': 2,
'''attention_head_dim''': 16,
'''embedding_dim''': self.time_input_dim,
'''num_embeddings''': 32,
'''embedding_proj_dim''': self.text_embedder_hidden_size,
'''time_embed_dim''': self.time_embed_dim,
'''num_layers''': 1,
'''clip_embed_dim''': self.time_input_dim * 2,
'''additional_embeddings''': 0,
'''time_embed_act_fn''': '''gelu''',
'''norm_in_type''': '''layer''',
'''embedding_proj_norm_type''': '''layer''',
'''encoder_hid_proj_type''': None,
'''added_emb_type''': None,
}
lowercase__ = PriorTransformer(**UpperCamelCase )
return model
@property
def UpperCamelCase__ (self : int ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase__ = {
'''param_shapes''': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'''d_latent''': self.time_input_dim,
'''d_hidden''': self.renderer_dim,
'''n_output''': 12,
'''background''': (
0.1,
0.1,
0.1,
),
}
lowercase__ = ShapERenderer(**UpperCamelCase )
return model
def UpperCamelCase__ (self : Union[str, Any] ):
'''simple docstring'''
lowercase__ = self.dummy_prior
lowercase__ = self.dummy_image_encoder
lowercase__ = self.dummy_image_processor
lowercase__ = self.dummy_renderer
lowercase__ = HeunDiscreteScheduler(
beta_schedule='''exp''' , num_train_timesteps=1024 , prediction_type='''sample''' , use_karras_sigmas=UpperCamelCase , clip_sample=UpperCamelCase , clip_sample_range=1.0 , )
lowercase__ = {
'''prior''': prior,
'''image_encoder''': image_encoder,
'''image_processor''': image_processor,
'''renderer''': renderer,
'''scheduler''': scheduler,
}
return components
def UpperCamelCase__ (self : Optional[int] , UpperCamelCase : Optional[int] , UpperCamelCase : str=0 ):
'''simple docstring'''
lowercase__ = floats_tensor((1, 3, 64, 64) , rng=random.Random(UpperCamelCase ) ).to(UpperCamelCase )
if str(UpperCamelCase ).startswith('''mps''' ):
lowercase__ = torch.manual_seed(UpperCamelCase )
else:
lowercase__ = torch.Generator(device=UpperCamelCase ).manual_seed(UpperCamelCase )
lowercase__ = {
'''image''': input_image,
'''generator''': generator,
'''num_inference_steps''': 1,
'''frame_size''': 32,
'''output_type''': '''np''',
}
return inputs
def UpperCamelCase__ (self : Tuple ):
'''simple docstring'''
lowercase__ = '''cpu'''
lowercase__ = self.get_dummy_components()
lowercase__ = self.pipeline_class(**UpperCamelCase )
lowercase__ = pipe.to(UpperCamelCase )
pipe.set_progress_bar_config(disable=UpperCamelCase )
lowercase__ = pipe(**self.get_dummy_inputs(UpperCamelCase ) )
lowercase__ = output.images[0]
lowercase__ = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
lowercase__ = np.array(
[
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCamelCase__ (self : Optional[Any] ):
'''simple docstring'''
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def UpperCamelCase__ (self : int ):
'''simple docstring'''
lowercase__ = torch_device == '''cpu'''
lowercase__ = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=UpperCamelCase , relax_max_difference=UpperCamelCase , )
def UpperCamelCase__ (self : int ):
'''simple docstring'''
lowercase__ = self.get_dummy_components()
lowercase__ = self.pipeline_class(**UpperCamelCase )
lowercase__ = pipe.to(UpperCamelCase )
pipe.set_progress_bar_config(disable=UpperCamelCase )
lowercase__ = 1
lowercase__ = 2
lowercase__ = self.get_dummy_inputs(UpperCamelCase )
for key in inputs.keys():
if key in self.batch_params:
lowercase__ = batch_size * [inputs[key]]
lowercase__ = pipe(**UpperCamelCase , num_images_per_prompt=UpperCamelCase )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class __lowerCAmelCase (unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ (self : Optional[Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase__ (self : Union[str, Any] ):
'''simple docstring'''
lowercase__ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/corgi.png''' )
lowercase__ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/shap_e/test_shap_e_img2img_out.npy''' )
lowercase__ = ShapEImgaImgPipeline.from_pretrained('''openai/shap-e-img2img''' )
lowercase__ = pipe.to(UpperCamelCase )
pipe.set_progress_bar_config(disable=UpperCamelCase )
lowercase__ = torch.Generator(device=UpperCamelCase ).manual_seed(0 )
lowercase__ = pipe(
UpperCamelCase , generator=UpperCamelCase , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type='''np''' , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(UpperCamelCase , UpperCamelCase )
| 2 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import _LazyModule
__a = {"tokenization_byt5": ["ByT5Tokenizer"]}
if TYPE_CHECKING:
from .tokenization_byta import ByTaTokenizer
else:
import sys
__a = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
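# Generic sketch of the same deferred-import idea using PEP 562 module-level
# __getattr__ (illustrative only; transformers' _LazyModule also handles
# dir(), submodules and import errors):
import importlib
_LAZY = {"ByT5Tokenizer": ".tokenization_byt5"}
def __getattr__(name):
    if name in _LAZY:
        module = importlib.import_module(_LAZY[name], __package__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")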
| 66 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_rag': ['RagConfig'],
'retrieval_rag': ['RagRetriever'],
'tokenization_rag': ['RagTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_rag"] = [
'RagModel',
'RagPreTrainedModel',
'RagSequenceForGeneration',
'RagTokenForGeneration',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_rag"] = [
'TFRagModel',
'TFRagPreTrainedModel',
'TFRagSequenceForGeneration',
'TFRagTokenForGeneration',
]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 2 | 0 |
'''simple docstring'''
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar( num_diffusion_timesteps , max_beta=0.9_9_9 , alpha_transform_type="cosine" , ) -> torch.Tensor:
    if alpha_transform_type == "cosine":
        def alpha_bar_fn(t):
            return math.cos((t + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2
    elif alpha_transform_type == "exp":
        def alpha_bar_fn(t):
            return math.exp(t * -1_2.0 )
    else:
        raise ValueError(f"""Unsupported alpha_tranform_type: {alpha_transform_type}""" )
    betas = []
    for i in range(num_diffusion_timesteps ):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2 ) / alpha_bar_fn(t1 ) , max_beta ) )
    return torch.tensor(betas , dtype=torch.float32 )
class a__ ( UpperCAmelCase__ , UpperCAmelCase__ ):
lowerCamelCase : Dict =[e.name for e in KarrasDiffusionSchedulers]
lowerCamelCase : Optional[Any] =2
@register_to_config
def __init__( self : Optional[Any] , a : int = 10_00 , a : float = 0.0_00_85 , a : float = 0.0_12 , a : str = "linear" , a : Optional[Union[np.ndarray, List[float]]] = None , a : str = "epsilon" , a : Optional[bool] = False , a : Optional[bool] = False , a : float = 1.0 , a : str = "linspace" , a : int = 0 , ):
"""simple docstring"""
if trained_betas is not None:
__lowerCamelCase = torch.tensor(a , dtype=torch.floataa )
elif beta_schedule == "linear":
__lowerCamelCase = torch.linspace(a , a , a , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
__lowerCamelCase = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , a , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
__lowerCamelCase = betas_for_alpha_bar(a , alpha_transform_type='''cosine''' )
elif beta_schedule == "exp":
__lowerCamelCase = betas_for_alpha_bar(a , alpha_transform_type='''exp''' )
else:
raise NotImplementedError(f"""{beta_schedule} does is not implemented for {self.__class__}""" )
__lowerCamelCase = 1.0 - self.betas
__lowerCamelCase = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(a , a , a )
__lowerCamelCase = use_karras_sigmas
def SCREAMING_SNAKE_CASE__ ( self : Dict , a : Optional[Any] , a : List[Any]=None ):
"""simple docstring"""
if schedule_timesteps is None:
__lowerCamelCase = self.timesteps
__lowerCamelCase = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
__lowerCamelCase = 1 if len(a ) > 1 else 0
else:
__lowerCamelCase = timestep.cpu().item() if torch.is_tensor(a ) else timestep
__lowerCamelCase = self._index_counter[timestep_int]
return indices[pos].item()
@property
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
"""simple docstring"""
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , a : torch.FloatTensor , a : Union[float, torch.FloatTensor] , ):
"""simple docstring"""
__lowerCamelCase = self.index_for_timestep(a )
__lowerCamelCase = self.sigmas[step_index]
__lowerCamelCase = sample / ((sigma**2 + 1) ** 0.5)
return sample
def SCREAMING_SNAKE_CASE__ ( self : Any , a : int , a : Union[str, torch.device] = None , a : Optional[int] = None , ):
"""simple docstring"""
__lowerCamelCase = num_inference_steps
__lowerCamelCase = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
__lowerCamelCase = np.linspace(0 , num_train_timesteps - 1 , a , dtype=a )[::-1].copy()
elif self.config.timestep_spacing == "leading":
__lowerCamelCase = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
__lowerCamelCase = (np.arange(0 , a ) * step_ratio).round()[::-1].copy().astype(a )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
__lowerCamelCase = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
__lowerCamelCase = (np.arange(a , 0 , -step_ratio )).round().copy().astype(a )
timesteps -= 1
else:
raise ValueError(
f"""{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'.""" )
__lowerCamelCase = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
__lowerCamelCase = np.log(a )
__lowerCamelCase = np.interp(a , np.arange(0 , len(a ) ) , a )
if self.config.use_karras_sigmas:
__lowerCamelCase = self._convert_to_karras(in_sigmas=a , num_inference_steps=self.num_inference_steps )
__lowerCamelCase = np.array([self._sigma_to_t(a , a ) for sigma in sigmas] )
__lowerCamelCase = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
__lowerCamelCase = torch.from_numpy(a ).to(device=a )
__lowerCamelCase = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] )
__lowerCamelCase = torch.from_numpy(a )
__lowerCamelCase = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] )
if str(a ).startswith('''mps''' ):
# mps does not support float64
__lowerCamelCase = timesteps.to(a , dtype=torch.floataa )
else:
__lowerCamelCase = timesteps.to(device=a )
# empty dt and derivative
__lowerCamelCase = None
__lowerCamelCase = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
__lowerCamelCase = defaultdict(a )
def SCREAMING_SNAKE_CASE__ ( self : int , a : Any , a : List[Any] ):
"""simple docstring"""
__lowerCamelCase = np.log(a )
# get distribution
__lowerCamelCase = log_sigma - log_sigmas[:, np.newaxis]
# get sigmas range
__lowerCamelCase = np.cumsum((dists >= 0) , axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 )
__lowerCamelCase = low_idx + 1
__lowerCamelCase = log_sigmas[low_idx]
__lowerCamelCase = log_sigmas[high_idx]
# interpolate sigmas
__lowerCamelCase = (low - log_sigma) / (low - high)
__lowerCamelCase = np.clip(a , 0 , 1 )
# transform interpolation to time range
__lowerCamelCase = (1 - w) * low_idx + w * high_idx
__lowerCamelCase = t.reshape(sigma.shape )
return t
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , a : torch.FloatTensor , a : Dict ):
"""simple docstring"""
__lowerCamelCase = in_sigmas[-1].item()
__lowerCamelCase = in_sigmas[0].item()
__lowerCamelCase = 7.0 # 7.0 is the value used in the paper
__lowerCamelCase = np.linspace(0 , 1 , a )
__lowerCamelCase = sigma_min ** (1 / rho)
__lowerCamelCase = sigma_max ** (1 / rho)
__lowerCamelCase = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
return sigmas
@property
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
"""simple docstring"""
return self.dt is None
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , a : Union[torch.FloatTensor, np.ndarray] , a : Union[float, torch.FloatTensor] , a : Union[torch.FloatTensor, np.ndarray] , a : bool = True , ):
"""simple docstring"""
__lowerCamelCase = self.index_for_timestep(a )
# advance index counter by 1
__lowerCamelCase = timestep.cpu().item() if torch.is_tensor(a ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
__lowerCamelCase = self.sigmas[step_index]
__lowerCamelCase = self.sigmas[step_index + 1]
else:
# 2nd order / Heun's method
__lowerCamelCase = self.sigmas[step_index - 1]
__lowerCamelCase = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
__lowerCamelCase = 0
__lowerCamelCase = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
__lowerCamelCase = sigma_hat if self.state_in_first_order else sigma_next
__lowerCamelCase = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
__lowerCamelCase = sigma_hat if self.state_in_first_order else sigma_next
__lowerCamelCase = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
__lowerCamelCase = model_output
else:
raise ValueError(
f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`""" )
if self.config.clip_sample:
__lowerCamelCase = pred_original_sample.clamp(
-self.config.clip_sample_range , self.config.clip_sample_range )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
__lowerCamelCase = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
__lowerCamelCase = sigma_next - sigma_hat
# store for 2nd order step
__lowerCamelCase = derivative
__lowerCamelCase = dt
__lowerCamelCase = sample
else:
# 2. 2nd order / Heun's method
__lowerCamelCase = (sample - pred_original_sample) / sigma_next
__lowerCamelCase = (self.prev_derivative + derivative) / 2
# 3. take prev timestep & sample
__lowerCamelCase = self.dt
__lowerCamelCase = self.sample
# free dt and derivative
# Note, this puts the scheduler in "first order mode"
__lowerCamelCase = None
__lowerCamelCase = None
__lowerCamelCase = None
__lowerCamelCase = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=a )
def SCREAMING_SNAKE_CASE__ ( self : Tuple , a : torch.FloatTensor , a : torch.FloatTensor , a : torch.FloatTensor , ):
"""simple docstring"""
__lowerCamelCase = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(a ):
# mps does not support float64
__lowerCamelCase = self.timesteps.to(original_samples.device , dtype=torch.floataa )
__lowerCamelCase = timesteps.to(original_samples.device , dtype=torch.floataa )
else:
__lowerCamelCase = self.timesteps.to(original_samples.device )
__lowerCamelCase = timesteps.to(original_samples.device )
__lowerCamelCase = [self.index_for_timestep(a , a ) for t in timesteps]
__lowerCamelCase = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
__lowerCamelCase = sigma.unsqueeze(-1 )
__lowerCamelCase = original_samples + noise * sigma
return noisy_samples
def __len__( self : Tuple ):
"""simple docstring"""
return self.config.num_train_timesteps
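# Hedged usage sketch: the scheduler above is diffusers' HeunDiscreteScheduler;
# constructing it through the public API and inspecting its timesteps.
from diffusers import HeunDiscreteScheduler
sched = HeunDiscreteScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear")
sched.set_timesteps(25)
print(sched.timesteps[:5])  # interior timesteps repeat: one 1st-order and one 2nd-order stage each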
| 67 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : int = logging.get_logger(__name__)
lowerCamelCase : List[Any] = {
'google/realm-cc-news-pretrained-embedder': (
'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-encoder': (
'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-scorer': (
'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-openqa': (
        'https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/config.json'
),
'google/realm-orqa-nq-openqa': 'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json',
'google/realm-orqa-nq-reader': 'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json',
'google/realm-orqa-wq-openqa': 'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json',
'google/realm-orqa-wq-reader': 'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json',
# See all REALM models at https://huggingface.co/models?filter=realm
}
class __lowerCAmelCase (lowercase_ ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = """realm"""
def __init__(self : str , UpperCamelCase : List[Any]=30522 , UpperCamelCase : List[Any]=768 , UpperCamelCase : int=128 , UpperCamelCase : Any=12 , UpperCamelCase : Tuple=12 , UpperCamelCase : List[Any]=8 , UpperCamelCase : Union[str, Any]=3072 , UpperCamelCase : List[str]="gelu_new" , UpperCamelCase : Any=0.1 , UpperCamelCase : List[str]=0.1 , UpperCamelCase : Dict=512 , UpperCamelCase : Dict=2 , UpperCamelCase : List[Any]=0.02 , UpperCamelCase : List[Any]=1E-12 , UpperCamelCase : Dict=256 , UpperCamelCase : Union[str, Any]=10 , UpperCamelCase : Optional[int]=1E-3 , UpperCamelCase : Tuple=5 , UpperCamelCase : Optional[int]=320 , UpperCamelCase : List[str]=13353718 , UpperCamelCase : Optional[Any]=5000 , UpperCamelCase : str=1 , UpperCamelCase : Union[str, Any]=0 , UpperCamelCase : List[Any]=2 , **UpperCamelCase : int , ):
'''simple docstring'''
super().__init__(pad_token_id=UpperCamelCase , bos_token_id=UpperCamelCase , eos_token_id=UpperCamelCase , **UpperCamelCase )
# Common config
lowercase__ = vocab_size
lowercase__ = max_position_embeddings
lowercase__ = hidden_size
lowercase__ = retriever_proj_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = num_candidates
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = initializer_range
lowercase__ = type_vocab_size
lowercase__ = layer_norm_eps
# Reader config
lowercase__ = span_hidden_size
lowercase__ = max_span_width
lowercase__ = reader_layer_norm_eps
lowercase__ = reader_beam_size
lowercase__ = reader_seq_len
# Retrieval config
lowercase__ = num_block_records
lowercase__ = searcher_beam_size
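# Hedged usage sketch via the public transformers API (the class above is
# RealmConfig with its name obfuscated by this dump):
from transformers import RealmConfig
config = RealmConfig(num_candidates=4, reader_beam_size=3)
print(config.hidden_size, config.num_candidates)  # 768 4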
| 2 | 0 |
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class a__ ( snake_case ):
"""simple docstring"""
def __get__( self , lowercase , lowercase=None ) -> Optional[Any]:
'''simple docstring'''
if obj is None:
return self
if self.fget is None:
raise AttributeError("unreadable attribute" )
A__ = "__cached_" + self.fget.__name__
A__ = getattr(lowercase , lowercase , lowercase )
if cached is None:
A__ = self.fget(lowercase )
setattr(lowercase , lowercase , lowercase )
return cached
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Optional[int] ) -> Any:
'''simple docstring'''
A__ = val.lower()
if val in {"y", "yes", "t", "true", "on", "1"}:
return 1
if val in {"n", "no", "f", "false", "off", "0"}:
return 0
raise ValueError(F'invalid truth value {val!r}' )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Union[str, Any] ) -> List[Any]:
'''simple docstring'''
if is_torch_fx_proxy(SCREAMING_SNAKE_CASE_ ):
return True
if is_torch_available():
import torch
if isinstance(SCREAMING_SNAKE_CASE_ , torch.Tensor ):
return True
if is_tf_available():
import tensorflow as tf
if isinstance(SCREAMING_SNAKE_CASE_ , tf.Tensor ):
return True
if is_flax_available():
import jax.numpy as jnp
from jax.core import Tracer
if isinstance(SCREAMING_SNAKE_CASE_ , (jnp.ndarray, Tracer) ):
return True
return isinstance(SCREAMING_SNAKE_CASE_ , np.ndarray )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Optional[Any] ) -> Tuple:
'''simple docstring'''
return isinstance(SCREAMING_SNAKE_CASE_ , np.ndarray )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: int ) -> Optional[Any]:
'''simple docstring'''
return _is_numpy(SCREAMING_SNAKE_CASE_ )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Tuple ) -> Optional[Any]:
'''simple docstring'''
import torch
return isinstance(SCREAMING_SNAKE_CASE_ , torch.Tensor )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Optional[Any] ) -> Any:
'''simple docstring'''
return False if not is_torch_available() else _is_torch(SCREAMING_SNAKE_CASE_ )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: List[Any] ) -> str:
'''simple docstring'''
import torch
return isinstance(SCREAMING_SNAKE_CASE_ , torch.device )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Union[str, Any] ) -> Tuple:
'''simple docstring'''
return False if not is_torch_available() else _is_torch_device(SCREAMING_SNAKE_CASE_ )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Optional[Any] ) -> Tuple:
    '''simple docstring'''
    import torch
    if isinstance(SCREAMING_SNAKE_CASE_ , str ):
        if hasattr(torch , SCREAMING_SNAKE_CASE_ ):
            SCREAMING_SNAKE_CASE_ = getattr(torch , SCREAMING_SNAKE_CASE_ )
        else:
            return False
    return isinstance(SCREAMING_SNAKE_CASE_ , torch.dtype )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: List[Any] ) -> Optional[Any]:
'''simple docstring'''
return False if not is_torch_available() else _is_torch_dtype(SCREAMING_SNAKE_CASE_ )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
import tensorflow as tf
return isinstance(SCREAMING_SNAKE_CASE_ , tf.Tensor )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: List[Any] ) -> Union[str, Any]:
'''simple docstring'''
return False if not is_tf_available() else _is_tensorflow(SCREAMING_SNAKE_CASE_ )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: str ) -> Union[str, Any]:
'''simple docstring'''
import tensorflow as tf
# the `is_symbolic_tensor` predicate is only available starting with TF 2.14
if hasattr(SCREAMING_SNAKE_CASE_ , "is_symbolic_tensor" ):
return tf.is_symbolic_tensor(SCREAMING_SNAKE_CASE_ )
return type(SCREAMING_SNAKE_CASE_ ) == tf.Tensor
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Dict ) -> int:
'''simple docstring'''
return False if not is_tf_available() else _is_tf_symbolic_tensor(SCREAMING_SNAKE_CASE_ )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: List[str] ) -> str:
'''simple docstring'''
import jax.numpy as jnp # noqa: F811
return isinstance(SCREAMING_SNAKE_CASE_ , jnp.ndarray )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: List[str] ) -> Any:
'''simple docstring'''
return False if not is_flax_available() else _is_jax(SCREAMING_SNAKE_CASE_ )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Tuple ) -> str:
'''simple docstring'''
if isinstance(SCREAMING_SNAKE_CASE_ , (dict, UserDict) ):
return {k: to_py_obj(SCREAMING_SNAKE_CASE_ ) for k, v in obj.items()}
elif isinstance(SCREAMING_SNAKE_CASE_ , (list, tuple) ):
return [to_py_obj(SCREAMING_SNAKE_CASE_ ) for o in obj]
elif is_tf_tensor(SCREAMING_SNAKE_CASE_ ):
return obj.numpy().tolist()
elif is_torch_tensor(SCREAMING_SNAKE_CASE_ ):
return obj.detach().cpu().tolist()
elif is_jax_tensor(SCREAMING_SNAKE_CASE_ ):
return np.asarray(SCREAMING_SNAKE_CASE_ ).tolist()
elif isinstance(SCREAMING_SNAKE_CASE_ , (np.ndarray, np.number) ): # tolist also works on 0d np arrays
return obj.tolist()
else:
return obj
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Dict ) -> Union[str, Any]:
'''simple docstring'''
if isinstance(SCREAMING_SNAKE_CASE_ , (dict, UserDict) ):
return {k: to_numpy(SCREAMING_SNAKE_CASE_ ) for k, v in obj.items()}
elif isinstance(SCREAMING_SNAKE_CASE_ , (list, tuple) ):
return np.array(SCREAMING_SNAKE_CASE_ )
elif is_tf_tensor(SCREAMING_SNAKE_CASE_ ):
return obj.numpy()
elif is_torch_tensor(SCREAMING_SNAKE_CASE_ ):
return obj.detach().cpu().numpy()
elif is_jax_tensor(SCREAMING_SNAKE_CASE_ ):
return np.asarray(SCREAMING_SNAKE_CASE_ )
else:
return obj
class ModelOutput(OrderedDict):
    """
    Base class for all model outputs as dataclass. Allows indexing by integer or slice (like a tuple) or by string
    (like a dictionary), ignoring the `None` attributes.
    """

    def __post_init__(self):
        class_fields = fields(self)

        # Safety and consistency checks
        if not len(class_fields):
            raise ValueError(f"{self.__class__.__name__} has no fields.")
        if not all(field.default is None for field in class_fields[1:]):
            raise ValueError(f"{self.__class__.__name__} should not have more than one required field.")

        first_field = getattr(self, class_fields[0].name)
        other_fields_are_none = all(getattr(self, field.name) is None for field in class_fields[1:])

        if other_fields_are_none and not is_tensor(first_field):
            if isinstance(first_field, dict):
                iterator = first_field.items()
                first_field_iterator = True
            else:
                try:
                    iterator = iter(first_field)
                    first_field_iterator = True
                except TypeError:
                    first_field_iterator = False

            # if we provided an iterator as first field and the iterator is a (key, value) iterator
            # set the associated fields
            if first_field_iterator:
                for idx, element in enumerate(iterator):
                    if (
                        not isinstance(element, (list, tuple))
                        or not len(element) == 2
                        or not isinstance(element[0], str)
                    ):
                        if idx == 0:
                            # If we do not have an iterator of key/values, set it as attribute
                            self[class_fields[0].name] = first_field
                        else:
                            # If we have a mixed iterator, raise an error
                            raise ValueError(
                                f"Cannot set key/value for {element}. It needs to be a tuple (key, value)."
                            )
                        break
                    setattr(self, element[0], element[1])
                    if element[1] is not None:
                        self[element[0]] = element[1]
            elif first_field is not None:
                self[class_fields[0].name] = first_field
        else:
            for field in class_fields:
                v = getattr(self, field.name)
                if v is not None:
                    self[field.name] = v

    def __delitem__(self, *args, **kwargs):
        raise Exception(f"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.")

    def setdefault(self, *args, **kwargs):
        raise Exception(f"You cannot use ``setdefault`` on a {self.__class__.__name__} instance.")

    def pop(self, *args, **kwargs):
        raise Exception(f"You cannot use ``pop`` on a {self.__class__.__name__} instance.")

    def update(self, *args, **kwargs):
        raise Exception(f"You cannot use ``update`` on a {self.__class__.__name__} instance.")

    def __getitem__(self, k):
        if isinstance(k, str):
            inner_dict = dict(self.items())
            return inner_dict[k]
        else:
            return self.to_tuple()[k]

    def __setattr__(self, name, value):
        if name in self.keys() and value is not None:
            # Don't call self.__setitem__ to avoid recursion errors
            super().__setitem__(name, value)
        super().__setattr__(name, value)

    def __setitem__(self, key, value):
        # Will raise a KeyError if needed
        super().__setitem__(key, value)
        # Don't call self.__setattr__ to avoid recursion errors
        super().__setattr__(key, value)

    def to_tuple(self):
        """Convert self to a tuple containing all the attributes/keys that are not `None`."""
        return tuple(self[k] for k in self.keys())
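# Usage sketch (added; assumes torch and a dataclass subclass, as in the upstream library):
#
#   @dataclass
#   class MyOutput(ModelOutput):
#       loss: Optional[torch.FloatTensor] = None
#       logits: torch.FloatTensor = None
#
#   out = MyOutput(logits=torch.ones(2, 3))
#   out["logits"] is out.logits   # True: attribute and key access are kept in sync
#   "loss" in out                 # False: None fields are not registered as keys
#   out.to_tuple()                # (tensor(...),) -- only the non-None fields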
class ExplicitEnum(str, Enum):
    """Enum with a more explicit error message for missing values."""

    @classmethod
    def _missing_(cls, value):
        raise ValueError(
            f"{value} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys())}"
        )


class PaddingStrategy(ExplicitEnum):
    """Possible values for the `padding` argument. Useful for tab-completion in an IDE."""

    LONGEST = "longest"
    MAX_LENGTH = "max_length"
    DO_NOT_PAD = "do_not_pad"
class TensorType(ExplicitEnum):
    """Possible values for the `return_tensors` argument. Useful for tab-completion in an IDE."""

    PYTORCH = "pt"
    TENSORFLOW = "tf"
    NUMPY = "np"
    JAX = "jax"
class ContextManagers:
    """Wrapper for `contextlib.ExitStack` which enters a collection of context managers."""

    def __init__(self, context_managers):
        self.context_managers = context_managers
        self.stack = ExitStack()

    def __enter__(self):
        for context_manager in self.context_managers:
            self.stack.enter_context(context_manager)

    def __exit__(self, *args, **kwargs):
        self.stack.__exit__(*args, **kwargs)
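# Usage sketch (added): enter an arbitrary collection of context managers as one block.
#   with ContextManagers([open("a.txt"), open("b.txt")]):
#       ...  # both files are open here; both are closed again on exit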
def can_return_loss(model_class):
    """Check whether a given model can return a loss, i.e. whether its signature accepts `return_loss=True`."""
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models

    for p in signature.parameters:
        if p == "return_loss" and signature.parameters[p].default is True:
            return True
    return False
def find_labels(model_class):
    """Find the label argument(s) used by a given model, based on its signature."""
    model_name = model_class.__name__
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models

    if "QuestionAnswering" in model_name:
        return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
    else:
        return [p for p in signature.parameters if "label" in p]
def flatten_dict(d: MutableMapping, parent_key: str = "", delimiter: str = "."):
    """Flatten a nested dict into a single-level dict, joining nested keys with `delimiter`."""

    def _flatten_dict(d, parent_key="", delimiter="."):
        for k, v in d.items():
            key = str(parent_key) + delimiter + str(k) if parent_key else k
            if v and isinstance(v, MutableMapping):
                yield from flatten_dict(v, key, delimiter=delimiter).items()
            else:
                yield key, v

    return dict(_flatten_dict(d, parent_key, delimiter))
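# Example (added): nested keys are joined with the delimiter.
#   flatten_dict({"a": 1, "b": {"c": 2, "d": {"e": 3}}})  ->  {"a": 1, "b.c": 2, "b.d.e": 3}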
@contextmanager
def working_or_temp_dir(working_dir, use_temp_dir: bool = False):
    if use_temp_dir:
        with tempfile.TemporaryDirectory() as tmp_dir:
            yield tmp_dir
    else:
        yield working_dir
def transpose(array, axes=None):
    """Framework-agnostic version of `numpy.transpose` that works for numpy, torch, tf and jax tensors."""
    if is_numpy_array(array):
        return np.transpose(array, axes=axes)
    elif is_torch_tensor(array):
        return array.T if axes is None else array.permute(*axes)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.transpose(array, perm=axes)
    elif is_jax_tensor(array):
        return jnp.transpose(array, axes=axes)
    else:
        raise ValueError(f"Type not supported for transpose: {type(array)}.")
def reshape(array, newshape):
    """Framework-agnostic version of `numpy.reshape` that works for numpy, torch, tf and jax tensors."""
    if is_numpy_array(array):
        return np.reshape(array, newshape)
    elif is_torch_tensor(array):
        return array.reshape(*newshape)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.reshape(array, newshape)
    elif is_jax_tensor(array):
        return jnp.reshape(array, newshape)
    else:
        raise ValueError(f"Type not supported for reshape: {type(array)}.")
def squeeze(array, axis=None):
    """Framework-agnostic version of `numpy.squeeze` that works for numpy, torch, tf and jax tensors."""
    if is_numpy_array(array):
        return np.squeeze(array, axis=axis)
    elif is_torch_tensor(array):
        return array.squeeze() if axis is None else array.squeeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.squeeze(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.squeeze(array, axis=axis)
    else:
        raise ValueError(f"Type not supported for squeeze: {type(array)}.")
def expand_dims(array, axis):
    """Framework-agnostic version of `numpy.expand_dims` that works for numpy, torch, tf and jax tensors."""
    if is_numpy_array(array):
        return np.expand_dims(array, axis)
    elif is_torch_tensor(array):
        return array.unsqueeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.expand_dims(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.expand_dims(array, axis=axis)
    else:
        raise ValueError(f"Type not supported for expand_dims: {type(array)}.")
def tensor_size(array):
    """Framework-agnostic number of elements in a tensor."""
    if is_numpy_array(array):
        return np.size(array)
    elif is_torch_tensor(array):
        return array.numel()
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.size(array)
    elif is_jax_tensor(array):
        return array.size
    else:
        raise ValueError(f"Type not supported for tensor_size: {type(array)}.")
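# Quick numpy-only sanity checks (added) for the framework-agnostic helpers above:
#   x = np.zeros((1, 3, 4))
#   transpose(x).shape       -> (4, 3, 1)   # axes reversed when axes=None
#   reshape(x, (3, 4)).shape -> (3, 4)
#   squeeze(x).shape         -> (3, 4)      # drops the size-1 axis
#   expand_dims(x, 0).shape  -> (1, 1, 3, 4)
#   tensor_size(x)           -> 12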
def add_model_info_to_auto_map(auto_map, repo_id):
    """Adds the information of the repo_id to a given auto map."""
    for key, value in auto_map.items():
        if isinstance(value, (tuple, list)):
            auto_map[key] = [f"{repo_id}--{v}" if (v is not None and "--" not in v) else v for v in value]
        elif value is not None and "--" not in value:
            auto_map[key] = f"{repo_id}--{value}"
    return auto_map
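# Example (added): plain entries get prefixed with the repo id; entries that already
# contain "--" are left untouched.
#   add_model_info_to_auto_map({"AutoModel": "modeling.MyModel"}, "user/repo")
#   -> {"AutoModel": "user/repo--modeling.MyModel"}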
def infer_framework(model_class):
    """Infer the framework (PyTorch, TensorFlow or Flax) from a model class, by walking its MRO."""
    for base_class in inspect.getmro(model_class):
        module = base_class.__module__
        name = base_class.__name__
        if module.startswith("tensorflow") or module.startswith("keras") or name == "TFPreTrainedModel":
            return "tf"
        elif module.startswith("torch") or name == "PreTrainedModel":
            return "pt"
        elif module.startswith("flax") or module.startswith("jax") or name == "FlaxPreTrainedModel":
            return "flax"
    else:
        raise TypeError(f"Could not infer framework from class {model_class}.")
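# Minimal sketch (added; assumes torch is installed): any torch.nn.Module subclass
# resolves to "pt" because its MRO contains classes from the torch package.
#   import torch.nn as nn
#   class TinyModel(nn.Module):
#       pass
#   infer_framework(TinyModel)  # -> "pt"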
| 68 |
'''simple docstring'''
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : str = logging.get_logger(__name__)
lowerCamelCase : int = {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json',
}
class MvpConfig(PretrainedConfig):
    """Configuration class to store the configuration of an MVP model."""

    model_type = "mvp"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50267,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        classifier_dropout=0.0,
        scale_embedding=False,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        is_encoder_decoder=True,
        decoder_start_token_id=2,
        forced_eos_token_id=2,
        use_prompt=False,
        prompt_length=100,
        prompt_mid_dim=800,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )
| 2 | 0 |
"""simple docstring"""
import warnings
from functools import wraps
from typing import Callable
def experimental(fn) -> Callable:
    """Decorator that warns on every call that the decorated function is experimental."""

    @wraps(fn)
    def _inner_fn(*args, **kwargs):
        warnings.warn(
            f"'{fn.__name__}' is experimental and might be subject to breaking changes in the future.",
            UserWarning,
        )
        return fn(*args, **kwargs)

    return _inner_fn
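# Usage sketch (added): every call to the wrapped function emits a warning first.
#   @experimental
#   def new_api():
#       ...
#   new_api()  # UserWarning: 'new_api' is experimental and might be subject to breaking changes ...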
| 69 |
'''simple docstring'''
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
lowerCamelCase : List[str] = get_tests_dir('fixtures/spiece.model')
@require_sentencepiece
@require_tokenizers
class __lowerCAmelCase (lowercase_ , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ : int = DebertaVaTokenizer
lowerCAmelCase__ : List[Any] = DebertaVaTokenizerFast
lowerCAmelCase__ : str = True
lowerCAmelCase__ : Tuple = True
def UpperCamelCase__ (self : Tuple ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
lowercase__ = DebertaVaTokenizer(UpperCamelCase , unk_token='''<unk>''' )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase__ (self : Union[str, Any] , UpperCamelCase : str ):
'''simple docstring'''
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text
def UpperCamelCase__ (self : Optional[int] ):
'''simple docstring'''
        token = "<pad>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
def UpperCamelCase__ (self : Dict ):
'''simple docstring'''
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "[PAD]")
        self.assertEqual(len(vocab_keys), 30001)
def UpperCamelCase__ (self : int ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 30000 )
def UpperCamelCase__ (self : Optional[Any] ):
'''simple docstring'''
lowercase__ = ''' \tHeLLo!how \n Are yoU? '''
lowercase__ = ['''▁hello''', '''!''', '''how''', '''▁are''', '''▁you''', '''?''']
# fmt: on
lowercase__ = DebertaVaTokenizer(UpperCamelCase , do_lower_case=UpperCamelCase )
lowercase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = DebertaVaTokenizerFast(UpperCamelCase , do_lower_case=UpperCamelCase )
lowercase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
@unittest.skip('''There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.''' )
def UpperCamelCase__ (self : List[Any] ):
'''simple docstring'''
pass
@unittest.skip('''There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.''' )
def UpperCamelCase__ (self : List[str] ):
'''simple docstring'''
pass
def UpperCamelCase__ (self : Optional[int] ):
'''simple docstring'''
lowercase__ = '''I was born in 92000, and this is falsé.'''
lowercase__ = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ]
# fmt: on
lowercase__ = DebertaVaTokenizer(UpperCamelCase , split_by_punct=UpperCamelCase )
lowercase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = DebertaVaTokenizerFast(UpperCamelCase , split_by_punct=UpperCamelCase )
lowercase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
def UpperCamelCase__ (self : int ):
'''simple docstring'''
lowercase__ = '''I was born in 92000, and this is falsé.'''
lowercase__ = ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ]
# fmt: on
lowercase__ = DebertaVaTokenizer(UpperCamelCase , do_lower_case=UpperCamelCase , split_by_punct=UpperCamelCase )
lowercase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = DebertaVaTokenizerFast(UpperCamelCase , do_lower_case=UpperCamelCase , split_by_punct=UpperCamelCase )
lowercase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
def UpperCamelCase__ (self : Optional[Any] ):
'''simple docstring'''
lowercase__ = '''I was born in 92000, and this is falsé.'''
lowercase__ = ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.''', ]
# fmt: on
lowercase__ = DebertaVaTokenizer(UpperCamelCase , do_lower_case=UpperCamelCase , split_by_punct=UpperCamelCase )
lowercase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = DebertaVaTokenizerFast(UpperCamelCase , do_lower_case=UpperCamelCase , split_by_punct=UpperCamelCase )
lowercase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
def UpperCamelCase__ (self : Union[str, Any] ):
'''simple docstring'''
lowercase__ = '''I was born in 92000, and this is falsé.'''
lowercase__ = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ]
# fmt: on
lowercase__ = DebertaVaTokenizer(UpperCamelCase , do_lower_case=UpperCamelCase , split_by_punct=UpperCamelCase )
lowercase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = DebertaVaTokenizerFast(UpperCamelCase , do_lower_case=UpperCamelCase , split_by_punct=UpperCamelCase )
lowercase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
def UpperCamelCase__ (self : Union[str, Any] ):
'''simple docstring'''
lowercase__ = ''' \tHeLLo!how \n Are yoU? '''
lowercase__ = ['''▁''', '''<unk>''', '''e''', '''<unk>''', '''o''', '''!''', '''how''', '''▁''', '''<unk>''', '''re''', '''▁yo''', '''<unk>''', '''?''']
# fmt: on
lowercase__ = DebertaVaTokenizer(UpperCamelCase , do_lower_case=UpperCamelCase , split_by_punct=UpperCamelCase )
lowercase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = DebertaVaTokenizerFast(UpperCamelCase , do_lower_case=UpperCamelCase , split_by_punct=UpperCamelCase )
lowercase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
def UpperCamelCase__ (self : Union[str, Any] ):
'''simple docstring'''
lowercase__ = self.get_tokenizer()
lowercase__ = self.get_rust_tokenizer()
lowercase__ = '''I was born in 92000, and this is falsé.'''
lowercase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) )
lowercase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase )
lowercase__ = rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = self.get_rust_tokenizer()
lowercase__ = tokenizer.encode(UpperCamelCase )
lowercase__ = rust_tokenizer.encode(UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
def UpperCamelCase__ (self : int ):
'''simple docstring'''
lowercase__ = '''This is a test'''
lowercase__ = [13, 1, 4398, 25, 21, 1289]
lowercase__ = ['''▁''', '''T''', '''his''', '''▁is''', '''▁a''', '''▁test''']
lowercase__ = ['''▁''', '''<unk>''', '''his''', '''▁is''', '''▁a''', '''▁test''']
lowercase__ = DebertaVaTokenizer(UpperCamelCase , keep_accents=UpperCamelCase )
lowercase__ = DebertaVaTokenizerFast(UpperCamelCase , keep_accents=UpperCamelCase )
lowercase__ = tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = tokenizer.tokenize(UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = tokenizer.convert_ids_to_tokens(UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = rust_tokenizer.tokenize(UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = rust_tokenizer.convert_ids_to_tokens(UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
# fmt: off
lowercase__ = '''I was born in 92000, and this is falsé.'''
lowercase__ = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9]
lowercase__ = ['''▁''', '''I''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''é''', '''.''', ]
lowercase__ = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.''', ]
# fmt: on
lowercase__ = tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = tokenizer.tokenize(UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = tokenizer.convert_ids_to_tokens(UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = rust_tokenizer.tokenize(UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = rust_tokenizer.convert_ids_to_tokens(UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
def UpperCamelCase__ (self : int ):
'''simple docstring'''
lowercase__ = DebertaVaTokenizer(UpperCamelCase )
lowercase__ = tokenizer.encode('''sequence builders''' )
lowercase__ = tokenizer.encode('''multi-sequence build''' )
lowercase__ = tokenizer.build_inputs_with_special_tokens(UpperCamelCase )
lowercase__ = tokenizer.build_inputs_with_special_tokens(UpperCamelCase , UpperCamelCase )
self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , UpperCamelCase )
self.assertEqual(
[tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , UpperCamelCase , )
@slow
def UpperCamelCase__ (self : int ):
'''simple docstring'''
lowercase__ = {'''input_ids''': [[1, 39867, 36, 19390, 486, 27, 35052, 81436, 18, 60685, 1225, 7, 35052, 81436, 18, 9367, 16899, 18, 15937, 53, 594, 773, 18, 16287, 30465, 36, 15937, 6, 41139, 38, 36979, 60763, 191, 6, 34132, 99, 6, 50538, 390, 43230, 6, 34132, 2779, 20850, 14, 699, 1072, 1194, 36, 382, 10901, 53, 7, 699, 1072, 2084, 36, 20422, 630, 53, 19, 105, 3049, 1896, 1053, 16899, 1506, 11, 37978, 4243, 7, 1237, 31869, 200, 16566, 654, 6, 35052, 81436, 7, 55630, 13593, 4, 2], [1, 26, 15011, 13, 667, 8, 1053, 18, 23611, 1237, 72356, 12820, 34, 104134, 1209, 35, 13313, 6627, 21, 202, 347, 7, 164, 2399, 11, 46, 4485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1232, 2864, 15785, 14951, 105, 5, 8581, 1250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCamelCase , model_name='''microsoft/deberta-v2-xlarge''' , revision='''ad6e42c1532ddf3a15c39246b63f5559d558b670''' , )
| 2 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__ : int =logging.get_logger(__name__)
A__ : Union[str, Any] ={
'''tiiuae/falcon-40b''': '''https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json''',
'''tiiuae/falcon-7b''': '''https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json''',
}
class FalconConfig(PretrainedConfig):
    model_type = "falcon"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=65024,
        hidden_size=4544,
        num_hidden_layers=32,
        num_attention_heads=71,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        num_kv_heads=None,
        alibi=False,
        new_decoder_architecture=False,
        multi_query=True,
        parallel_attn=True,
        bias=False,
        bos_token_id=11,
        eos_token_id=11,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def head_dim(self):
        return self.hidden_size // self.num_attention_heads

    @property
    def rotary(self):
        return not self.alibi
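# Usage sketch (added): head_dim is derived from hidden_size // num_attention_heads,
# and rotary embeddings are active exactly when alibi is disabled.
#   config = FalconConfig()  # defaults: hidden_size=4544, num_attention_heads=71, alibi=False
#   config.head_dim          # -> 64 (4544 // 71)
#   config.rotary            # -> True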
| 70 |
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
        "decoder.output_projection.weight",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_mbart_checkpoint_from_disk(
    checkpoint_path, hf_config_path="facebook/mbart-large-en-ro", finetuned=False, mbart_50=False
):
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]

    mbart_config = MBartConfig.from_pretrained(hf_config_path, vocab_size=vocab_size)
    if mbart_50 and finetuned:
        mbart_config.activation_function = "relu"

    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MBartForConditionalGeneration(mbart_config)
    model.model.load_state_dict(state_dict)

    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared)

    return model
if __name__ == "__main__":
lowerCamelCase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'fairseq_path', type=str, help='bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'
)
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--hf_config',
default='facebook/mbart-large-cc25',
type=str,
help='Which huggingface architecture to use: mbart-large',
)
parser.add_argument('--mbart_50', action='store_true', help='whether the model is mMART-50 checkpoint')
parser.add_argument('--finetuned', action='store_true', help='whether the model is a fine-tuned checkpoint')
lowerCamelCase : Any = parser.parse_args()
lowerCamelCase : List[str] = convert_fairseq_mbart_checkpoint_from_disk(
args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa
)
model.save_pretrained(args.pytorch_dump_folder_path)
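# Example invocation (added; the script name and paths are placeholders):
#   python convert_mbart_checkpoint.py /path/to/model.pt ./mbart-hf \
#       --hf_config facebook/mbart-large-cc25 --finetuned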
| 2 | 0 |
from __future__ import annotations
def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    """Sort `sequence` in place between indices `start` and `end` using slowsort."""
    if start is None:
        start = 0
    if end is None:
        end = len(sequence) - 1
    if start >= end:
        return
    mid = (start + end) // 2
    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)
    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]
    slowsort(sequence, start, end - 1)
if __name__ == "__main__":
from doctest import testmod
testmod()
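    # Minimal usage sketch (added): slowsort sorts the list in place.
    data = [5, 3, 1, 4, 2]
    slowsort(data)
    assert data == [1, 2, 3, 4, 5]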
| 71 |
'''simple docstring'''
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
lowerCamelCase : List[Any] = logging.getLogger(__name__)
class __lowerCAmelCase (lowercase_ ):
'''simple docstring'''
def __init__(self : Optional[Any] , UpperCamelCase : Any=-1 ):
'''simple docstring'''
lowercase__ = label_idx
def UpperCamelCase__ (self : Union[str, Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : Union[Split, str] ):
'''simple docstring'''
if isinstance(UpperCamelCase , UpperCamelCase ):
lowercase__ = mode.value
lowercase__ = os.path.join(UpperCamelCase , f"{mode}.txt" )
lowercase__ = 1
lowercase__ = []
with open(UpperCamelCase , encoding='''utf-8''' ) as f:
lowercase__ = []
lowercase__ = []
for line in f:
if line.startswith('''-DOCSTART-''' ) or line == "" or line == "\n":
if words:
examples.append(InputExample(guid=f"{mode}-{guid_index}" , words=UpperCamelCase , labels=UpperCamelCase ) )
guid_index += 1
lowercase__ = []
lowercase__ = []
else:
lowercase__ = line.split(''' ''' )
words.append(splits[0] )
if len(UpperCamelCase ) > 1:
labels.append(splits[self.label_idx].replace('''\n''' , '''''' ) )
else:
# Examples could have no label for mode = "test"
labels.append('''O''' )
if words:
examples.append(InputExample(guid=f"{mode}-{guid_index}" , words=UpperCamelCase , labels=UpperCamelCase ) )
return examples
def UpperCamelCase__ (self : Optional[int] , UpperCamelCase : TextIO , UpperCamelCase : TextIO , UpperCamelCase : List ):
'''simple docstring'''
lowercase__ = 0
for line in test_input_reader:
if line.startswith('''-DOCSTART-''' ) or line == "" or line == "\n":
writer.write(UpperCamelCase )
if not preds_list[example_id]:
example_id += 1
elif preds_list[example_id]:
lowercase__ = line.split()[0] + ''' ''' + preds_list[example_id].pop(0 ) + '''\n'''
writer.write(UpperCamelCase )
else:
logger.warning('''Maximum sequence length exceeded: No prediction for \'%s\'.''' , line.split()[0] )
def UpperCamelCase__ (self : Union[str, Any] , UpperCamelCase : str ):
'''simple docstring'''
if path:
with open(UpperCamelCase , '''r''' ) as f:
lowercase__ = f.read().splitlines()
if "O" not in labels:
lowercase__ = ['''O'''] + labels
return labels
else:
return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class __lowerCAmelCase (lowercase_ ):
'''simple docstring'''
def __init__(self : List[Any] ):
'''simple docstring'''
super().__init__(label_idx=-2 )
def UpperCamelCase__ (self : List[Any] , UpperCamelCase : str ):
'''simple docstring'''
if path:
with open(UpperCamelCase , '''r''' ) as f:
lowercase__ = f.read().splitlines()
if "O" not in labels:
lowercase__ = ['''O'''] + labels
return labels
else:
return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class __lowerCAmelCase (lowercase_ ):
'''simple docstring'''
def UpperCamelCase__ (self : Tuple , UpperCamelCase : int , UpperCamelCase : Union[Split, str] ):
'''simple docstring'''
if isinstance(UpperCamelCase , UpperCamelCase ):
lowercase__ = mode.value
lowercase__ = os.path.join(UpperCamelCase , f"{mode}.txt" )
lowercase__ = 1
lowercase__ = []
with open(UpperCamelCase , encoding='''utf-8''' ) as f:
for sentence in parse_incr(UpperCamelCase ):
lowercase__ = []
lowercase__ = []
for token in sentence:
words.append(token['''form'''] )
labels.append(token['''upos'''] )
assert len(UpperCamelCase ) == len(UpperCamelCase )
if words:
examples.append(InputExample(guid=f"{mode}-{guid_index}" , words=UpperCamelCase , labels=UpperCamelCase ) )
guid_index += 1
return examples
def UpperCamelCase__ (self : Tuple , UpperCamelCase : TextIO , UpperCamelCase : TextIO , UpperCamelCase : List ):
'''simple docstring'''
lowercase__ = 0
for sentence in parse_incr(UpperCamelCase ):
lowercase__ = preds_list[example_id]
lowercase__ = ''''''
for token in sentence:
out += f"{token['form']} ({token['upos']}|{s_p.pop(0 )}) "
out += "\n"
writer.write(UpperCamelCase )
example_id += 1
def UpperCamelCase__ (self : Union[str, Any] , UpperCamelCase : str ):
'''simple docstring'''
if path:
with open(UpperCamelCase , '''r''' ) as f:
return f.read().splitlines()
else:
return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
| 2 | 0 |
"""simple docstring"""
def snake_case_(txt: str) -> list:
    """
    Return every variant of `txt` with exactly one alphabetic character upper-cased.
    """
    return [
        txt[:a] + txt[a].upper() + txt[a + 1 :]
        for a in range(len(txt))
        if txt[a].isalpha()
    ]
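# Example (added): each alphabetic position yields one variant with that character
# upper-cased; non-alphabetic positions are skipped.
#   snake_case_("a1c")  ->  ["A1c", "a1C"]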
if __name__ == "__main__":
__import__('''doctest''').testmod()
| 72 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : Dict = logging.get_logger(__name__)
lowerCamelCase : Union[str, Any] = {
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class MegatronBertConfig(PretrainedConfig):
    """Configuration class to store the configuration of a MEGATRON_BERT model."""

    model_type = "megatron-bert"

    def __init__(
        self,
        vocab_size=29056,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
| 2 | 0 |
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class A_ :
_UpperCAmelCase : Optional[str] = field(
default='''codeparrot/codeparrot''' , metadata={'''help''': '''Model name or path of model to be trained.'''} )
_UpperCAmelCase : Optional[str] = field(
default='''./''' , metadata={'''help''': '''Save dir where model repo is cloned and models updates are saved to.'''} )
_UpperCAmelCase : Optional[str] = field(
default='''codeparrot/codeparrot-clean-train''' , metadata={'''help''': '''Name or path of training dataset.'''} )
_UpperCAmelCase : Optional[str] = field(
default='''codeparrot/codeparrot-clean-valid''' , metadata={'''help''': '''Name or path of validation dataset.'''} )
_UpperCAmelCase : Optional[int] = field(default=2 , metadata={'''help''': '''Batch size for training.'''} )
_UpperCAmelCase : Optional[int] = field(default=2 , metadata={'''help''': '''Batch size for evaluation.'''} )
_UpperCAmelCase : Optional[float] = field(default=0.1 , metadata={'''help''': '''Value of weight decay.'''} )
_UpperCAmelCase : Optional[int] = field(
default=10_000 , metadata={'''help''': '''Size of buffer used to shuffle streaming dataset.'''} )
    _UpperCAmelCase : Optional[float] = field(default=2E-4 , metadata={'''help''': '''Learning rate for training.'''} )
    _UpperCAmelCase : Optional[str] = field(default='''cosine''' , metadata={'''help''': '''Learning rate scheduler type.'''} )
_UpperCAmelCase : Optional[int] = field(
default=750 , metadata={'''help''': '''Number of warmup steps in the learning rate schedule.'''} )
_UpperCAmelCase : Optional[int] = field(
default=16 , metadata={'''help''': '''Number of gradient accumulation steps.'''} )
_UpperCAmelCase : Optional[bool] = field(
default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Use gradient checkpointing to reduce memory footprint.'''} )
_UpperCAmelCase : Optional[int] = field(default=50_000 , metadata={'''help''': '''Maximum number of training steps.'''} )
_UpperCAmelCase : Optional[int] = field(
default=-1 , metadata={'''help''': '''Maximum number of evaluation steps. If -1 the full dataset is evaluated.'''} )
_UpperCAmelCase : Optional[int] = field(default=1_024 , metadata={'''help''': '''Sequence lengths used for training.'''} )
_UpperCAmelCase : Optional[int] = field(default=1 , metadata={'''help''': '''Training seed.'''} )
_UpperCAmelCase : Optional[int] = field(
default=1_024 , metadata={'''help''': '''Interval to save checkpoints. Measured as number of forward passes not training steps.'''} , )
_UpperCAmelCase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''States path if the training should continue from a checkpoint folder.'''} )
_UpperCAmelCase : Optional[bool] = field(default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''If True the data is pretokenized.'''} )
@dataclass
class A_ :
_UpperCAmelCase : Optional[str] = field(
default='''codeparrot/codeparrot''' , metadata={'''help''': '''Model name or path of model to be evaluated.'''} )
_UpperCAmelCase : Optional[str] = field(
default='''codeparrot/codeparrot-clean-valid''' , metadata={'''help''': '''Name or path of validation dataset.'''} )
_UpperCAmelCase : Optional[int] = field(default=2 , metadata={'''help''': '''Batch size used for evaluation.'''} )
_UpperCAmelCase : Optional[int] = field(
default=-1 , metadata={'''help''': '''Maximum number of evaluation steps. If -1 the full dataset is evaluated.'''} )
_UpperCAmelCase : Optional[int] = field(default=1_024 , metadata={'''help''': '''Length of sequences to be evaluated.'''} )
_UpperCAmelCase : Optional[int] = field(default=1 , metadata={'''help''': '''Random seed used for evaluation.'''} )
@dataclass
class A_ :
_UpperCAmelCase : Optional[str] = field(
default='''codeparrot/codeparrot''' , metadata={'''help''': '''Model name or path of model to be evaluated.'''} )
_UpperCAmelCase : Optional[int] = field(default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Number of workers used for code evaluation.'''} )
_UpperCAmelCase : Optional[int] = field(
default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''The number of human-eval tasks to run. If not included all tasks are evaluated.'''} , )
_UpperCAmelCase : Optional[bool] = field(
default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Sample from the language model\'s output distribution.'''} )
_UpperCAmelCase : Optional[float] = field(default=0.2 , metadata={'''help''': '''Sampling temperature used for generation.'''} )
_UpperCAmelCase : Optional[int] = field(default=256 , metadata={'''help''': '''Maximum number of newly generated tokens.'''} )
_UpperCAmelCase : Optional[int] = field(default=0 , metadata={'''help''': '''Top-k parameter used for generation.'''} )
_UpperCAmelCase : Optional[float] = field(default=0.95 , metadata={'''help''': '''Top-p parameter used for nucleus sampling.'''} )
_UpperCAmelCase : Optional[int] = field(default=10 , metadata={'''help''': '''Number of generations to run in parallel.'''} )
_UpperCAmelCase : Optional[int] = field(
default=200 , metadata={'''help''': '''Number of completions to generate for each sample.'''} )
_UpperCAmelCase : Optional[int] = field(default=1 , metadata={'''help''': '''Random seed used for evaluation.'''} )
    _UpperCAmelCase : Optional[str] = field(
        default='''eval_results.json''' , metadata={'''help''': '''File where the evaluation results are saved.'''} )
_UpperCAmelCase : Optional[str] = field(
default='''0''' , metadata={'''help''': '''Allow `code_eval` to execute Python code on machine'''} )
_UpperCAmelCase : Optional[int] = field(
default=-1 , metadata={
'''help''': (
'''Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive'''
''' number corresponds to which GPU device id to run on.'''
)
} , )
@dataclass
class A_ :
_UpperCAmelCase : Optional[int] = field(
default=SCREAMING_SNAKE_CASE , metadata={
'''help''': '''The number of CPU cores to use for parallel preprocessing. Default uses the maximum available.'''
} , )
_UpperCAmelCase : Optional[str] = field(
default='''transformersbook/codeparrot''' , metadata={'''help''': '''Folder or name of dataset to process.'''} )
_UpperCAmelCase : Optional[str] = field(
default='''codeparrot-clean''' , metadata={'''help''': '''Folder to save processed processed dataset.'''} )
_UpperCAmelCase : Optional[int] = field(
default=100_000 , metadata={'''help''': '''Number of files to save per JSON output file.'''} )
_UpperCAmelCase : Optional[str] = field(default='''content''' , metadata={'''help''': '''Column containing text data to process.'''} )
_UpperCAmelCase : Optional[float] = field(
default=1_000 , metadata={'''help''': '''Maximum line length in file, otherwise file is filtered.'''} )
_UpperCAmelCase : Optional[float] = field(
default=100 , metadata={'''help''': '''Maximum mean line length in file, otherwise file is filtered.'''} )
_UpperCAmelCase : Optional[float] = field(
default=0.25 , metadata={'''help''': '''Maximum fraction of non-alphanumeric characters, otherwise file is filtered.'''} )
_UpperCAmelCase : Optional[float] = field(
default=1.5 , metadata={'''help''': '''Minimum character token ratio for the file, otherwise file is filtered.'''} )
_UpperCAmelCase : Optional[float] = field(
default=0.7 , metadata={'''help''': '''Probability for filtering config, test and uncommon files.'''} )
_UpperCAmelCase : Optional[str] = field(
default='''codeparrot/codeparrot''' , metadata={'''help''': '''Name or path to the tokenizer.'''} , )
_UpperCAmelCase : Optional[bool] = field(
default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''If True, near-duplicate samples are removed.'''} )
_UpperCAmelCase : Optional[float] = field(
default=0.85 , metadata={'''help''': '''Jaccard threshold for near-duplicate samples.'''} )
@dataclass
class A_ :
_UpperCAmelCase : Optional[str] = field(
default='''gpt2''' , metadata={'''help''': '''Base tokenizer to build new tokenizer from.'''} )
_UpperCAmelCase : Optional[str] = field(
default='''transformersbook/codeparrot-train''' , metadata={'''help''': '''Dataset to train tokenizer on.'''} )
_UpperCAmelCase : Optional[str] = field(default='''content''' , metadata={'''help''': '''Column containing text data to process.'''} )
_UpperCAmelCase : Optional[int] = field(default=200_000 , metadata={'''help''': '''Number of examples to train tokenizer on.'''} )
_UpperCAmelCase : Optional[int] = field(
        default=32_768 , metadata={'''help''': '''Vocabulary size of the new tokenizer.'''} )
_UpperCAmelCase : Optional[str] = field(default='''codeparrot''' , metadata={'''help''': '''Name of new tokenizer.'''} )
_UpperCAmelCase : Optional[bool] = field(default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Push saved tokenizer to the hub.'''} )
@dataclass
class A_ :
_UpperCAmelCase : Optional[str] = field(
default='''codeparrot/codeparrot''' , metadata={'''help''': '''Name or path to the tokenizer.'''} )
_UpperCAmelCase : Optional[str] = field(
default='''codeparrot/codeparrot-clean-train''' , metadata={'''help''': '''Name or path to the dataset to pretokenize.'''} )
_UpperCAmelCase : Optional[str] = field(
default='''tokenized-codeparrot-train''' , metadata={'''help''': '''Repo name of the pretokenized data.'''} )
_UpperCAmelCase : Optional[int] = field(default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Number of workers used for code evaluation.'''} )
@dataclass
class A_ :
_UpperCAmelCase : Optional[str] = field(
default='''gpt2-large''' , metadata={'''help''': '''Configuration to use for model initialization.'''} )
_UpperCAmelCase : Optional[str] = field(
default='''codeparrot/codeparrot''' , metadata={'''help''': '''Tokenizer attached to model.'''} )
_UpperCAmelCase : Optional[str] = field(default='''codeparrot''' , metadata={'''help''': '''Name of the created model.'''} )
_UpperCAmelCase : Optional[bool] = field(default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Push saved tokenizer to the hub.'''} )
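# Usage sketch (added; hypothetical names): upstream, these dataclasses have distinct
# names (TrainingArguments, EvaluationArguments, ...) and are parsed from the command
# line with transformers.HfArgumentParser, e.g.:
#   parser = HfArgumentParser(TrainingArguments)
#   training_args = parser.parse_args()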
| 73 |
'''simple docstring'''
# Lint as: python3
import itertools
import os
import re
lowerCamelCase : Any = re.compile(R'([A-Z]+)([A-Z][a-z])')
lowerCamelCase : str = re.compile(R'([a-z\d])([A-Z])')
lowerCamelCase : Optional[int] = re.compile(R'(?<!_)_(?!_)')
lowerCamelCase : List[Any] = re.compile(R'(_{2,})')
lowerCamelCase : str = R'^\w+(\.\w+)*$'
lowerCamelCase : Dict = R'<>:/\|?*'
def camelcase_to_snakecase(name):
    """Convert a camel-case string to snake-case."""
    name = _uppercase_uppercase_re.sub(r"\1_\2", name)
    name = _lowercase_uppercase_re.sub(r"\1_\2", name)
    return name.lower()


def snakecase_to_camelcase(name):
    """Convert a snake-case string to camel-case."""
    name = _single_underscore_re.split(name)
    name = [_multiple_underscores_re.split(n) for n in name]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(name) if n != "")


def filename_prefix_for_name(name):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    return camelcase_to_snakecase(name)


def filename_prefix_for_split(name, split):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    if not re.match(_split_re, split):
        raise ValueError(f"Split name should match '{_split_re}' but got '{split}'.")
    return f"{filename_prefix_for_name(name)}-{split}"


def filepattern_for_dataset_split(dataset_name, split, data_dir, filetype_suffix=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    if filetype_suffix:
        prefix += f".{filetype_suffix}"
    filepath = os.path.join(data_dir, prefix)
    return f"{filepath}*"


def filenames_for_dataset_split(path, dataset_name, split, filetype_suffix=None, shard_lengths=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    prefix = os.path.join(path, prefix)

    if shard_lengths:
        num_shards = len(shard_lengths)
        filenames = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(num_shards)]
        if filetype_suffix:
            filenames = [filename + f".{filetype_suffix}" for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += f".{filetype_suffix}"
        return [filename]
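# Example (added): shard filenames for a two-shard "train" split of dataset "my_dataset".
#   filenames_for_dataset_split("/data", "my_dataset", "train",
#                               filetype_suffix="arrow", shard_lengths=[100, 100])
#   -> ["/data/my_dataset-train-00000-of-00002.arrow",
#       "/data/my_dataset-train-00001-of-00002.arrow"]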
| 2 | 0 |
"""simple docstring"""
import os
import sys
import unittest
_lowercase = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
_lowercase = os.path.join(git_repo_path, '''src''', '''diffusers''')
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
A = find_backend(' if not is_torch_available():' )
self.assertEqual(A_ ,'torch' )
# backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
# self.assertEqual(backend_with_underscore, "tensorflow_text")
A = find_backend(' if not (is_torch_available() and is_transformers_available()):' )
self.assertEqual(A_ ,'torch_and_transformers' )
# double_backend_with_underscore = find_backend(
# " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
# )
# self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
A = find_backend(
' if not (is_torch_available() and is_transformers_available() and is_onnx_available()):' )
self.assertEqual(A_ ,'torch_and_transformers_and_onnx' )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str:
A = read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn('torch' ,A_ )
self.assertIn('torch_and_transformers' ,A_ )
self.assertIn('flax_and_transformers' ,A_ )
self.assertIn('torch_and_transformers_and_onnx' ,A_ )
# Likewise, we can't assert on the exact content of a key
self.assertIn('UNet2DModel' ,objects['torch'] )
self.assertIn('FlaxUNet2DConditionModel' ,objects['flax'] )
self.assertIn('StableDiffusionPipeline' ,objects['torch_and_transformers'] )
self.assertIn('FlaxStableDiffusionPipeline' ,objects['flax_and_transformers'] )
self.assertIn('LMSDiscreteScheduler' ,objects['torch_and_scipy'] )
self.assertIn('OnnxStableDiffusionPipeline' ,objects['torch_and_transformers_and_onnx'] )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any:
A = create_dummy_object('CONSTANT' ,'\'torch\'' )
self.assertEqual(A_ ,'\nCONSTANT = None\n' )
A = create_dummy_object('function' ,'\'torch\'' )
self.assertEqual(
A_ ,'\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n' )
A = '\nclass FakeClass(metaclass=DummyObject):\n _backends = \'torch\'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, \'torch\')\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, \'torch\')\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, \'torch\')\n'
A = create_dummy_object('FakeClass' ,'\'torch\'' )
self.assertEqual(A_ ,A_ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[Any]:
A = '# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, ["torch"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = ["torch"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, ["torch"])\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, ["torch"])\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, ["torch"])\n'
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], A)
| 74 |
'''simple docstring'''
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class __lowerCAmelCase :
'''simple docstring'''
def __init__(self : str , UpperCamelCase : Tuple , UpperCamelCase : Optional[int]=99 , UpperCamelCase : Optional[int]=13 , UpperCamelCase : Tuple=16 , UpperCamelCase : Union[str, Any]=7 , UpperCamelCase : List[Any]=True , UpperCamelCase : List[str]=True , UpperCamelCase : str=True , UpperCamelCase : Tuple=False , UpperCamelCase : str=True , UpperCamelCase : Tuple=2 , UpperCamelCase : Optional[int]=32 , UpperCamelCase : Any=4 , UpperCamelCase : Optional[int]=4 , UpperCamelCase : Tuple=30 , UpperCamelCase : str=0 , UpperCamelCase : Tuple=1 , UpperCamelCase : List[Any]=2 , UpperCamelCase : str=None , ):
'''simple docstring'''
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = decoder_seq_length
# For common tests
lowercase__ = self.decoder_seq_length
lowercase__ = is_training
lowercase__ = use_attention_mask
lowercase__ = use_labels
lowercase__ = vocab_size
lowercase__ = d_model
lowercase__ = d_model
lowercase__ = decoder_layers
lowercase__ = decoder_layers
lowercase__ = decoder_ffn_dim
lowercase__ = decoder_attention_heads
lowercase__ = decoder_attention_heads
lowercase__ = eos_token_id
lowercase__ = bos_token_id
lowercase__ = pad_token_id
lowercase__ = decoder_start_token_id
lowercase__ = use_cache
lowercase__ = max_position_embeddings
lowercase__ = None
lowercase__ = decoder_seq_length
lowercase__ = 2
lowercase__ = 1
def UpperCamelCase__ (self : str ):
'''simple docstring'''
lowercase__ = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
lowercase__ = None
if self.use_attention_mask:
lowercase__ = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 )
lowercase__ = None
if self.use_labels:
lowercase__ = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
lowercase__ = TrOCRConfig(
vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , )
return (config, input_ids, attention_mask, lm_labels)
def UpperCamelCase__ (self : Tuple , UpperCamelCase : List[Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : Tuple , UpperCamelCase : List[str] , ):
'''simple docstring'''
lowercase__ = True
lowercase__ = TrOCRDecoder(config=UpperCamelCase ).to(UpperCamelCase ).eval()
lowercase__ = input_ids[:2]
input_ids[input_ids == 0] += 1
# first forward pass
lowercase__ = model(UpperCamelCase , use_cache=UpperCamelCase )
lowercase__ = model(UpperCamelCase )
lowercase__ = model(UpperCamelCase , use_cache=UpperCamelCase )
self.parent.assertTrue(len(UpperCamelCase ) == len(UpperCamelCase ) )
self.parent.assertTrue(len(UpperCamelCase ) == len(UpperCamelCase ) + 1 )
lowercase__ = outputs['''past_key_values''']
# create hypothetical next token and extent to next_input_ids
lowercase__ = ids_tensor((2, 1) , config.vocab_size - 1 ) + 1
# append to next input_ids and
lowercase__ = torch.cat([input_ids, next_tokens] , dim=-1 )
lowercase__ = model(UpperCamelCase )['''last_hidden_state''']
lowercase__ = model(UpperCamelCase , past_key_values=UpperCamelCase )['''last_hidden_state''']
# select random slice
lowercase__ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowercase__ = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
lowercase__ = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
assert torch.allclose(UpperCamelCase , UpperCamelCase , atol=1E-3 )
def UpperCamelCase__ (self : Optional[Any] ):
'''simple docstring'''
lowercase__ = self.prepare_config_and_inputs()
lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ = config_and_inputs
lowercase__ = {'''input_ids''': input_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
@require_torch
class __lowerCAmelCase (lowercase_ , lowercase_ , lowercase_ , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
lowerCAmelCase__ : List[Any] = (TrOCRForCausalLM,) if is_torch_available() else ()
lowerCAmelCase__ : Optional[Any] = {"""text-generation""": TrOCRForCausalLM} if is_torch_available() else {}
lowerCAmelCase__ : Optional[Any] = True
lowerCAmelCase__ : List[str] = False
def UpperCamelCase__ (self : Any ):
'''simple docstring'''
lowercase__ = TrOCRStandaloneDecoderModelTester(self , is_training=UpperCamelCase )
lowercase__ = ConfigTester(self , config_class=UpperCamelCase )
def UpperCamelCase__ (self : List[str] ):
'''simple docstring'''
pass
def UpperCamelCase__ (self : Optional[int] ):
'''simple docstring'''
pass
def UpperCamelCase__ (self : Any ):
'''simple docstring'''
pass
def UpperCamelCase__ (self : Any ):
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCamelCase__ (self : Union[str, Any] ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past(*UpperCamelCase )
def UpperCamelCase__ (self : Optional[int] ):
'''simple docstring'''
return
@unittest.skip('''The model doesn\'t support left padding''' ) # and it's not used enough to be worth fixing :)
def UpperCamelCase__ (self : List[str] ):
'''simple docstring'''
pass
| 2 | 0 |
'''simple docstring'''
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __UpperCamelCase :
def __init__( self, lowerCAmelCase, lowerCAmelCase=13, lowerCAmelCase=30, lowerCAmelCase=2, lowerCAmelCase=3, lowerCAmelCase=True, lowerCAmelCase=True, lowerCAmelCase=32, lowerCAmelCase=5, lowerCAmelCase=4, lowerCAmelCase=37, lowerCAmelCase="gelu", lowerCAmelCase=0.1, lowerCAmelCase=0.1, lowerCAmelCase=10, lowerCAmelCase=0.0_2, lowerCAmelCase=3, lowerCAmelCase=0.6, lowerCAmelCase=None, ):
"""simple docstring"""
lowerCamelCase_ =parent
lowerCamelCase_ =batch_size
lowerCamelCase_ =image_size
lowerCamelCase_ =patch_size
lowerCamelCase_ =num_channels
lowerCamelCase_ =is_training
lowerCamelCase_ =use_labels
lowerCamelCase_ =hidden_size
lowerCamelCase_ =num_hidden_layers
lowerCamelCase_ =num_attention_heads
lowerCamelCase_ =intermediate_size
lowerCamelCase_ =hidden_act
lowerCamelCase_ =hidden_dropout_prob
lowerCamelCase_ =attention_probs_dropout_prob
lowerCamelCase_ =type_sequence_label_size
lowerCamelCase_ =initializer_range
lowerCamelCase_ =mask_ratio
lowerCamelCase_ =scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
lowerCamelCase_ =(image_size // patch_size) ** 2
lowerCamelCase_ =int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
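        # e.g. with the defaults above: (30 // 2) ** 2 = 225 patches and
        # ceil((1 - 0.6) * (225 + 1)) = ceil(90.4) = 91 tokens kept after masking.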
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase_ =None
if self.use_labels:
lowerCamelCase_ =ids_tensor([self.batch_size], self.type_sequence_label_size )
lowerCamelCase_ =self.get_config()
return config, pixel_values, labels
def lowercase__ ( self ):
"""simple docstring"""
return ViTMAEConfig(
image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=lowerCAmelCase, initializer_range=self.initializer_range, mask_ratio=self.mask_ratio, )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =ViTMAEModel(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
lowerCamelCase_ =model(lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =ViTMAEForPreTraining(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
lowerCamelCase_ =model(lowerCAmelCase )
lowerCamelCase_ =(self.image_size // self.patch_size) ** 2
lowerCamelCase_ =self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
lowerCamelCase_ =1
lowerCamelCase_ =ViTMAEForPreTraining(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
lowerCamelCase_ =floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCamelCase_ =model(lowerCAmelCase )
lowerCamelCase_ =self.patch_size**2
self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels) )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.prepare_config_and_inputs()
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =config_and_inputs
lowerCamelCase_ ={'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __UpperCamelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
lowercase : Optional[int] =(ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
lowercase : Dict ={'feature-extraction': ViTMAEModel} if is_torch_available() else {}
lowercase : Union[str, Any] =False
lowercase : str =False
lowercase : List[Any] =False
lowercase : str =False
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =ViTMAEModelTester(self )
lowerCamelCase_ =ConfigTester(self, config_class=lowerCAmelCase, has_text_modality=lowerCAmelCase, hidden_size=37 )
def lowercase__ ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViTMAE does not use inputs_embeds''' )
def lowercase__ ( self ):
"""simple docstring"""
pass
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_, lowerCamelCase_ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase_ =model_class(lowerCAmelCase )
self.assertIsInstance(model.get_input_embeddings(), (nn.Module) )
lowerCamelCase_ =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCAmelCase, nn.Linear ) )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_, lowerCamelCase_ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase_ =model_class(lowerCAmelCase )
lowerCamelCase_ =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase_ =[*signature.parameters.keys()]
lowerCamelCase_ =['''pixel_values''']
self.assertListEqual(arg_names[:1], lowerCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*lowerCAmelCase )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ):
"""simple docstring"""
np.random.seed(2 )
lowerCamelCase_ =int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
lowerCamelCase_ =np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
lowerCamelCase_ =torch.from_numpy(lowerCAmelCase )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
lowerCamelCase_ =pt_noise
super().check_pt_tf_models(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_, lowerCamelCase_ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase_ =model_class(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
lowerCamelCase_ =model(**self._prepare_for_class(lowerCAmelCase, lowerCAmelCase ) )
lowerCamelCase_ =outputs[0].cpu().numpy()
lowerCamelCase_ =0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowerCAmelCase )
lowerCamelCase_ =model_class.from_pretrained(lowerCAmelCase )
model.to(lowerCAmelCase )
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
lowerCamelCase_ =model(**self._prepare_for_class(lowerCAmelCase, lowerCAmelCase ) )
# Make sure we don't have nans
lowerCamelCase_ =after_outputs[0].cpu().numpy()
lowerCamelCase_ =0
lowerCamelCase_ =np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(lowerCAmelCase, 1e-5 )
    @unittest.skip(reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.''' )
def lowercase__ ( self ):
"""simple docstring"""
pass
    @unittest.skip(reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.''' )
def lowercase__ ( self ):
"""simple docstring"""
pass
    @unittest.skip(reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.''' )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip(reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load''' )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def lowercase__ ( self ):
"""simple docstring"""
pass
@slow
def lowercase__ ( self ):
"""simple docstring"""
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ =ViTMAEModel.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
def prepare_img():
"""simple docstring"""
lowerCamelCase_ =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class __UpperCamelCase ( unittest.TestCase ):
@cached_property
def lowercase__ ( self ):
"""simple docstring"""
return ViTImageProcessor.from_pretrained('''facebook/vit-mae-base''' ) if is_vision_available() else None
@slow
def lowercase__ ( self ):
"""simple docstring"""
np.random.seed(2 )
lowerCamelCase_ =ViTMAEForPreTraining.from_pretrained('''facebook/vit-mae-base''' ).to(lowerCAmelCase )
lowerCamelCase_ =self.default_image_processor
lowerCamelCase_ =prepare_img()
lowerCamelCase_ =image_processor(images=lowerCAmelCase, return_tensors='''pt''' ).to(lowerCAmelCase )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
lowerCamelCase_ =ViTMAEConfig()
lowerCamelCase_ =int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
lowerCamelCase_ =np.random.uniform(size=(1, num_patches) )
# forward pass
with torch.no_grad():
lowerCamelCase_ =model(**lowerCAmelCase, noise=torch.from_numpy(lowerCAmelCase ).to(device=lowerCAmelCase ) )
# verify the logits
lowerCamelCase_ =torch.Size((1, 196, 768) )
self.assertEqual(outputs.logits.shape, lowerCAmelCase )
lowerCamelCase_ =torch.tensor(
[[-0.0_5_4_8, -1.7_0_2_3, -0.9_3_2_5], [0.3_7_2_1, -0.5_6_7_0, -0.2_2_3_3], [0.8_2_3_5, -1.3_8_7_8, -0.3_5_2_4]] )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice.to(lowerCAmelCase ), atol=1e-4 ) )
| 75 |
'''simple docstring'''
def _SCREAMING_SNAKE_CASE (A ) -> int:
"""simple docstring"""
    if not isinstance(A , int ):
        raise TypeError('''only integers accepted as input''' )
    else:
        lowercase__ = str(abs(A ) )
        num_transpositions = [list(lowercase__ ) for char in range(len(lowercase__ ) )]
        for index in range(len(lowercase__ ) ):
            num_transpositions[index].pop(index )
        return max(
            int(''''''.join(transposition ) ) for transposition in num_transpositions )
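# Usage sketch: the helper returns the largest value obtainable by deleting exactly
# one digit of the input, e.g. for 123 the candidates are 23, 13 and 12:
# >>> _SCREAMING_SNAKE_CASE(123)
# 23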
if __name__ == "__main__":
__import__('doctest').testmod()
| 2 | 0 |
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _get_expected_row_ids_and_row_dicts_for_partition_order(df , partition_order):
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        partition = df.where(f"SPARK_PARTITION_ID() = {part_id}").collect()
        for row_idx, row in enumerate(partition):
            expected_row_ids_and_row_dicts.append((f"{part_id}_{row_idx}", row.asDict()))
return expected_row_ids_and_row_dicts
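# Key format note: the helper above yields ("<partition_id>_<row_index>", row.asDict())
# pairs in the requested partition order, e.g. partition_order=[1, 0] over
# spark.range(4).repartition(2) might give "1_0", "1_1", "0_0", "0_1" with dicts
# like {"id": 2} (the id-to-partition mapping depends on Spark's hashing).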
@require_not_windows
@require_dill_gt_0_3_2
def lowerCamelCase__ ( ):
SCREAMING_SNAKE_CASE : str = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
SCREAMING_SNAKE_CASE : Dict = spark.range(100).repartition(1)
SCREAMING_SNAKE_CASE : List[str] = Spark(_a)
# The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
spark_builder._repartition_df_if_needed(max_shard_size=16)
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def lowerCamelCase__ ( ):
SCREAMING_SNAKE_CASE : Optional[Any] = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
SCREAMING_SNAKE_CASE : str = spark.range(10).repartition(2)
SCREAMING_SNAKE_CASE : List[str] = [1, 0]
SCREAMING_SNAKE_CASE : Union[str, Any] = _generate_iterable_examples(_a , _a) # Reverse the partitions.
SCREAMING_SNAKE_CASE : Optional[int] = _get_expected_row_ids_and_row_dicts_for_partition_order(_a , _a)
for i, (row_id, row_dict) in enumerate(generate_fn()):
        expected_row_id , expected_row_dict = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def lowerCamelCase__ ( ):
SCREAMING_SNAKE_CASE : Optional[int] = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
SCREAMING_SNAKE_CASE : Optional[Any] = spark.range(10).repartition(1)
SCREAMING_SNAKE_CASE : str = SparkExamplesIterable(_a)
assert it.n_shards == 1
for i, (row_id, row_dict) in enumerate(_a):
assert row_id == f"0_{i}"
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def lowerCamelCase__ ( ):
SCREAMING_SNAKE_CASE : Optional[Any] = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
SCREAMING_SNAKE_CASE : Dict = spark.range(30).repartition(3)
# Mock the generator so that shuffle reverses the partition indices.
with patch("numpy.random.Generator") as generator_mock:
        generator_mock.shuffle = lambda x: x.reverse()
SCREAMING_SNAKE_CASE : Any = _get_expected_row_ids_and_row_dicts_for_partition_order(_a , [2, 1, 0])
SCREAMING_SNAKE_CASE : Any = SparkExamplesIterable(_a).shuffle_data_sources(_a)
assert shuffled_it.n_shards == 3
for i, (row_id, row_dict) in enumerate(_a):
        expected_row_id , expected_row_dict = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def lowerCamelCase__ ( ):
SCREAMING_SNAKE_CASE : List[Any] = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
SCREAMING_SNAKE_CASE : Union[str, Any] = spark.range(20).repartition(4)
# Partitions 0 and 2
SCREAMING_SNAKE_CASE : Tuple = SparkExamplesIterable(_a).shard_data_sources(worker_id=0 , num_workers=2)
assert shard_it_a.n_shards == 2
SCREAMING_SNAKE_CASE : List[Any] = _get_expected_row_ids_and_row_dicts_for_partition_order(_a , [0, 2])
for i, (row_id, row_dict) in enumerate(_a):
        expected_row_id , expected_row_dict = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
# Partitions 1 and 3
SCREAMING_SNAKE_CASE : Optional[Any] = SparkExamplesIterable(_a).shard_data_sources(worker_id=1 , num_workers=2)
assert shard_it_a.n_shards == 2
SCREAMING_SNAKE_CASE : Optional[int] = _get_expected_row_ids_and_row_dicts_for_partition_order(_a , [1, 3])
for i, (row_id, row_dict) in enumerate(_a):
        expected_row_id , expected_row_dict = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def lowerCamelCase__ ( ):
SCREAMING_SNAKE_CASE : List[Any] = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
SCREAMING_SNAKE_CASE : Optional[Any] = spark.range(100).repartition(1)
SCREAMING_SNAKE_CASE : Optional[Any] = Spark(_a)
# Choose a small max_shard_size for maximum partitioning.
spark_builder._repartition_df_if_needed(max_shard_size=1)
# The new number of partitions should not be greater than the number of rows.
    assert spark_builder.df.rdd.getNumPartitions() == 100
| 76 |
'''simple docstring'''
import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
import numpy as np
from . import residue_constants
lowerCamelCase : str = Mapping[str, np.ndarray]
lowerCamelCase : List[Any] = Mapping[str, Any] # Is a nested dict.
lowerCamelCase : Any = 0.0_1
@dataclasses.dataclass(frozen=lowercase_ )
class __lowerCAmelCase :
'''simple docstring'''
lowerCAmelCase__ : np.ndarray # [num_res, num_atom_type, 3]
# Amino-acid type for each residue represented as an integer between 0 and
# 20, where 20 is 'X'.
lowerCAmelCase__ : np.ndarray # [num_res]
# Binary float mask to indicate presence of a particular atom. 1.0 if an atom
# is present and 0.0 if not. This should be used for loss masking.
lowerCAmelCase__ : np.ndarray # [num_res, num_atom_type]
# Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
lowerCAmelCase__ : np.ndarray # [num_res]
# B-factors, or temperature factors, of each residue (in sq. angstroms units),
# representing the displacement of the residue from its ground truth mean
# value.
lowerCAmelCase__ : np.ndarray # [num_res, num_atom_type]
# Chain indices for multi-chain predictions
lowerCAmelCase__ : Optional[np.ndarray] = None
# Optional remark about the protein. Included as a comment in output PDB
# files
lowerCAmelCase__ : Optional[str] = None
# Templates used to generate this protein (prediction-only)
lowerCAmelCase__ : Optional[Sequence[str]] = None
# Chain corresponding to each parent
lowerCAmelCase__ : Optional[Sequence[int]] = None
def _SCREAMING_SNAKE_CASE (A ) -> Protein:
"""simple docstring"""
lowercase__ = R'''(\[[A-Z]+\]\n)'''
lowercase__ = [tag.strip() for tag in re.split(A , A ) if len(A ) > 0]
lowercase__ = zip(tags[0::2] , [l.split('''\n''' ) for l in tags[1::2]] )
lowercase__ = ["N", "CA", "C"]
lowercase__ = None
lowercase__ = None
lowercase__ = None
for g in groups:
if "[PRIMARY]" == g[0]:
lowercase__ = g[1][0].strip()
for i in range(len(A ) ):
if seq[i] not in residue_constants.restypes:
lowercase__ = '''X''' # FIXME: strings are immutable
lowercase__ = np.array(
[residue_constants.restype_order.get(A , residue_constants.restype_num ) for res_symbol in seq] )
elif "[TERTIARY]" == g[0]:
lowercase__ = []
for axis in range(3 ):
tertiary.append(list(map(A , g[1][axis].split() ) ) )
lowercase__ = np.array(A )
lowercase__ = np.zeros((len(tertiary[0] ) // 3, residue_constants.atom_type_num, 3) ).astype(np.floataa )
for i, atom in enumerate(A ):
lowercase__ = np.transpose(tertiary_np[:, i::3] )
atom_positions *= PICO_TO_ANGSTROM
elif "[MASK]" == g[0]:
lowercase__ = np.array(list(map({'''-''': 0, '''+''': 1}.get , g[1][0].strip() ) ) )
lowercase__ = np.zeros(
(
len(A ),
residue_constants.atom_type_num,
) ).astype(np.floataa )
for i, atom in enumerate(A ):
lowercase__ = 1
atom_mask *= mask[..., None]
assert aatype is not None
return Protein(
atom_positions=A , atom_mask=A , aatype=A , residue_index=np.arange(len(A ) ) , b_factors=A , )
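# Input sketch for the parser above: ProteinNet-style text with bracketed section
# tags, e.g. (values illustrative; coordinates arrive in picometers and are scaled
# by PICO_TO_ANGSTROM):
# [PRIMARY]
# MKV
# [TERTIARY]
# three whitespace-separated rows, one per x/y/z axis, 3 values per residue atom
# [MASK]
# ++-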
def _SCREAMING_SNAKE_CASE (A , A = 0 ) -> List[str]:
"""simple docstring"""
lowercase__ = []
lowercase__ = prot.remark
if remark is not None:
pdb_headers.append(f"REMARK {remark}" )
lowercase__ = prot.parents
lowercase__ = prot.parents_chain_index
if parents is not None and parents_chain_index is not None:
lowercase__ = [p for i, p in zip(A , A ) if i == chain_id]
if parents is None or len(A ) == 0:
lowercase__ = ['''N/A''']
pdb_headers.append(f"PARENT {' '.join(A )}" )
return pdb_headers
def _SCREAMING_SNAKE_CASE (A , A ) -> str:
"""simple docstring"""
lowercase__ = []
lowercase__ = pdb_str.split('''\n''' )
lowercase__ = prot.remark
if remark is not None:
out_pdb_lines.append(f"REMARK {remark}" )
lowercase__ = 42
if prot.parents is not None and len(prot.parents ) > 0:
lowercase__ = []
if prot.parents_chain_index is not None:
lowercase__ = {}
for p, i in zip(prot.parents , prot.parents_chain_index ):
parent_dict.setdefault(str(A ) , [] )
parent_dict[str(A )].append(A )
lowercase__ = max([int(A ) for chain_idx in parent_dict] )
for i in range(max_idx + 1 ):
lowercase__ = parent_dict.get(str(A ) , ['''N/A'''] )
parents_per_chain.append(A )
else:
parents_per_chain.append(list(prot.parents ) )
else:
lowercase__ = [['''N/A''']]
def make_parent_line(A ) -> str:
return f"PARENT {' '.join(A )}"
out_pdb_lines.append(make_parent_line(parents_per_chain[0] ) )
lowercase__ = 0
for i, l in enumerate(A ):
if "PARENT" not in l and "REMARK" not in l:
out_pdb_lines.append(A )
if "TER" in l and "END" not in lines[i + 1]:
chain_counter += 1
if not chain_counter >= len(A ):
lowercase__ = parents_per_chain[chain_counter]
else:
lowercase__ = ['''N/A''']
out_pdb_lines.append(make_parent_line(A ) )
return "\n".join(A )
def _SCREAMING_SNAKE_CASE (A ) -> str:
"""simple docstring"""
lowercase__ = residue_constants.restypes + ['''X''']
def res_atoa(A ) -> str:
return residue_constants.restype_atoa.get(restypes[r] , '''UNK''' )
lowercase__ = residue_constants.atom_types
lowercase__ = []
lowercase__ = prot.atom_mask
lowercase__ = prot.aatype
lowercase__ = prot.atom_positions
lowercase__ = prot.residue_index.astype(np.intaa )
lowercase__ = prot.b_factors
lowercase__ = prot.chain_index
if np.any(aatype > residue_constants.restype_num ):
raise ValueError('''Invalid aatypes.''' )
lowercase__ = get_pdb_headers(A )
if len(A ) > 0:
pdb_lines.extend(A )
lowercase__ = aatype.shape[0]
lowercase__ = 1
lowercase__ = 0
lowercase__ = string.ascii_uppercase
lowercase__ = None
# Add all atom sites.
for i in range(A ):
lowercase__ = res_atoa(aatype[i] )
for atom_name, pos, mask, b_factor in zip(A , atom_positions[i] , atom_mask[i] , b_factors[i] ):
if mask < 0.5:
continue
lowercase__ = '''ATOM'''
lowercase__ = atom_name if len(A ) == 4 else f" {atom_name}"
lowercase__ = ''''''
lowercase__ = ''''''
lowercase__ = 1.00
lowercase__ = atom_name[0] # Protein supports only C, N, O, S, this works.
lowercase__ = ''''''
lowercase__ = '''A'''
if chain_index is not None:
lowercase__ = chain_tags[chain_index[i]]
# PDB is a columnar format, every space matters here!
lowercase__ = (
f"{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}"
f"{res_name_a:>3} {chain_tag:>1}"
f"{residue_index[i]:>4}{insertion_code:>1} "
f"{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}"
f"{occupancy:>6.2f}{b_factor:>6.2f} "
f"{element:>2}{charge:>2}"
)
pdb_lines.append(A )
atom_index += 1
lowercase__ = i == n - 1
if chain_index is not None:
if i != n - 1 and chain_index[i + 1] != prev_chain_index:
lowercase__ = True
lowercase__ = chain_index[i + 1]
if should_terminate:
# Close the chain.
lowercase__ = '''TER'''
lowercase__ = (
f"{chain_end:<6}{atom_index:>5} {res_atoa(aatype[i] ):>3} {chain_tag:>1}{residue_index[i]:>4}"
)
pdb_lines.append(A )
atom_index += 1
if i != n - 1:
# "prev" is a misnomer here. This happens at the beginning of
# each new chain.
pdb_lines.extend(get_pdb_headers(A , A ) )
pdb_lines.append('''END''' )
pdb_lines.append('''''' )
return "\n".join(A )
def _SCREAMING_SNAKE_CASE (A ) -> np.ndarray:
"""simple docstring"""
return residue_constants.STANDARD_ATOM_MASK[prot.aatype]
def _SCREAMING_SNAKE_CASE (A , A , A = None , A = None , A = None , A = None , A = None , ) -> Protein:
"""simple docstring"""
return Protein(
aatype=features['''aatype'''] , atom_positions=result['''final_atom_positions'''] , atom_mask=result['''final_atom_mask'''] , residue_index=features['''residue_index'''] + 1 , b_factors=b_factors if b_factors is not None else np.zeros_like(result['''final_atom_mask'''] ) , chain_index=A , remark=A , parents=A , parents_chain_index=A , )
| 2 | 0 |
"""simple docstring"""
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def pack_examples(tok , src_examples , tgt_examples , max_tokens=1024 ):
    '''simple docstring'''
    finished_src , finished_tgt = [], []
    sorted_examples = list(zip(src_examples , tgt_examples ) )
    new_src , new_tgt = sorted_examples[0]
    def is_too_big(text ):
        return tok(text , return_tensors='pt' ).input_ids.shape[1] > max_tokens
    for src, tgt in tqdm(sorted_examples[1:] ):
        cand_src = new_src + ' ' + src
        cand_tgt = new_tgt + ' ' + tgt
        if is_too_big(cand_src ) or is_too_big(cand_tgt ): # cant fit, finalize example
            finished_src.append(new_src )
            finished_tgt.append(new_tgt )
            new_src , new_tgt = src, tgt
        else: # can fit, keep adding
            new_src , new_tgt = cand_src, cand_tgt
    # cleanup
    if new_src:
        assert new_tgt
        finished_src.append(new_src )
        finished_tgt.append(new_tgt )
    return finished_src, finished_tgt
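# Hedged usage sketch for the packer above (tokenizer name is an assumption):
# tok = AutoTokenizer.from_pretrained('facebook/bart-large-cnn')
# src, tgt = pack_examples(tok, ['one.', 'two.', 'three.'], ['a', 'b', 'c'], max_tokens=1024)
# Consecutive (src, tgt) pairs are merged greedily until either side would exceed
# max_tokens, so len(src) <= 3 and each packed line stays under the budget.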
def pack_data_dir(tok , data_dir: Path , max_tokens , save_path ):
    '''simple docstring'''
    save_path = Path(save_path )
    save_path.mkdir(exist_ok=True )
    for split in ["train"]:
        src_path , tgt_path = data_dir / f"""{split}.source""", data_dir / f"""{split}.target"""
        src_docs = [x.rstrip() for x in Path(src_path ).open().readlines()]
        tgt_docs = [x.rstrip() for x in Path(tgt_path ).open().readlines()]
        packed_src , packed_tgt = pack_examples(tok , src_docs , tgt_docs , max_tokens )
        print(f"""packed {split} split from {len(src_docs )} examples -> {len(packed_src )}.""" )
        Path(save_path / f"""{split}.source""" ).open('w' ).write('\n'.join(packed_src ) )
        Path(save_path / f"""{split}.target""" ).open('w' ).write('\n'.join(packed_tgt ) )
    for split in ["val", "test"]:
        src_path , tgt_path = data_dir / f"""{split}.source""", data_dir / f"""{split}.target"""
        shutil.copyfile(src_path , save_path / f"""{split}.source""" )
        shutil.copyfile(tgt_path , save_path / f"""{split}.target""" )
def packer_cli():
    '''simple docstring'''
    parser = argparse.ArgumentParser()
    parser.add_argument('--tok_name' , type=str , help='like facebook/bart-large-cnn,t5-base, etc.' )
    parser.add_argument('--max_seq_len' , type=int , default=128 )
    parser.add_argument('--data_dir' , type=str )
    parser.add_argument('--save_path' , type=str )
    args = parser.parse_args()
    tokenizer = AutoTokenizer.from_pretrained(args.tok_name )
    return pack_data_dir(tokenizer , Path(args.data_dir ) , args.max_seq_len , args.save_path )
if __name__ == "__main__":
packer_cli()
| 77 |
'''simple docstring'''
from __future__ import annotations
def generate_all_combinations(n , k ) -> list[list[int]]:
    """simple docstring"""
    result = []
    create_all_state(1 , n , k , [] , result )
    return result
def create_all_state(increment , total_number , level , current_list , total_list , ) -> None:
    """simple docstring"""
    if level == 0:
        total_list.append(current_list[:] )
        return
    for i in range(increment , total_number - level + 2 ):
        current_list.append(i )
        create_all_state(i + 1 , total_number , level - 1 , current_list , total_list )
        current_list.pop()
def print_all_state(total_list ) -> None:
    """simple docstring"""
    for i in total_list:
        print(*i )
if __name__ == "__main__":
    n = 4
    k = 2
    total_list = generate_all_combinations(n, k)
    print_all_state(total_list)
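    # Expected stdout for n=4, k=2 -- every 2-combination of {1, 2, 3, 4}:
    # 1 2
    # 1 3
    # 1 4
    # 2 3
    # 2 4
    # 3 4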
| 2 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
snake_case_ = {
"""configuration_llama""": ["""LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LlamaConfig"""],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ = ["""LlamaTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ = ["""LlamaTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ = [
"""LlamaForCausalLM""",
"""LlamaModel""",
"""LlamaPreTrainedModel""",
"""LlamaForSequenceClassification""",
]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
snake_case_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
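# Note: upstream, this _LazyModule instance is installed as sys.modules[__name__],
# so the torch/sentencepiece/tokenizers branches above are only resolved when one
# of the exported names is first accessed rather than at import time.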
| 78 |
'''simple docstring'''
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sola import PokerHand
SORTED_HANDS = (
'4S 3H 2C 7S 5H',
'9D 8H 2C 6S 7H',
'2D 6D 9D TH 7D',
'TC 8C 2S JH 6C',
'JH 8S TH AH QH',
'TS KS 5S 9S AC',
'KD 6S 9D TH AD',
'KS 8D 4D 9S 4S', # pair
'8C 4S KH JS 4D', # pair
'QH 8H KD JH 8S', # pair
'KC 4H KS 2H 8D', # pair
'KD 4S KC 3H 8S', # pair
'AH 8S AS KC JH', # pair
'3H 4C 4H 3S 2H', # 2 pairs
'5S 5D 2C KH KH', # 2 pairs
'3C KH 5D 5S KH', # 2 pairs
'AS 3C KH AD KH', # 2 pairs
'7C 7S 3S 7H 5S', # 3 of a kind
'7C 7S KH 2H 7H', # 3 of a kind
'AC KH QH AH AS', # 3 of a kind
'2H 4D 3C AS 5S', # straight (low ace)
'3C 5C 4C 2C 6H', # straight
'6S 8S 7S 5H 9H', # straight
'JS QS 9H TS KH', # straight
'QC KH TS JS AH', # straight (high ace)
'8C 9C 5C 3C TC', # flush
'3S 8S 9S 5S KS', # flush
'4C 5C 9C 8C KC', # flush
'JH 8H AH KH QH', # flush
'3D 2H 3H 2C 2D', # full house
'2H 2C 3S 3H 3D', # full house
'KH KC 3S 3H 3D', # full house
'JC 6H JS JD JH', # 4 of a kind
'JC 7H JS JD JH', # 4 of a kind
'JC KH JS JD JH', # 4 of a kind
'2S AS 4S 5S 3S', # straight flush (low ace)
'2D 6D 3D 4D 5D', # straight flush
'5C 6C 3C 7C 4C', # straight flush
'JH 9H TH KH QH', # straight flush
'JH AH TH KH QH', # royal flush (high ace straight flush)
)
lowerCamelCase : Tuple = (
('2H 3H 4H 5H 6H', 'KS AS TS QS JS', 'Loss'),
('2H 3H 4H 5H 6H', 'AS AD AC AH JD', 'Win'),
('AS AH 2H AD AC', 'JS JD JC JH 3D', 'Win'),
('2S AH 2H AS AC', 'JS JD JC JH AD', 'Loss'),
('2S AH 2H AS AC', '2H 3H 5H 6H 7H', 'Win'),
('AS 3S 4S 8S 2S', '2H 3H 5H 6H 7H', 'Win'),
('2H 3H 5H 6H 7H', '2S 3H 4H 5S 6C', 'Win'),
('2S 3H 4H 5S 6C', '3D 4C 5H 6H 2S', 'Tie'),
('2S 3H 4H 5S 6C', 'AH AC 5H 6H AS', 'Win'),
('2S 2H 4H 5S 4C', 'AH AC 5H 6H AS', 'Loss'),
('2S 2H 4H 5S 4C', 'AH AC 5H 6H 7S', 'Win'),
('6S AD 7H 4S AS', 'AH AC 5H 6H 7S', 'Loss'),
('2S AH 4H 5S KC', 'AH AC 5H 6H 7S', 'Loss'),
('2S 3H 6H 7S 9C', '7H 3C TH 6H 9S', 'Loss'),
('4S 5H 6H TS AC', '3S 5H 6H TS AC', 'Win'),
('2S AH 4H 5S 6C', 'AD 4C 5H 6H 2C', 'Tie'),
('AS AH 3H AD AC', 'AS AH 2H AD AC', 'Win'),
('AH AC 5H 5C QS', 'AH AC 5H 5C KS', 'Loss'),
('AH AC 5H 5C QS', 'KH KC 5H 5C QS', 'Win'),
('7C 7S KH 2H 7H', '3C 3S AH 2H 3H', 'Win'),
('3C 3S AH 2H 3H', '7C 7S KH 2H 7H', 'Loss'),
('6H 5H 4H 3H 2H', '5H 4H 3H 2H AH', 'Win'),
('5H 4H 3H 2H AH', '5H 4H 3H 2H AH', 'Tie'),
('5H 4H 3H 2H AH', '6H 5H 4H 3H 2H', 'Loss'),
('AH AD KS KC AC', 'AH KD KH AC KC', 'Win'),
('2H 4D 3C AS 5S', '2H 4D 3C 6S 5S', 'Loss'),
('2H 3S 3C 3H 2S', '3S 3C 2S 2H 2D', 'Win'),
('4D 6D 5D 2D JH', '3S 8S 3H TC KH', 'Loss'),
('4S 6C 8S 3S 7S', 'AD KS 2D 7D 7C', 'Loss'),
('6S 4C 7H 8C 3H', '5H JC AH 9D 9C', 'Loss'),
('9D 9H JH TC QH', '3C 2S JS 5C 7H', 'Win'),
('2H TC 8S AD 9S', '4H TS 7H 2C 5C', 'Win'),
('9D 3S 2C 7S 7C', 'JC TD 3C TC 9H', 'Loss'),
)
lowerCamelCase : Dict = (
('2H 3H 4H 5H 6H', True),
('AS AH 2H AD AC', False),
('2H 3H 5H 6H 7H', True),
('KS AS TS QS JS', True),
('8H 9H QS JS TH', False),
('AS 3S 4S 8S 2S', True),
)
lowerCamelCase : Any = (
('2H 3H 4H 5H 6H', True),
('AS AH 2H AD AC', False),
('2H 3H 5H 6H 7H', False),
('KS AS TS QS JS', True),
('8H 9H QS JS TH', True),
)
lowerCamelCase : Tuple = (
('2H 4D 3C AS 5S', True, [5, 4, 3, 2, 14]),
('2H 5D 3C AS 5S', False, [14, 5, 5, 3, 2]),
('JH QD KC AS TS', False, [14, 13, 12, 11, 10]),
('9D 3S 2C 7S 7C', False, [9, 7, 7, 3, 2]),
)
lowerCamelCase : Optional[int] = (
('JH AH TH KH QH', 0),
('JH 9H TH KH QH', 0),
('JC KH JS JD JH', 7),
('KH KC 3S 3H 3D', 6),
('8C 9C 5C 3C TC', 0),
('JS QS 9H TS KH', 0),
('7C 7S KH 2H 7H', 3),
('3C KH 5D 5S KH', 2),
('QH 8H KD JH 8S', 1),
('2D 6D 9D TH 7D', 0),
)
lowerCamelCase : Dict = (
('JH AH TH KH QH', 23),
('JH 9H TH KH QH', 22),
('JC KH JS JD JH', 21),
('KH KC 3S 3H 3D', 20),
('8C 9C 5C 3C TC', 19),
('JS QS 9H TS KH', 18),
('7C 7S KH 2H 7H', 17),
('3C KH 5D 5S KH', 16),
('QH 8H KD JH 8S', 15),
('2D 6D 9D TH 7D', 14),
)
def generate_random_hand():
"""simple docstring"""
    play , oppo = randrange(len(SORTED_HANDS ) ), randrange(len(SORTED_HANDS ) )
    expected = ['''Loss''', '''Tie''', '''Win'''][(play >= oppo) + (play > oppo)]
    hand , other = SORTED_HANDS[play], SORTED_HANDS[oppo]
return hand, other, expected
def generate_random_hands(number_of_hands = 100 ):
    """simple docstring"""
    return (generate_random_hand() for _ in range(number_of_hands ))
@pytest.mark.parametrize('''hand, expected''' , A )
def _SCREAMING_SNAKE_CASE (A , A ) -> List[str]:
"""simple docstring"""
assert PokerHand(A )._is_flush() == expected
@pytest.mark.parametrize('''hand, expected''' , A )
def _SCREAMING_SNAKE_CASE (A , A ) -> Union[str, Any]:
"""simple docstring"""
assert PokerHand(A )._is_straight() == expected
@pytest.mark.parametrize('''hand, expected, card_values''' , A )
def _SCREAMING_SNAKE_CASE (A , A , A ) -> Any:
"""simple docstring"""
lowercase__ = PokerHand(A )
assert player._is_five_high_straight() == expected
assert player._card_values == card_values
@pytest.mark.parametrize('''hand, expected''' , A )
def _SCREAMING_SNAKE_CASE (A , A ) -> Tuple:
"""simple docstring"""
assert PokerHand(A )._is_same_kind() == expected
@pytest.mark.parametrize('''hand, expected''' , A )
def _SCREAMING_SNAKE_CASE (A , A ) -> Optional[Any]:
"""simple docstring"""
assert PokerHand(A )._hand_type == expected
@pytest.mark.parametrize('''hand, other, expected''' , A )
def _SCREAMING_SNAKE_CASE (A , A , A ) -> Union[str, Any]:
"""simple docstring"""
assert PokerHand(A ).compare_with(PokerHand(A ) ) == expected
@pytest.mark.parametrize('''hand, other, expected''' , generate_random_hands() )
def _SCREAMING_SNAKE_CASE (A , A , A ) -> Optional[Any]:
"""simple docstring"""
assert PokerHand(A ).compare_with(PokerHand(A ) ) == expected
def _SCREAMING_SNAKE_CASE () -> Tuple:
"""simple docstring"""
    poker_hands = [PokerHand(hand ) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy )
    user_sorted = chain(sorted(list_copy ) )
    for index, hand in enumerate(user_sorted ):
assert hand == poker_hands[index]
def _SCREAMING_SNAKE_CASE () -> List[Any]:
"""simple docstring"""
    pokerhands = [PokerHand('''2D AC 3H 4H 5S''' ), PokerHand('''2S 3H 4H 5S 6C''' )]
    pokerhands.sort(reverse=True )
assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def _SCREAMING_SNAKE_CASE () -> int:
"""simple docstring"""
    pokerhand = PokerHand('''2C 4S AS 3D 5C''' )
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
for _ in range(10 ):
assert pokerhand._is_five_high_straight() == expected
assert pokerhand._card_values == expected_card_values
def _SCREAMING_SNAKE_CASE () -> Union[str, Any]:
"""simple docstring"""
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__ ) )
    poker_hands_path = os.path.join(script_dir , '''poker_hands.txt''' )
    with open(poker_hands_path ) as file_hand:
        for line in file_hand:
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player , opponent = PokerHand(player_hand ), PokerHand(opponent_hand )
            output = player.compare_with(opponent )
if output == "Win":
answer += 1
assert answer == 376
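    # 376 is the published answer to Project Euler problem 54 -- the number of hands
    # player one wins across the bundled poker_hands.txt data set tallied above.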
| 2 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {
'''microsoft/focalnet-tiny''': '''https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json''',
}
class _UpperCAmelCase ( snake_case_ , snake_case_ ):
"""simple docstring"""
snake_case = '''focalnet'''
def __init__( self : Dict , __UpperCAmelCase : Optional[Any]=224 , __UpperCAmelCase : Optional[Any]=4 , __UpperCAmelCase : int=3 , __UpperCAmelCase : Optional[int]=96 , __UpperCAmelCase : Any=False , __UpperCAmelCase : int=[192, 384, 768, 768] , __UpperCAmelCase : str=[2, 2, 6, 2] , __UpperCAmelCase : int=[2, 2, 2, 2] , __UpperCAmelCase : Optional[int]=[3, 3, 3, 3] , __UpperCAmelCase : Any="gelu" , __UpperCAmelCase : Dict=4.0 , __UpperCAmelCase : Any=0.0 , __UpperCAmelCase : List[str]=0.1 , __UpperCAmelCase : int=False , __UpperCAmelCase : Dict=1E-4 , __UpperCAmelCase : List[Any]=False , __UpperCAmelCase : str=False , __UpperCAmelCase : Any=False , __UpperCAmelCase : int=0.02 , __UpperCAmelCase : Optional[int]=1E-5 , __UpperCAmelCase : Dict=32 , __UpperCAmelCase : Any=None , __UpperCAmelCase : Optional[int]=None , **__UpperCAmelCase : Any , ):
'''simple docstring'''
super().__init__(**__UpperCAmelCase )
_A = image_size
_A = patch_size
_A = num_channels
_A = embed_dim
_A = use_conv_embed
_A = hidden_sizes
_A = depths
_A = focal_levels
_A = focal_windows
_A = hidden_act
_A = mlp_ratio
_A = hidden_dropout_prob
_A = drop_path_rate
_A = use_layerscale
_A = layerscale_value
_A = use_post_layernorm
_A = use_post_layernorm_in_modulation
_A = normalize_modulator
_A = initializer_range
_A = layer_norm_eps
_A = encoder_stride
_A = ["stem"] + [f'''stage{idx}''' for idx in range(1 , len(self.depths ) + 1 )]
_A , _A = get_aligned_output_features_output_indices(
out_features=__UpperCAmelCase , out_indices=__UpperCAmelCase , stage_names=self.stage_names )
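        # With the 4-stage defaults above, stage_names == ["stem", "stage1", ..., "stage4"];
        # leaving out_features/out_indices as None makes the aligner default to the
        # last stage, so the backbone exposes "stage4" features unless configured otherwise.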
| 79 |
'''simple docstring'''
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
lowerCamelCase : List[str] = argparse.ArgumentParser(
description=(
'Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned'
' Distillation'
)
)
parser.add_argument('--model_type', default='bert', choices=['bert'])
parser.add_argument('--model_name', default='bert-base-uncased', type=str)
parser.add_argument('--dump_checkpoint', default='serialization_dir/tf_bert-base-uncased_0247911.pth', type=str)
parser.add_argument('--vocab_transform', action='store_true')
lowerCamelCase : str = parser.parse_args()
if args.model_type == "bert":
lowerCamelCase : List[Any] = BertForMaskedLM.from_pretrained(args.model_name)
lowerCamelCase : Any = 'bert'
else:
raise ValueError('args.model_type should be "bert".')
lowerCamelCase : int = model.state_dict()
lowerCamelCase : int = {}
for w in ["word_embeddings", "position_embeddings"]:
lowerCamelCase : Any = state_dict[f"""{prefix}.embeddings.{w}.weight"""]
for w in ["weight", "bias"]:
lowerCamelCase : Any = state_dict[f"""{prefix}.embeddings.LayerNorm.{w}"""]
lowerCamelCase : Tuple = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
for w in ["weight", "bias"]:
lowerCamelCase : Dict = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"""
]
lowerCamelCase : Dict = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"""
]
lowerCamelCase : List[Any] = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"""
]
lowerCamelCase : Tuple = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"""
]
lowerCamelCase : Optional[int] = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"""
]
lowerCamelCase : Optional[Any] = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"""
]
lowerCamelCase : Dict = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"""
]
lowerCamelCase : Any = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"""
]
std_idx += 1
lowerCamelCase : Optional[int] = state_dict['cls.predictions.decoder.weight']
lowerCamelCase : str = state_dict['cls.predictions.bias']
if args.vocab_transform:
for w in ["weight", "bias"]:
lowerCamelCase : str = state_dict[f"""cls.predictions.transform.dense.{w}"""]
lowerCamelCase : Any = state_dict[f"""cls.predictions.transform.LayerNorm.{w}"""]
print(f"""N layers selected for distillation: {std_idx}""")
print(f"""Number of params transferred for distillation: {len(compressed_sd.keys())}""")
print(f"""Save transferred checkpoint to {args.dump_checkpoint}.""")
torch.save(compressed_sd, args.dump_checkpoint)
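    # Layer map used above: teacher layers [0, 2, 4, 7, 9, 11] initialize student
    # layers 0..5, i.e. every other layer of a 12-layer BERT seeds a 6-layer student,
    # the DistilBERT-style initialization for transfer-learned distillation.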
| 2 | 0 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class lowercase_ ( unittest.TestCase ):
def __a ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def __a ( self ):
torch.manual_seed(0 )
UpperCamelCase__ = UNetaDModel(
sample_size=(32, 64) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_28, 1_28) , down_block_types=("AttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "AttnUpBlock2D") , )
return model
@property
def __a ( self ):
torch.manual_seed(0 )
UpperCamelCase__ = UNetaDConditionModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_28, 1_28) , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , cross_attention_dim=10 , )
return model
@property
def __a ( self ):
torch.manual_seed(0 )
UpperCamelCase__ = AutoencoderKL(
sample_size=(1_28, 64) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(1_28, 1_28) , down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D") , up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D") , )
UpperCamelCase__ = UNetaDModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_28, 1_28) , down_block_types=("AttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "AttnUpBlock2D") , )
return vqvae, unet
@slow
def __a ( self ):
UpperCamelCase__ = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCamelCase__ = Mel(
x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , )
UpperCamelCase__ = DDPMScheduler()
UpperCamelCase__ = AudioDiffusionPipeline(vqvae=a , unet=self.dummy_unet , mel=a , scheduler=a )
UpperCamelCase__ = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
UpperCamelCase__ = torch.Generator(device=a ).manual_seed(42 )
UpperCamelCase__ = pipe(generator=a , steps=4 )
UpperCamelCase__ = output.audios[0]
UpperCamelCase__ = output.images[0]
UpperCamelCase__ = torch.Generator(device=a ).manual_seed(42 )
UpperCamelCase__ = pipe(generator=a , steps=4 , return_dict=a )
UpperCamelCase__ = output[0][0]
assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
assert (
image.height == self.dummy_unet.config.sample_size[0]
and image.width == self.dummy_unet.config.sample_size[1]
)
UpperCamelCase__ = np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
UpperCamelCase__ = np.frombuffer(image_from_tuple.tobytes() , dtype="uint8" )[:10]
UpperCamelCase__ = np.array([69, 2_55, 2_55, 2_55, 0, 0, 77, 1_81, 12, 1_27] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0
UpperCamelCase__ = Mel(
x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , )
UpperCamelCase__ = DDIMScheduler()
UpperCamelCase__ = self.dummy_vqvae_and_unet
UpperCamelCase__ = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=a , scheduler=a )
UpperCamelCase__ = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
np.random.seed(0 )
UpperCamelCase__ = np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
UpperCamelCase__ = torch.Generator(device=a ).manual_seed(42 )
UpperCamelCase__ = pipe(raw_audio=a , generator=a , start_step=5 , steps=10 )
UpperCamelCase__ = output.images[0]
assert (
image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
)
UpperCamelCase__ = np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
UpperCamelCase__ = np.array([1_20, 1_17, 1_10, 1_09, 1_38, 1_67, 1_38, 1_48, 1_32, 1_21] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
UpperCamelCase__ = self.dummy_unet_condition
UpperCamelCase__ = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=a , mel=a , scheduler=a )
UpperCamelCase__ = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
np.random.seed(0 )
UpperCamelCase__ = torch.rand((1, 1, 10) )
UpperCamelCase__ = pipe(generator=a , encoding=a )
UpperCamelCase__ = output.images[0]
UpperCamelCase__ = np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
UpperCamelCase__ = np.array([1_07, 1_03, 1_20, 1_27, 1_42, 1_22, 1_13, 1_22, 97, 1_11] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
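        # Shape bookkeeping for the assertions above: Mel is constructed with
        # x_res = unet width and y_res = unet height, so the generated audio holds
        # (x_res - 1) * hop_length samples and the rendered spectrogram image matches
        # the UNet's (height, width) sample_size exactly.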
@slow
@require_torch_gpu
class lowercase_ ( unittest.TestCase ):
def __a ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __a ( self ):
UpperCamelCase__ = torch_device
UpperCamelCase__ = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256" )
UpperCamelCase__ = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
UpperCamelCase__ = torch.Generator(device=a ).manual_seed(42 )
UpperCamelCase__ = pipe(generator=a )
UpperCamelCase__ = output.audios[0]
UpperCamelCase__ = output.images[0]
assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
UpperCamelCase__ = np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
UpperCamelCase__ = np.array([1_51, 1_67, 1_54, 1_44, 1_22, 1_34, 1_21, 1_05, 70, 26] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
| 80 |
'''simple docstring'''
from ....utils import logging
lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
class __lowerCAmelCase (lowercase_ ):
'''simple docstring'''
def __init__(self : Optional[int] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[Any]=None , UpperCamelCase : int=2048 ):
'''simple docstring'''
lowercase__ = config.__dict__
lowercase__ = modal_hidden_size
if num_labels:
lowercase__ = num_labels
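# Hedged usage note: upstream (MMBT-style) this wrapper does
# `self.__dict__ = config.__dict__`, i.e. it aliases every attribute of the wrapped
# text config and bolts on `modal_hidden_size` (and optionally `num_labels`), e.g.
# wrapping a BertConfig for a multimodal classification head.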
| 2 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase_ : Dict = logging.get_logger(__name__)
lowerCamelCase_ : List[Any] = {
"""facebook/nllb-moe-54B""": """https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json""",
}
class __A ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__lowerCAmelCase = "nllb-moe"
__lowerCAmelCase = ["past_key_values"]
__lowerCAmelCase = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self , __A=12_8112 , __A=1024 , __A=12 , __A=4096 , __A=16 , __A=12 , __A=4096 , __A=16 , __A=0.05 , __A=0.05 , __A=True , __A=True , __A="relu" , __A=1024 , __A=0.1 , __A=0.1 , __A=0.0 , __A=0.02 , __A=2 , __A=True , __A=False , __A="float32" , __A=False , __A=128 , __A=64 , __A=4 , __A=4 , __A=0.001 , __A=0.001 , __A="all" , __A=False , __A=False , __A=1.0 , __A=0.2 , __A=1 , __A=0 , __A=2 , __A=False , **__A , ) -> List[Any]:
a =vocab_size
a =max_position_embeddings
a =d_model
a =encoder_ffn_dim
a =encoder_layers
a =encoder_attention_heads
a =decoder_ffn_dim
a =decoder_layers
a =decoder_attention_heads
a =dropout
a =attention_dropout
a =activation_dropout
a =activation_function
a =init_std
a =encoder_layerdrop
a =decoder_layerdrop
a =use_cache
a =encoder_layers
a =scale_embedding # scale factor will be sqrt(d_model) if True
a =router_z_loss_coef
a =router_aux_loss_coef
a =decoder_sparse_step
a =encoder_sparse_step
a =num_experts
a =expert_capacity
a =router_bias
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(f'''`router_dtype` must be one of \'float32\', \'float16\' or \'bfloat16\', got {router_dtype}''' )
a =router_dtype
a =router_ignore_padding_tokens
a =batch_prioritized_routing
a =second_expert_policy
a =normalize_router_prob_before_dropping
a =moe_eval_capacity_token_fraction
a =moe_token_dropout
a =output_router_logits
super().__init__(
            pad_token_id=__A , bos_token_id=__A , eos_token_id=__A , is_encoder_decoder=__A , decoder_start_token_id=__A , **__A , )
| 81 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : Tuple = logging.get_logger(__name__)
lowerCamelCase : Dict = {
'microsoft/cvt-13': 'https://huggingface.co/microsoft/cvt-13/resolve/main/config.json',
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class __lowerCAmelCase (lowercase_ ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = """cvt"""
def __init__(self : int , UpperCamelCase : List[Any]=3 , UpperCamelCase : int=[7, 3, 3] , UpperCamelCase : str=[4, 2, 2] , UpperCamelCase : Dict=[2, 1, 1] , UpperCamelCase : Dict=[64, 192, 384] , UpperCamelCase : Dict=[1, 3, 6] , UpperCamelCase : Dict=[1, 2, 10] , UpperCamelCase : Any=[4.0, 4.0, 4.0] , UpperCamelCase : Optional[int]=[0.0, 0.0, 0.0] , UpperCamelCase : Optional[int]=[0.0, 0.0, 0.0] , UpperCamelCase : int=[0.0, 0.0, 0.1] , UpperCamelCase : Any=[True, True, True] , UpperCamelCase : int=[False, False, True] , UpperCamelCase : Union[str, Any]=["dw_bn", "dw_bn", "dw_bn"] , UpperCamelCase : Optional[int]=[3, 3, 3] , UpperCamelCase : Tuple=[1, 1, 1] , UpperCamelCase : Any=[2, 2, 2] , UpperCamelCase : Dict=[1, 1, 1] , UpperCamelCase : List[str]=[1, 1, 1] , UpperCamelCase : str=0.02 , UpperCamelCase : int=1E-12 , **UpperCamelCase : Union[str, Any] , ):
'''simple docstring'''
super().__init__(**UpperCamelCase )
lowercase__ = num_channels
lowercase__ = patch_sizes
lowercase__ = patch_stride
lowercase__ = patch_padding
lowercase__ = embed_dim
lowercase__ = num_heads
lowercase__ = depth
lowercase__ = mlp_ratio
lowercase__ = attention_drop_rate
lowercase__ = drop_rate
lowercase__ = drop_path_rate
lowercase__ = qkv_bias
lowercase__ = cls_token
lowercase__ = qkv_projection_method
lowercase__ = kernel_qkv
lowercase__ = padding_kv
lowercase__ = stride_kv
lowercase__ = padding_q
lowercase__ = stride_q
lowercase__ = initializer_range
lowercase__ = layer_norm_eps
| 2 | 0 |
def _UpperCAmelCase ( principal , rate_per_annum , years_to_repay ):
"""simple docstring"""
if principal <= 0:
raise Exception("""Principal borrowed must be > 0""" )
if rate_per_annum < 0:
raise Exception("""Rate of interest must be >= 0""" )
    if years_to_repay <= 0 or not isinstance(years_to_repay , int ):
raise Exception("""Years to repay must be an integer > 0""" )
# Yearly rate is divided by 12 to get monthly rate
_lowerCAmelCase = rate_per_annum / 12
# Years to repay is multiplied by 12 to get number of payments as payment is monthly
_lowerCAmelCase = years_to_repay * 12
return (
principal
* rate_per_month
* (1 + rate_per_month) ** number_of_payments
/ ((1 + rate_per_month) ** number_of_payments - 1)
)
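# Hedged usage sketch (result is approximate, rounded to cents):
# >>> round(_UpperCAmelCase(100_000, 0.10, 2), 2)   # 100k principal, 10% p.a., 2 years
# 4614.49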
if __name__ == "__main__":
import doctest
doctest.testmod()
| 82 |
'''simple docstring'''
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
# Initialising the CNN
# (Sequential- Building the model layer by layer)
lowerCamelCase : Any = models.Sequential()
# Step 1 - Convolution
# Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
# (3,3) is the kernel size (filter matrix)
classifier.add(
layers.ConvaD(32, (3, 3), input_shape=(64, 64, 3), activation='relu')
)
# Step 2 - Pooling
classifier.add(layers.MaxPoolingaD(pool_size=(2, 2)))
# Adding a second convolutional layer
classifier.add(layers.ConvaD(32, (3, 3), activation='relu'))
classifier.add(layers.MaxPoolingaD(pool_size=(2, 2)))
# Step 3 - Flattening
classifier.add(layers.Flatten())
# Step 4 - Full connection
classifier.add(layers.Dense(units=128, activation='relu'))
classifier.add(layers.Dense(units=1, activation='sigmoid'))
# Compiling the CNN
classifier.compile(
optimizer='adam', loss='binary_crossentropy', metrics=['accuracy']
)
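    # Shape walk-through for the stack above (all convolutions use 'valid' padding):
    # 64x64x3 -> Conv 3x3 -> 62x62x32 -> MaxPool 2x2 -> 31x31x32 -> Conv 3x3 -> 29x29x32
    # -> MaxPool 2x2 -> 14x14x32 -> Flatten -> 6272 -> Dense 128 -> Dense 1 (sigmoid)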
# Part 2 - Fitting the CNN to the images
# Load Trained model weights
# from keras.models import load_model
# regressor=load_model('cnn.h5')
lowerCamelCase : Optional[Any] = tf.keras.preprocessing.image.ImageDataGenerator(
rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
)
lowerCamelCase : Any = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)
lowerCamelCase : List[Any] = train_datagen.flow_from_directory(
'dataset/training_set', target_size=(64, 64), batch_size=32, class_mode='binary'
)
lowerCamelCase : List[str] = test_datagen.flow_from_directory(
'dataset/test_set', target_size=(64, 64), batch_size=32, class_mode='binary'
)
classifier.fit_generator(
training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
)
classifier.save('cnn.h5')
# Part 3 - Making new predictions
lowerCamelCase : List[str] = tf.keras.preprocessing.image.load_img(
'dataset/single_prediction/image.png', target_size=(64, 64)
)
lowerCamelCase : Optional[int] = tf.keras.preprocessing.image.img_to_array(test_image)
lowerCamelCase : str = np.expand_dims(test_image, axis=0)
lowerCamelCase : List[str] = classifier.predict(test_image)
# training_set.class_indices
if result[0][0] == 0:
lowerCamelCase : Any = 'Normal'
if result[0][0] == 1:
lowerCamelCase : Any = 'Abnormality detected'
| 2 | 0 |
'''simple docstring'''
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
    "The RoBERTa Model transformer with early exiting (DeeRoBERTa). ",
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaModel(DeeBertModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)

        self.embeddings = RobertaEmbeddings(config)
        self.init_weights()


@add_start_docstrings(
    """RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
    also takes care of multi-layer training. """,
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaForSequenceClassification(BertPreTrainedModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.roberta = DeeRobertaModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

    @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.roberta(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )

            pooled_output = outputs[1]

            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                # exclude the final highway, of course
                outputs = (sum(highway_losses[:-1]),) + outputs
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), entropy
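

# Minimal smoke-test sketch (illustrative; assumes the DeeBERT research code is
# importable so that DeeBertModel and entropy resolve):
#
#   import torch
#   config = RobertaConfig(num_labels=2)
#   model = DeeRobertaForSequenceClassification(config).eval()
#   logits = model(input_ids=torch.randint(0, config.vocab_size, (1, 8)))[0]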
| 83 |
'''simple docstring'''
class Matrix:  # Public class to implement a graph
    def __init__(self, row: int, col: int, graph: list[list[bool]]):
        self.ROW = row
        self.COL = col
        self.graph = graph

    def is_safe(self, i: int, j: int, visited: list[list[bool]]) -> bool:
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )

    def diffs(self, i: int, j: int, visited: list[list[bool]]) -> None:
        # Checking all 8 elements surrounding the nth element
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make those cells visited
        for k in range(8):
            if self.is_safe(i + row_nbr[k], j + col_nbr[k], visited):
                self.diffs(i + row_nbr[k], j + col_nbr[k], visited)

    def count_islands(self) -> int:  # And finally, count all islands.
        visited = [[False for j in range(self.COL)] for i in range(self.ROW)]
        count = 0
        for i in range(self.ROW):
            for j in range(self.COL):
                if visited[i][j] is False and self.graph[i][j] == 1:
                    self.diffs(i, j, visited)
                    count += 1
        return count
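
# Example (illustrative): two islands under 8-connectivity.
#
#   grid = [[1, 1, 0], [0, 0, 0], [0, 0, 1]]
#   Matrix(3, 3, grid).count_islands()  # -> 2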
| 2 | 0 |
"""simple docstring"""
def counting_sort(collection):
    """Sort a collection of integers with counting sort (stable, O(n + k))."""
    if collection == []:
        return []

    # get some information about the collection
    coll_len = len(collection)
    coll_max = max(collection)
    coll_min = min(collection)

    # create the counting array
    counting_arr_length = coll_max + 1 - coll_min
    counting_arr = [0] * counting_arr_length

    # count how many times a number appears in the collection
    for number in collection:
        counting_arr[number - coll_min] += 1

    # sum each position with its predecessors; counting_arr[i] now tells
    # us how many elements <= i there are in the collection
    for i in range(1, counting_arr_length):
        counting_arr[i] = counting_arr[i] + counting_arr[i - 1]

    # create the output collection
    ordered = [0] * coll_len

    # place the elements in the output, respecting the original order (stable
    # sort) from end to begin, updating counting_arr
    for i in reversed(range(0, coll_len)):
        ordered[counting_arr[collection[i] - coll_min] - 1] = collection[i]
        counting_arr[collection[i] - coll_min] -= 1

    return ordered


def counting_sort_string(string):
    """Sort the characters of a string via counting sort on their code points."""
    return "".join([chr(i) for i in counting_sort([ord(c) for c in string])])


if __name__ == "__main__":
    # Test string sort
    assert counting_sort_string('thisisthestring') == "eghhiiinrsssttt"

    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
    print(counting_sort(unsorted))
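    # Example (illustrative): counting_sort([0, 5, 3, 2, 2]) -> [0, 2, 2, 3, 5]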
| 84 |
'''simple docstring'''
import unittest
from transformers import DonutProcessor
MODEL_NAME = 'naver-clova-ix/donut-base'


class DonutProcessorTest(unittest.TestCase):
    def setUp(self):
        self.processor = DonutProcessor.from_pretrained(MODEL_NAME)

    def test_token2json(self):
        expected_json = {
            'name': 'John Doe',
            'age': '99',
            'city': 'Atlanta',
            'state': 'GA',
            'zip': '30301',
            'phone': '123-4567',
            'nicknames': [{'nickname': 'Johnny'}, {'nickname': 'JD'}],
        }

        sequence = (
            '<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>'
            '<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>'
            '<s_nicknames><s_nickname>Johnny</s_nickname>'
            '<sep/><s_nickname>JD</s_nickname></s_nicknames>'
        )
        actual_json = self.processor.token2json(sequence)

        self.assertDictEqual(actual_json, expected_json)
| 2 | 0 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _snake_case ( lowercase_ ):
lowerCAmelCase_ : Tuple = ["image_processor", "tokenizer"]
lowerCAmelCase_ : Any = "FlavaImageProcessor"
lowerCAmelCase_ : Dict = ("BertTokenizer", "BertTokenizerFast")
def __init__( self , a__=None , a__=None , **a__ ) -> int:
'''simple docstring'''
snake_case_ = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , a__ , )
snake_case_ = kwargs.pop("feature_extractor" )
snake_case_ = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(a__ , a__ )
snake_case_ = self.image_processor
def __call__( self , a__ = None , a__ = None , a__ = True , a__ = False , a__ = False , a__ = None , a__ = 0 , a__ = None , a__ = None , a__ = None , a__ = None , a__ = None , a__ = False , a__ = False , a__ = False , a__ = False , a__ = True , a__ = None , **a__ , ) -> Optional[Any]:
'''simple docstring'''
if text is None and images is None:
raise ValueError("You have to specify either text or images. Both cannot be none." )
if text is not None:
snake_case_ = self.tokenizer(
text=a__ , add_special_tokens=a__ , padding=a__ , truncation=a__ , max_length=a__ , stride=a__ , pad_to_multiple_of=a__ , return_token_type_ids=a__ , return_attention_mask=a__ , return_overflowing_tokens=a__ , return_special_tokens_mask=a__ , return_offsets_mapping=a__ , return_length=a__ , verbose=a__ , return_tensors=a__ , **a__ , )
if images is not None:
snake_case_ = self.image_processor(
a__ , return_image_mask=a__ , return_codebook_pixels=a__ , return_tensors=a__ , **a__ , )
if text is not None and images is not None:
encoding.update(a__ )
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**a__ ) , tensor_type=a__ )
def lowerCAmelCase__ ( self , *a__ , **a__ ) -> Any:
'''simple docstring'''
return self.tokenizer.batch_decode(*a__ , **a__ )
def lowerCAmelCase__ ( self , *a__ , **a__ ) -> Dict:
'''simple docstring'''
return self.tokenizer.decode(*a__ , **a__ )
@property
def lowerCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ = self.tokenizer.model_input_names
snake_case_ = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def lowerCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , a__ , )
return self.image_processor_class
@property
def lowerCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , a__ , )
return self.image_processor
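

# Minimal usage sketch (illustrative; the model name refers to the public FLAVA
# checkpoint on the Hub):
#
#   from PIL import Image
#   from transformers import FlavaProcessor
#
#   processor = FlavaProcessor.from_pretrained('facebook/flava-full')
#   inputs = processor(text=['a photo of a cat'], images=Image.open('cat.png'),
#                      return_tensors='pt', padding=True)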
| 85 |
'''simple docstring'''
from __future__ import annotations
def _SCREAMING_SNAKE_CASE (A ) -> bool:
"""simple docstring"""
return len(set(A ) ) == len(A )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 2 | 0 |
"""simple docstring"""
import math
def sieve(n):
    """Segmented sieve of Eratosthenes: return all primes <= n."""
    in_prime = []
    start = 2
    end = int(math.sqrt(n))  # Size of every segment
    temp = [True] * (end + 1)
    prime = []

    while start <= end:
        if temp[start] is True:
            in_prime.append(start)
            for i in range(start * start, end + 1, start):
                temp[i] = False
        start += 1
    prime += in_prime

    low = end + 1
    high = min(2 * end, n)

    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            t = math.floor(low / each) * each
            if t < low:
                t += each

            for j in range(t, high + 1, each):
                temp[j - low] = False

        for j in range(len(temp)):
            if temp[j] is True:
                prime.append(j + low)

        low = high + 1
        high = min(high + end, n)

    return prime
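
# Example (illustrative): sieve(30) -> [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]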
print(sieve(10**6))
| 86 |
'''simple docstring'''
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_ta import TaTokenizer
else:
lowerCamelCase : Any = None
lowerCamelCase : Dict = logging.get_logger(__name__)
lowerCamelCase : Optional[int] = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
lowerCamelCase : List[str] = {
'vocab_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/spiece.model',
't5-base': 'https://huggingface.co/t5-base/resolve/main/spiece.model',
't5-large': 'https://huggingface.co/t5-large/resolve/main/spiece.model',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/spiece.model',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/spiece.model',
},
'tokenizer_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/tokenizer.json',
't5-base': 'https://huggingface.co/t5-base/resolve/main/tokenizer.json',
't5-large': 'https://huggingface.co/t5-large/resolve/main/tokenizer.json',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/tokenizer.json',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/tokenizer.json',
},
}
# TODO(PVP) - this should be removed in Transformers v5
lowerCamelCase : Any = {
't5-small': 512,
't5-base': 512,
't5-large': 512,
't5-3b': 512,
't5-11b': 512,
}
class __lowerCAmelCase (lowercase_ ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = VOCAB_FILES_NAMES
lowerCAmelCase__ : str = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ : int = ["""input_ids""", """attention_mask"""]
lowerCAmelCase__ : Optional[int] = TaTokenizer
lowerCAmelCase__ : List[int] = []
def __init__(self : Dict , UpperCamelCase : str=None , UpperCamelCase : Optional[Any]=None , UpperCamelCase : Any="</s>" , UpperCamelCase : str="<unk>" , UpperCamelCase : List[str]="<pad>" , UpperCamelCase : List[str]=100 , UpperCamelCase : Tuple=None , **UpperCamelCase : List[str] , ):
'''simple docstring'''
if extra_ids > 0 and additional_special_tokens is None:
lowercase__ = [f"<extra_id_{i}>" for i in range(UpperCamelCase )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra special tokens
lowercase__ = len(set(filter(lambda UpperCamelCase : bool('''extra_id_''' in str(UpperCamelCase ) ) , UpperCamelCase ) ) )
if extra_tokens != extra_ids:
raise ValueError(
f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
''' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'''
''' tokens''' )
super().__init__(
UpperCamelCase , tokenizer_file=UpperCamelCase , eos_token=UpperCamelCase , unk_token=UpperCamelCase , pad_token=UpperCamelCase , extra_ids=UpperCamelCase , additional_special_tokens=UpperCamelCase , **UpperCamelCase , )
lowercase__ = vocab_file
lowercase__ = False if not self.vocab_file else True
lowercase__ = extra_ids
@staticmethod
def UpperCamelCase__ (UpperCamelCase : List[Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[Any] ):
'''simple docstring'''
if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
lowercase__ = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
'''This tokenizer was incorrectly instantiated with a model max length of'''
f" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
''' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'''
''' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'''
f" {pretrained_model_name_or_path} automatically truncating your input to"
f" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
f" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
''' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'''
''' instantiate this tokenizer with `model_max_length` set to your preferred value.''' , UpperCamelCase , )
return max_model_length
def UpperCamelCase__ (self : Any , UpperCamelCase : str , UpperCamelCase : Optional[str] = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(UpperCamelCase ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
lowercase__ = os.path.join(
UpperCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase ):
copyfile(self.vocab_file , UpperCamelCase )
logger.info(f"Copy vocab file to {out_vocab_file}" )
return (out_vocab_file,)
def UpperCamelCase__ (self : Any , UpperCamelCase : List[int] , UpperCamelCase : Optional[List[int]] = None ):
'''simple docstring'''
lowercase__ = token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return self.prefix_tokens + token_ids_a
else:
lowercase__ = token_ids_a + [self.eos_token_id]
return self.prefix_tokens + token_ids_a + token_ids_a
def UpperCamelCase__ (self : Optional[Any] , UpperCamelCase : List[int] , UpperCamelCase : Optional[List[int]] = None ):
'''simple docstring'''
lowercase__ = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
    def get_sentinel_tokens(self):
        return list(
            set(filter(lambda token: bool(re.search(r'<extra_id_\d+>', token)) is not None, self.additional_special_tokens))
        )

    def get_sentinel_token_ids(self):
        return [self.convert_tokens_to_ids(token) for token in self.get_sentinel_tokens()]
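
# Usage sketch (illustrative): the fast T5 tokenizer appends </s> to every
# encoded sequence, and the <extra_id_*> sentinel tokens above are the
# span-corruption placeholders, e.g.
#
#   from transformers import T5TokenizerFast
#   tok = T5TokenizerFast.from_pretrained('t5-small')
#   tok('Hello world').input_ids[-1] == tok.eos_token_id  # True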
| 2 | 0 |
def decimal_to_binary(num: int) -> str:
    """Convert an integer to its binary string representation."""
    if isinstance(num, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if isinstance(num, str):
        raise TypeError("'str' object cannot be interpreted as an integer")

    if num == 0:
        return "0b0"

    negative = False

    if num < 0:
        negative = True
        num = -num

    binary: list[int] = []
    while num > 0:
        binary.insert(0, num % 2)
        num >>= 1

    if negative:
        return "-0b" + "".join(str(e) for e in binary)

    return "0b" + "".join(str(e) for e in binary)
if __name__ == "__main__":
import doctest
doctest.testmod()
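
# Example (illustrative): decimal_to_binary(5) -> '0b101',
# decimal_to_binary(-2) -> '-0b10'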
| 87 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class __lowerCAmelCase (lowercase_ , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ : Dict = ShapEImgaImgPipeline
lowerCAmelCase__ : List[str] = ["""image"""]
lowerCAmelCase__ : Any = ["""image"""]
lowerCAmelCase__ : Any = [
"""num_images_per_prompt""",
"""num_inference_steps""",
"""generator""",
"""latents""",
"""guidance_scale""",
"""frame_size""",
"""output_type""",
"""return_dict""",
]
lowerCAmelCase__ : Tuple = False
@property
def UpperCamelCase__ (self : List[Any] ):
'''simple docstring'''
return 32
@property
def UpperCamelCase__ (self : str ):
'''simple docstring'''
return 32
@property
def UpperCamelCase__ (self : Union[str, Any] ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def UpperCamelCase__ (self : int ):
'''simple docstring'''
return 8
@property
def UpperCamelCase__ (self : Any ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase__ = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , )
lowercase__ = CLIPVisionModel(UpperCamelCase )
return model
@property
def UpperCamelCase__ (self : List[Any] ):
'''simple docstring'''
lowercase__ = CLIPImageProcessor(
crop_size=224 , do_center_crop=UpperCamelCase , do_normalize=UpperCamelCase , do_resize=UpperCamelCase , image_mean=[0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73] , image_std=[0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11] , resample=3 , size=224 , )
return image_processor
@property
def UpperCamelCase__ (self : str ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase__ = {
'''num_attention_heads''': 2,
'''attention_head_dim''': 16,
'''embedding_dim''': self.time_input_dim,
'''num_embeddings''': 32,
'''embedding_proj_dim''': self.text_embedder_hidden_size,
'''time_embed_dim''': self.time_embed_dim,
'''num_layers''': 1,
'''clip_embed_dim''': self.time_input_dim * 2,
'''additional_embeddings''': 0,
'''time_embed_act_fn''': '''gelu''',
'''norm_in_type''': '''layer''',
'''embedding_proj_norm_type''': '''layer''',
'''encoder_hid_proj_type''': None,
'''added_emb_type''': None,
}
lowercase__ = PriorTransformer(**UpperCamelCase )
return model
@property
def UpperCamelCase__ (self : int ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase__ = {
'''param_shapes''': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'''d_latent''': self.time_input_dim,
'''d_hidden''': self.renderer_dim,
'''n_output''': 12,
'''background''': (
0.1,
0.1,
0.1,
),
}
lowercase__ = ShapERenderer(**UpperCamelCase )
return model
def UpperCamelCase__ (self : Union[str, Any] ):
'''simple docstring'''
lowercase__ = self.dummy_prior
lowercase__ = self.dummy_image_encoder
lowercase__ = self.dummy_image_processor
lowercase__ = self.dummy_renderer
lowercase__ = HeunDiscreteScheduler(
beta_schedule='''exp''' , num_train_timesteps=1024 , prediction_type='''sample''' , use_karras_sigmas=UpperCamelCase , clip_sample=UpperCamelCase , clip_sample_range=1.0 , )
lowercase__ = {
'''prior''': prior,
'''image_encoder''': image_encoder,
'''image_processor''': image_processor,
'''renderer''': renderer,
'''scheduler''': scheduler,
}
return components
def UpperCamelCase__ (self : Optional[int] , UpperCamelCase : Optional[int] , UpperCamelCase : str=0 ):
'''simple docstring'''
lowercase__ = floats_tensor((1, 3, 64, 64) , rng=random.Random(UpperCamelCase ) ).to(UpperCamelCase )
if str(UpperCamelCase ).startswith('''mps''' ):
lowercase__ = torch.manual_seed(UpperCamelCase )
else:
lowercase__ = torch.Generator(device=UpperCamelCase ).manual_seed(UpperCamelCase )
lowercase__ = {
'''image''': input_image,
'''generator''': generator,
'''num_inference_steps''': 1,
'''frame_size''': 32,
'''output_type''': '''np''',
}
return inputs
def UpperCamelCase__ (self : Tuple ):
'''simple docstring'''
lowercase__ = '''cpu'''
lowercase__ = self.get_dummy_components()
lowercase__ = self.pipeline_class(**UpperCamelCase )
lowercase__ = pipe.to(UpperCamelCase )
pipe.set_progress_bar_config(disable=UpperCamelCase )
lowercase__ = pipe(**self.get_dummy_inputs(UpperCamelCase ) )
lowercase__ = output.images[0]
lowercase__ = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
lowercase__ = np.array(
[
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCamelCase__ (self : Optional[Any] ):
'''simple docstring'''
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def UpperCamelCase__ (self : int ):
'''simple docstring'''
lowercase__ = torch_device == '''cpu'''
lowercase__ = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=UpperCamelCase , relax_max_difference=UpperCamelCase , )
def UpperCamelCase__ (self : int ):
'''simple docstring'''
lowercase__ = self.get_dummy_components()
lowercase__ = self.pipeline_class(**UpperCamelCase )
lowercase__ = pipe.to(UpperCamelCase )
pipe.set_progress_bar_config(disable=UpperCamelCase )
lowercase__ = 1
lowercase__ = 2
lowercase__ = self.get_dummy_inputs(UpperCamelCase )
for key in inputs.keys():
if key in self.batch_params:
lowercase__ = batch_size * [inputs[key]]
lowercase__ = pipe(**UpperCamelCase , num_images_per_prompt=UpperCamelCase )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class __lowerCAmelCase (unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ (self : Optional[Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase__ (self : Union[str, Any] ):
'''simple docstring'''
lowercase__ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/corgi.png''' )
lowercase__ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/shap_e/test_shap_e_img2img_out.npy''' )
lowercase__ = ShapEImgaImgPipeline.from_pretrained('''openai/shap-e-img2img''' )
lowercase__ = pipe.to(UpperCamelCase )
pipe.set_progress_bar_config(disable=UpperCamelCase )
lowercase__ = torch.Generator(device=UpperCamelCase ).manual_seed(0 )
lowercase__ = pipe(
UpperCamelCase , generator=UpperCamelCase , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type='''np''' , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(UpperCamelCase , UpperCamelCase )
| 2 | 0 |
import mpmath # for roots of unity
import numpy as np
class FFT:
    """Fast polynomial multiplication via a radix-2 FFT."""

    def __init__(self, poly_a=None, poly_b=None):
        # Input as coefficient lists, lowest degree first
        self.polyA = list(poly_a or [0])[:]
        self.polyB = list(poly_b or [0])[:]

        # Remove leading zero coefficients
        while self.polyA[-1] == 0:
            self.polyA.pop()
        self.len_A = len(self.polyA)

        while self.polyB[-1] == 0:
            self.polyB.pop()
        self.len_B = len(self.polyB)

        # Add 0 to make lengths equal a power of 2
        self.c_max_length = int(
            2 ** np.ceil(np.log2(len(self.polyA) + len(self.polyB) - 1))
        )

        while len(self.polyA) < self.c_max_length:
            self.polyA.append(0)
        while len(self.polyB) < self.c_max_length:
            self.polyB.append(0)

        # A complex root used for the fourier transform
        self.root = complex(mpmath.root(x=1, n=self.c_max_length, k=1))

        # The product
        self.product = self.__multiply()

    # Discrete fourier transform of A or B
    def __dft(self, which):
        dft = [[x] for x in self.polyA] if which == "A" else [[x] for x in self.polyB]
        # Corner case
        if len(dft) <= 1:
            return dft[0]

        next_ncol = self.c_max_length // 2
        while next_ncol > 0:
            new_dft = [[] for i in range(next_ncol)]
            root = self.root**next_ncol

            # First half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j])
                current_root *= root
            # Second half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j])
                current_root *= root
            # Update
            dft = new_dft
            next_ncol = next_ncol // 2
        return dft[0]

    # Multiply the DFTs of A and B, then apply the inverse DFT to get A*B
    def __multiply(self):
        dft_a = self.__dft("A")
        dft_b = self.__dft("B")
        inverse_c = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]]
        del dft_a
        del dft_b

        # Corner case
        if len(inverse_c[0]) <= 1:
            return inverse_c[0]
        # Inverse DFT
        next_ncol = 2
        while next_ncol <= self.c_max_length:
            new_inverse_c = [[] for i in range(next_ncol)]
            root = self.root ** (next_ncol // 2)
            current_root = 1
            # First half of next step
            for j in range(self.c_max_length // next_ncol):
                for i in range(next_ncol // 2):
                    # Even positions
                    new_inverse_c[i].append(
                        (
                            inverse_c[i][j]
                            + inverse_c[i][j + self.c_max_length // next_ncol]
                        )
                        / 2
                    )
                    # Odd positions
                    new_inverse_c[i + next_ncol // 2].append(
                        (
                            inverse_c[i][j]
                            - inverse_c[i][j + self.c_max_length // next_ncol]
                        )
                        / (2 * current_root)
                    )
                current_root *= root
            # Update
            inverse_c = new_inverse_c
            next_ncol *= 2
        # Unpack
        inverse_c = [round(x[0].real, 8) + round(x[0].imag, 8) * 1j for x in inverse_c]

        # Remove trailing 0's
        while inverse_c[-1] == 0:
            inverse_c.pop()
        return inverse_c

    # Overwrite __str__ for print(); shows A, B and A*B
    def __str__(self):
        a = "A = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.polyA[: self.len_A])
        )
        b = "B = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.polyB[: self.len_B])
        )
        c = "A*B = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.product)
        )

        return f"{a}\n{b}\n{c}"
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
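
# Example (illustrative): multiplying A(x) = 1 + 2x by B(x) = 3 + 4x.
#
#   x = FFT(poly_a=[1, 2], poly_b=[3, 4])
#   print(x)  # A*B = 3 + 10x + 8x^2, held as complex coefficients [3, 10, 8]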
| 88 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_rag': ['RagConfig'],
'retrieval_rag': ['RagRetriever'],
'tokenization_rag': ['RagTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_rag'] = [
'RagModel',
'RagPreTrainedModel',
'RagSequenceForGeneration',
'RagTokenForGeneration',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_rag'] = [
'TFRagModel',
'TFRagPreTrainedModel',
'TFRagSequenceForGeneration',
'TFRagTokenForGeneration',
]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 2 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_jukebox''': [
'''JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''JukeboxConfig''',
'''JukeboxPriorConfig''',
'''JukeboxVQVAEConfig''',
],
'''tokenization_jukebox''': ['''JukeboxTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_jukebox'''] = [
'''JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''JukeboxModel''',
'''JukeboxPreTrainedModel''',
'''JukeboxVQVAE''',
'''JukeboxPrior''',
]
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 89 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : int = logging.get_logger(__name__)
lowerCamelCase : List[Any] = {
'google/realm-cc-news-pretrained-embedder': (
'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-encoder': (
'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-scorer': (
'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-openqa': (
        'https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/config.json'
),
'google/realm-orqa-nq-openqa': 'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json',
'google/realm-orqa-nq-reader': 'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json',
'google/realm-orqa-wq-openqa': 'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json',
'google/realm-orqa-wq-reader': 'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json',
# See all REALM models at https://huggingface.co/models?filter=realm
}
class RealmConfig(PretrainedConfig):
    model_type = "realm"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        retriever_proj_size=128,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_candidates=8,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        span_hidden_size=256,
        max_span_width=10,
        reader_layer_norm_eps=1e-3,
        reader_beam_size=5,
        reader_seq_len=320,
        num_block_records=13353718,
        searcher_beam_size=5000,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        # Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len

        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
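
# Minimal usage sketch (illustrative):
#
#   config = RealmConfig()
#   config.num_candidates    # 8
#   config.reader_beam_size  # 5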
| 2 | 0 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
__A = logging.get_logger(__name__)
__A = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"adapter_layer": "encoder.layers.*.adapter_layer",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
"pooling_layer.linear": "projector",
"pooling_layer.projection": "classifier",
}
__A = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
"projector",
"classifier",
]
def lowerCamelCase_ ( UpperCamelCase__ : List[str] ) -> int:
"""simple docstring"""
__lowerCamelCase = {}
with open(UpperCamelCase__ , 'r' ) as file:
for line_number, line in enumerate(UpperCamelCase__ ):
__lowerCamelCase = line.strip()
if line:
__lowerCamelCase = line.split()
__lowerCamelCase = line_number
__lowerCamelCase = words[0]
__lowerCamelCase = value
return result
def lowerCamelCase_ ( UpperCamelCase__ : Dict , UpperCamelCase__ : int , UpperCamelCase__ : Tuple , UpperCamelCase__ : str , UpperCamelCase__ : Union[str, Any] ) -> Tuple:
"""simple docstring"""
for attribute in key.split('.' ):
__lowerCamelCase = getattr(UpperCamelCase__ , UpperCamelCase__ )
__lowerCamelCase = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(UpperCamelCase__ ):
__lowerCamelCase = PARAM_MAPPING[full_name.split('.' )[-1]]
__lowerCamelCase = 'param'
if weight_type is not None and weight_type != "param":
__lowerCamelCase = getattr(UpperCamelCase__ , UpperCamelCase__ ).shape
elif weight_type is not None and weight_type == "param":
__lowerCamelCase = hf_pointer
for attribute in hf_param_name.split('.' ):
__lowerCamelCase = getattr(UpperCamelCase__ , UpperCamelCase__ )
__lowerCamelCase = shape_pointer.shape
# let's reduce dimension
__lowerCamelCase = value[0]
else:
__lowerCamelCase = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}""" )
if weight_type == "weight":
__lowerCamelCase = value
elif weight_type == "weight_g":
__lowerCamelCase = value
elif weight_type == "weight_v":
__lowerCamelCase = value
elif weight_type == "bias":
__lowerCamelCase = value
elif weight_type == "param":
for attribute in hf_param_name.split('.' ):
__lowerCamelCase = getattr(UpperCamelCase__ , UpperCamelCase__ )
__lowerCamelCase = value
else:
__lowerCamelCase = value
logger.info(F"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def lowerCamelCase_ ( UpperCamelCase__ : List[Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Any ) -> Union[str, Any]:
"""simple docstring"""
__lowerCamelCase = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(UpperCamelCase__ ):
__lowerCamelCase = PARAM_MAPPING[full_name.split('.' )[-1]]
__lowerCamelCase = 'param'
if weight_type is not None and weight_type != "param":
__lowerCamelCase = '.'.join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
__lowerCamelCase = '.'.join([key, hf_param_name] )
else:
__lowerCamelCase = key
__lowerCamelCase = value if 'lm_head' in full_key else value[0]
__A = {
"W_a": "linear_1.weight",
"W_b": "linear_2.weight",
"b_a": "linear_1.bias",
"b_b": "linear_2.bias",
"ln_W": "norm.weight",
"ln_b": "norm.bias",
}
def lowerCamelCase_ ( UpperCamelCase__ : List[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Union[str, Any]=None , UpperCamelCase__ : Optional[int]=None ) -> Union[str, Any]:
"""simple docstring"""
__lowerCamelCase = False
for key, mapped_key in MAPPING.items():
__lowerCamelCase = 'wav2vec2.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
__lowerCamelCase = True
if "*" in mapped_key:
__lowerCamelCase = name.split(UpperCamelCase__ )[0].split('.' )[-2]
__lowerCamelCase = mapped_key.replace('*' , UpperCamelCase__ )
if "weight_g" in name:
__lowerCamelCase = 'weight_g'
elif "weight_v" in name:
__lowerCamelCase = 'weight_v'
elif "bias" in name:
__lowerCamelCase = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
__lowerCamelCase = 'weight'
else:
__lowerCamelCase = None
if hf_dict is not None:
rename_dict(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
else:
set_recursively(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
    return is_used
def lowerCamelCase_ ( UpperCamelCase__ : List[str] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
__lowerCamelCase = []
__lowerCamelCase = fairseq_model.state_dict()
__lowerCamelCase = hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
__lowerCamelCase = False
if "conv_layers" in name:
load_conv_layer(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , hf_model.config.feat_extract_norm == 'group' , )
__lowerCamelCase = True
else:
__lowerCamelCase = load_wavaveca_layer(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
if not is_used:
unused_weights.append(UpperCamelCase__ )
logger.warning(F"""Unused weights: {unused_weights}""" )
def lowerCamelCase_ ( UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : str , UpperCamelCase__ : int ) -> Any:
"""simple docstring"""
__lowerCamelCase = full_name.split('conv_layers.' )[-1]
__lowerCamelCase = name.split('.' )
__lowerCamelCase = int(items[0] )
__lowerCamelCase = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
__lowerCamelCase = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
__lowerCamelCase = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
__lowerCamelCase = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
__lowerCamelCase = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(UpperCamelCase__ )
@torch.no_grad()
def lowerCamelCase_ ( UpperCamelCase__ : Any , UpperCamelCase__ : int , UpperCamelCase__ : Any=None , UpperCamelCase__ : str=None , UpperCamelCase__ : int=True , UpperCamelCase__ : str=False ) -> str:
"""simple docstring"""
if config_path is not None:
__lowerCamelCase = WavaVecaConfig.from_pretrained(UpperCamelCase__ )
else:
__lowerCamelCase = WavaVecaConfig()
if is_seq_class:
__lowerCamelCase = read_txt_into_dict(UpperCamelCase__ )
__lowerCamelCase = idalabel
__lowerCamelCase = WavaVecaForSequenceClassification(UpperCamelCase__ )
__lowerCamelCase = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=UpperCamelCase__ , return_attention_mask=UpperCamelCase__ , )
feature_extractor.save_pretrained(UpperCamelCase__ )
elif is_finetuned:
if dict_path:
__lowerCamelCase = Dictionary.load(UpperCamelCase__ )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
__lowerCamelCase = target_dict.pad_index
__lowerCamelCase = target_dict.bos_index
__lowerCamelCase = target_dict.eos_index
__lowerCamelCase = len(target_dict.symbols )
__lowerCamelCase = os.path.join(UpperCamelCase__ , 'vocab.json' )
if not os.path.isdir(UpperCamelCase__ ):
logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(UpperCamelCase__ ) )
return
os.makedirs(UpperCamelCase__ , exist_ok=UpperCamelCase__ )
__lowerCamelCase = target_dict.indices
# fairseq has the <pad> and <s> switched
__lowerCamelCase = 0
__lowerCamelCase = 1
with open(UpperCamelCase__ , 'w' , encoding='utf-8' ) as vocab_handle:
json.dump(UpperCamelCase__ , UpperCamelCase__ )
__lowerCamelCase = WavaVecaCTCTokenizer(
UpperCamelCase__ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=UpperCamelCase__ , )
__lowerCamelCase = True if config.feat_extract_norm == 'layer' else False
__lowerCamelCase = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=UpperCamelCase__ , return_attention_mask=UpperCamelCase__ , )
__lowerCamelCase = WavaVecaProcessor(feature_extractor=UpperCamelCase__ , tokenizer=UpperCamelCase__ )
processor.save_pretrained(UpperCamelCase__ )
__lowerCamelCase = WavaVecaForCTC(UpperCamelCase__ )
else:
__lowerCamelCase = WavaVecaForPreTraining(UpperCamelCase__ )
if is_finetuned or is_seq_class:
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
else:
__lowerCamelCase = argparse.Namespace(task='audio_pretraining' )
__lowerCamelCase = fairseq.tasks.setup_task(UpperCamelCase__ )
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=UpperCamelCase__ )
__lowerCamelCase = model[0].eval()
recursively_load_weights(UpperCamelCase__ , UpperCamelCase__ , not is_finetuned )
hf_wavavec.save_pretrained(UpperCamelCase__ )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
parser.add_argument(
"--is_seq_class",
action="store_true",
help="Whether the model to convert is a fine-tuned sequence classification model or not",
)
__A = parser.parse_args()
__A = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
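# Usage sketch (illustrative paths; the script file name is assumed):
#
#   python convert_wav2vec2_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path /path/to/fairseq/checkpoint.pt \
#       --pytorch_dump_folder_path /path/to/output \
#       --dict_path /path/to/dict.ltr.txt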
| 90 |
'''simple docstring'''
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : str = logging.get_logger(__name__)
lowerCamelCase : int = {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json',
}
class MvpConfig(PretrainedConfig):
    model_type = "mvp"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50267,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        classifier_dropout=0.0,
        scale_embedding=False,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        is_encoder_decoder=True,
        decoder_start_token_id=2,
        forced_eos_token_id=2,
        use_prompt=False,
        prompt_length=100,
        prompt_mid_dim=800,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

        if self.forced_bos_token_id is None and kwargs.get('force_bos_token_to_be_generated', False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )
| 2 | 0 |
"""simple docstring"""
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import (
SeqaSeqDataset,
calculate_bleu,
calculate_rouge,
chunks,
lmap,
load_json,
parse_numeric_n_bool_cl_kwargs,
save_json,
use_task_specific_params,
write_txt_file,
)
UpperCAmelCase_ : Tuple = getLogger(__name__)
def _A (__a , __a , __a , __a = 8 , __a = 10_24 , __a="val" , __a=None , __a=False , __a="summarization" , __a=None , __a=1 , __a = None , __a="" , **__a , ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = str(__a )
assert local_rank is not None
torch.distributed.init_process_group(backend='''nccl''' , rank=__a )
SCREAMING_SNAKE_CASE_ : int = Path(__a )
SCREAMING_SNAKE_CASE_ : str = save_dir.joinpath(f'rank_{local_rank}_output.json' )
torch.cuda.set_device(__a )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = AutoModelForSeqaSeqLM.from_pretrained(__a ).cuda()
if fpaa:
SCREAMING_SNAKE_CASE_ : List[Any] = model.half()
# determine if we need to increase num_beams
use_task_specific_params(__a , __a ) # update config with task specific params
SCREAMING_SNAKE_CASE_ : str = generate_kwargs.pop('''num_beams''' , model.config.num_beams ) # AttributeError risk?
if num_return_sequences > num_beams:
SCREAMING_SNAKE_CASE_ : Tuple = num_return_sequences
SCREAMING_SNAKE_CASE_ : Tuple = AutoTokenizer.from_pretrained(__a )
logger.info(f'Inferred tokenizer type: {tokenizer.__class__}' ) # if this is wrong, check config.model_type.
if max_source_length is None:
SCREAMING_SNAKE_CASE_ : int = tokenizer.model_max_length
if prefix is None:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = prefix or getattr(model.config , '''prefix''' , '''''' ) or ''''''
SCREAMING_SNAKE_CASE_ : Union[str, Any] = SeqaSeqDataset(
__a , __a , __a , max_target_length=10_24 , type_path=__a , n_obs=__a , prefix=__a , **__a , )
# I set shuffle=True for a more accurate progress bar.
# If all the longest samples are first, the prog bar estimate is too high at the beginning.
SCREAMING_SNAKE_CASE_ : str = ds.make_sortish_sampler(__a , distributed=__a , add_extra_examples=__a , shuffle=__a )
SCREAMING_SNAKE_CASE_ : int = DataLoader(__a , sampler=__a , batch_size=__a , collate_fn=ds.collate_fn )
SCREAMING_SNAKE_CASE_ : str = []
for batch in tqdm(__a ):
SCREAMING_SNAKE_CASE_ : Dict = model.generate(
input_ids=batch['''input_ids'''].to(model.device ) , attention_mask=batch['''attention_mask'''].to(model.device ) , num_return_sequences=__a , num_beams=__a , **__a , )
SCREAMING_SNAKE_CASE_ : int = tokenizer.batch_decode(__a , skip_special_tokens=__a , clean_up_tokenization_spaces=__a )
SCREAMING_SNAKE_CASE_ : Tuple = batch['''ids''']
if num_return_sequences > 1:
SCREAMING_SNAKE_CASE_ : Dict = chunks(__a , __a ) # batch size chunks, each of size num_return_seq
for i, pred in enumerate(__a ):
results.append({'''pred''': pred, '''id''': ids[i].item()} )
save_json(__a , __a )
return results, sampler.num_replicas
def _A () -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = argparse.ArgumentParser(
epilog='''Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate''' )
parser.add_argument('''--data_dir''' , type=__a , help='''like cnn_dm/test.source''' )
parser.add_argument(
'''--model_name''' , type=__a , help='''like facebook/bart-large-cnn,t5-base, etc.''' , default='''sshleifer/distilbart-xsum-12-3''' , )
parser.add_argument('''--save_dir''' , type=__a , help='''where to save''' , default='''tmp_gen''' )
parser.add_argument('''--max_source_length''' , type=__a , default=__a )
parser.add_argument(
'''--type_path''' , type=__a , default='''test''' , help='''which subset to evaluate typically train/val/test''' )
parser.add_argument('''--task''' , type=__a , default='''summarization''' , help='''used for task_specific_params + metrics''' )
parser.add_argument('''--bs''' , type=__a , default=8 , required=__a , help='''batch size''' )
parser.add_argument(
'''--local_rank''' , type=__a , default=-1 , required=__a , help='''should be passed by distributed.launch''' )
parser.add_argument(
'''--n_obs''' , type=__a , default=__a , required=__a , help='''How many observations. Defaults to all.''' )
parser.add_argument(
'''--num_return_sequences''' , type=__a , default=1 , required=__a , help='''How many sequences to return''' )
parser.add_argument(
'''--sync_timeout''' , type=__a , default=6_00 , required=__a , help='''How long should master process wait for other processes to finish.''' , )
parser.add_argument('''--src_lang''' , type=__a , default=__a , required=__a )
parser.add_argument('''--tgt_lang''' , type=__a , default=__a , required=__a )
parser.add_argument(
        '''--prefix''' , type=__a , required=__a , default=__a , help='''will be added to the beginning of src examples''' )
parser.add_argument('''--fp16''' , action='''store_true''' )
parser.add_argument('''--debug''' , action='''store_true''' )
SCREAMING_SNAKE_CASE_ : Any = time.time()
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = parser.parse_known_args()
SCREAMING_SNAKE_CASE_ : Optional[Any] = parse_numeric_n_bool_cl_kwargs(__a )
if generate_kwargs and args.local_rank <= 0:
print(f'parsed the following generate kwargs: {generate_kwargs}' )
SCREAMING_SNAKE_CASE_ : Dict = Path(args.save_dir + '''_tmp''' )
Path(__a ).mkdir(exist_ok=__a ) # this handles locking.
SCREAMING_SNAKE_CASE_ : Optional[int] = list(json_save_dir.glob('''rank_*.json''' ) )
if intermediate_files:
raise ValueError(f'Found files at {json_save_dir} please move or remove them.' )
# In theory, a node could finish and save before another node hits this. If this happens, we can address later.
SCREAMING_SNAKE_CASE_ : Dict = {}
if args.src_lang is not None:
SCREAMING_SNAKE_CASE_ : int = args.src_lang
if args.tgt_lang is not None:
SCREAMING_SNAKE_CASE_ : Tuple = args.tgt_lang
Path(args.save_dir ).mkdir(exist_ok=__a )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = eval_data_dir(
args.data_dir , __a , args.model_name , type_path=args.type_path , bs=args.bs , fpaa=args.fpaa , task=args.task , local_rank=args.local_rank , n_obs=args.n_obs , max_source_length=args.max_source_length , num_return_sequences=args.num_return_sequences , prefix=args.prefix , dataset_kwargs=__a , **__a , )
if args.local_rank <= 0:
SCREAMING_SNAKE_CASE_ : str = Path(args.save_dir )
save_dir.mkdir(exist_ok=__a )
SCREAMING_SNAKE_CASE_ : Dict = gather_results_from_each_node(__a , __a , args.sync_timeout )
SCREAMING_SNAKE_CASE_ : Dict = combine_partial_results(__a )
if args.num_return_sequences > 1:
SCREAMING_SNAKE_CASE_ : Any = save_dir.joinpath('''pseudolabel_results.json''' )
print(f'Saving aggregated results at {save_path}, intermediate in {json_save_dir}/' )
save_json(__a , __a )
return
SCREAMING_SNAKE_CASE_ : Optional[int] = Path(args.data_dir ).joinpath(args.type_path + '''.target''' )
with open(__a ) as f:
SCREAMING_SNAKE_CASE_ : Any = [x.rstrip() for x in f.readlines()][: len(__a )]
# Calculate metrics, save metrics, and save _generations.txt
SCREAMING_SNAKE_CASE_ : int = '''translation''' in args.task
SCREAMING_SNAKE_CASE_ : str = calculate_bleu if calc_bleu else calculate_rouge
SCREAMING_SNAKE_CASE_ : Dict = '''bleu''' if calc_bleu else '''rouge'''
SCREAMING_SNAKE_CASE_ : Dict = score_fn(__a , __a )
SCREAMING_SNAKE_CASE_ : int = len(__a )
SCREAMING_SNAKE_CASE_ : Optional[int] = time.time() - start_time
SCREAMING_SNAKE_CASE_ : int = round(runtime / metrics['''n_obs'''] , 4 )
SCREAMING_SNAKE_CASE_ : Dict = num_replicas
# TODO(@stas00): add whatever metadata to metrics
SCREAMING_SNAKE_CASE_ : str = save_dir.joinpath(f'{args.type_path}_{metric_name}.json' )
save_json(__a , __a , indent=__a )
print(__a )
write_txt_file(__a , save_dir.joinpath(f'{args.type_path}_generations.txt' ) )
if args.debug:
write_txt_file(__a , save_dir.joinpath(f'{args.type_path}.target' ) )
else:
shutil.rmtree(__a )
def _A (__a ) -> List:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = []
for partial_result in partial_results:
records.extend(__a )
    SCREAMING_SNAKE_CASE_ : int = sorted(__a , key=lambda __a : __a["id"] )
SCREAMING_SNAKE_CASE_ : Dict = [x['''pred'''] for x in records]
return preds
def _A (__a , __a , __a ) -> List[Dict[str, List]]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = time.time()
logger.info('''waiting for all nodes to finish''' )
SCREAMING_SNAKE_CASE_ : Any = None
while (time.time() - start_wait) < timeout:
SCREAMING_SNAKE_CASE_ : Optional[int] = list(save_dir.glob('''rank_*.json''' ) )
if len(__a ) < num_replicas:
continue
try:
# make sure all json files are fully saved
SCREAMING_SNAKE_CASE_ : int = lmap(__a , __a )
return json_data
except JSONDecodeError:
continue
else:
raise TimeoutError('''Rank 0 gave up on waiting for other processes''' )
# Unreachable
if __name__ == "__main__":
# Usage for MT:
run_generate()
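# Illustrative sketch (standalone, assumed file layout; not part of the original
# script) of the rank-file synchronisation used above: every worker writes
# rank_<i>.json into a shared directory and rank 0 polls until all
# num_replicas shards parse as valid JSON.
import json
import time
from pathlib import Path

def wait_for_shards(save_dir: Path, num_replicas: int, timeout: float = 600.0):
    start = time.time()
    while time.time() - start < timeout:
        shards = sorted(save_dir.glob("rank_*.json"))
        if len(shards) < num_replicas:
            continue
        try:
            # succeeds only once every shard is fully written, valid JSON
            return [json.loads(p.read_text()) for p in shards]
        except json.JSONDecodeError:
            continue  # a shard is mid-write; retry
    raise TimeoutError("rank 0 gave up waiting for worker shards")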
| 91 |
'''simple docstring'''
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
lowerCamelCase : List[str] = get_tests_dir('fixtures/spiece.model')
@require_sentencepiece
@require_tokenizers
class __lowerCAmelCase (lowercase_ , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ : int = DebertaVaTokenizer
lowerCAmelCase__ : List[Any] = DebertaVaTokenizerFast
lowerCAmelCase__ : str = True
lowerCAmelCase__ : Tuple = True
def UpperCamelCase__ (self : Tuple ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
lowercase__ = DebertaVaTokenizer(UpperCamelCase , unk_token='''<unk>''' )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase__ (self : Union[str, Any] , UpperCamelCase : str ):
'''simple docstring'''
lowercase__ = '''this is a test'''
lowercase__ = '''this is a test'''
return input_text, output_text
def UpperCamelCase__ (self : Optional[int] ):
'''simple docstring'''
lowercase__ = '''<pad>'''
lowercase__ = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase ) , UpperCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase ) , UpperCamelCase )
def UpperCamelCase__ (self : Dict ):
'''simple docstring'''
lowercase__ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<pad>''' )
self.assertEqual(vocab_keys[1] , '''<unk>''' )
self.assertEqual(vocab_keys[-1] , '''[PAD]''' )
self.assertEqual(len(UpperCamelCase ) , 30001 )
def UpperCamelCase__ (self : int ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 30000 )
def UpperCamelCase__ (self : Optional[Any] ):
'''simple docstring'''
lowercase__ = ''' \tHeLLo!how \n Are yoU? '''
lowercase__ = ['''▁hello''', '''!''', '''how''', '''▁are''', '''▁you''', '''?''']
# fmt: on
lowercase__ = DebertaVaTokenizer(UpperCamelCase , do_lower_case=UpperCamelCase )
lowercase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = DebertaVaTokenizerFast(UpperCamelCase , do_lower_case=UpperCamelCase )
lowercase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
@unittest.skip('''There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.''' )
def UpperCamelCase__ (self : List[Any] ):
'''simple docstring'''
pass
@unittest.skip('''There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.''' )
def UpperCamelCase__ (self : List[str] ):
'''simple docstring'''
pass
def UpperCamelCase__ (self : Optional[int] ):
'''simple docstring'''
lowercase__ = '''I was born in 92000, and this is falsé.'''
lowercase__ = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ]
# fmt: on
lowercase__ = DebertaVaTokenizer(UpperCamelCase , split_by_punct=UpperCamelCase )
lowercase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = DebertaVaTokenizerFast(UpperCamelCase , split_by_punct=UpperCamelCase )
lowercase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
def UpperCamelCase__ (self : int ):
'''simple docstring'''
lowercase__ = '''I was born in 92000, and this is falsé.'''
lowercase__ = ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ]
# fmt: on
lowercase__ = DebertaVaTokenizer(UpperCamelCase , do_lower_case=UpperCamelCase , split_by_punct=UpperCamelCase )
lowercase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = DebertaVaTokenizerFast(UpperCamelCase , do_lower_case=UpperCamelCase , split_by_punct=UpperCamelCase )
lowercase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
def UpperCamelCase__ (self : Optional[Any] ):
'''simple docstring'''
lowercase__ = '''I was born in 92000, and this is falsé.'''
lowercase__ = ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.''', ]
# fmt: on
lowercase__ = DebertaVaTokenizer(UpperCamelCase , do_lower_case=UpperCamelCase , split_by_punct=UpperCamelCase )
lowercase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = DebertaVaTokenizerFast(UpperCamelCase , do_lower_case=UpperCamelCase , split_by_punct=UpperCamelCase )
lowercase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
def UpperCamelCase__ (self : Union[str, Any] ):
'''simple docstring'''
lowercase__ = '''I was born in 92000, and this is falsé.'''
lowercase__ = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ]
# fmt: on
lowercase__ = DebertaVaTokenizer(UpperCamelCase , do_lower_case=UpperCamelCase , split_by_punct=UpperCamelCase )
lowercase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = DebertaVaTokenizerFast(UpperCamelCase , do_lower_case=UpperCamelCase , split_by_punct=UpperCamelCase )
lowercase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
def UpperCamelCase__ (self : Union[str, Any] ):
'''simple docstring'''
lowercase__ = ''' \tHeLLo!how \n Are yoU? '''
lowercase__ = ['''▁''', '''<unk>''', '''e''', '''<unk>''', '''o''', '''!''', '''how''', '''▁''', '''<unk>''', '''re''', '''▁yo''', '''<unk>''', '''?''']
# fmt: on
lowercase__ = DebertaVaTokenizer(UpperCamelCase , do_lower_case=UpperCamelCase , split_by_punct=UpperCamelCase )
lowercase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = DebertaVaTokenizerFast(UpperCamelCase , do_lower_case=UpperCamelCase , split_by_punct=UpperCamelCase )
lowercase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
def UpperCamelCase__ (self : Union[str, Any] ):
'''simple docstring'''
lowercase__ = self.get_tokenizer()
lowercase__ = self.get_rust_tokenizer()
lowercase__ = '''I was born in 92000, and this is falsé.'''
lowercase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) )
lowercase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase )
lowercase__ = rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = self.get_rust_tokenizer()
lowercase__ = tokenizer.encode(UpperCamelCase )
lowercase__ = rust_tokenizer.encode(UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
def UpperCamelCase__ (self : int ):
'''simple docstring'''
lowercase__ = '''This is a test'''
lowercase__ = [13, 1, 4398, 25, 21, 1289]
lowercase__ = ['''▁''', '''T''', '''his''', '''▁is''', '''▁a''', '''▁test''']
lowercase__ = ['''▁''', '''<unk>''', '''his''', '''▁is''', '''▁a''', '''▁test''']
lowercase__ = DebertaVaTokenizer(UpperCamelCase , keep_accents=UpperCamelCase )
lowercase__ = DebertaVaTokenizerFast(UpperCamelCase , keep_accents=UpperCamelCase )
lowercase__ = tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = tokenizer.tokenize(UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = tokenizer.convert_ids_to_tokens(UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = rust_tokenizer.tokenize(UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = rust_tokenizer.convert_ids_to_tokens(UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
# fmt: off
lowercase__ = '''I was born in 92000, and this is falsé.'''
lowercase__ = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9]
lowercase__ = ['''▁''', '''I''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''é''', '''.''', ]
lowercase__ = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.''', ]
# fmt: on
lowercase__ = tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = tokenizer.tokenize(UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = tokenizer.convert_ids_to_tokens(UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = rust_tokenizer.tokenize(UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = rust_tokenizer.convert_ids_to_tokens(UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
def UpperCamelCase__ (self : int ):
'''simple docstring'''
lowercase__ = DebertaVaTokenizer(UpperCamelCase )
lowercase__ = tokenizer.encode('''sequence builders''' )
lowercase__ = tokenizer.encode('''multi-sequence build''' )
lowercase__ = tokenizer.build_inputs_with_special_tokens(UpperCamelCase )
lowercase__ = tokenizer.build_inputs_with_special_tokens(UpperCamelCase , UpperCamelCase )
self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , UpperCamelCase )
self.assertEqual(
[tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , UpperCamelCase , )
@slow
def UpperCamelCase__ (self : int ):
'''simple docstring'''
lowercase__ = {'''input_ids''': [[1, 39867, 36, 19390, 486, 27, 35052, 81436, 18, 60685, 1225, 7, 35052, 81436, 18, 9367, 16899, 18, 15937, 53, 594, 773, 18, 16287, 30465, 36, 15937, 6, 41139, 38, 36979, 60763, 191, 6, 34132, 99, 6, 50538, 390, 43230, 6, 34132, 2779, 20850, 14, 699, 1072, 1194, 36, 382, 10901, 53, 7, 699, 1072, 2084, 36, 20422, 630, 53, 19, 105, 3049, 1896, 1053, 16899, 1506, 11, 37978, 4243, 7, 1237, 31869, 200, 16566, 654, 6, 35052, 81436, 7, 55630, 13593, 4, 2], [1, 26, 15011, 13, 667, 8, 1053, 18, 23611, 1237, 72356, 12820, 34, 104134, 1209, 35, 13313, 6627, 21, 202, 347, 7, 164, 2399, 11, 46, 4485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1232, 2864, 15785, 14951, 105, 5, 8581, 1250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCamelCase , model_name='''microsoft/deberta-v2-xlarge''' , revision='''ad6e42c1532ddf3a15c39246b63f5559d558b670''' , )
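# Illustrative sketch (simplified, not the library implementation) of the
# split_by_punct pre-step these tests exercise: punctuation is broken out into
# standalone chunks before SentencePiece tokenization runs.
import unicodedata

def split_on_punct(text: str) -> list:
    chunks, buf = [], []
    for ch in text:
        if unicodedata.category(ch).startswith("P"):  # any Unicode punctuation
            if buf:
                chunks.append("".join(buf))
                buf = []
            chunks.append(ch)
        else:
            buf.append(ch)
    if buf:
        chunks.append("".join(buf))
    return chunks

print(split_on_punct("I was born in 92000, and this is falsé."))
# ['I was born in 92000', ',', ' and this is falsé', '.']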
| 2 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
"""edbeeching/decision-transformer-gym-hopper-medium""": (
"""https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"""
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class a__ ( snake_case__ ):
_a : Optional[int] = """decision_transformer"""
_a : Optional[int] = ["""past_key_values"""]
_a : Dict = {
"""max_position_embeddings""": """n_positions""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self , _A=1_7 , _A=4 , _A=1_2_8 , _A=4_0_9_6 , _A=True , _A=1 , _A=1_0_2_4 , _A=3 , _A=1 , _A=None , _A="relu" , _A=0.1 , _A=0.1 , _A=0.1 , _A=1E-5 , _A=0.02 , _A=True , _A=True , _A=5_0_2_5_6 , _A=5_0_2_5_6 , _A=False , _A=False , **_A , ):
"""simple docstring"""
__lowerCAmelCase = state_dim
__lowerCAmelCase = act_dim
__lowerCAmelCase = hidden_size
__lowerCAmelCase = max_ep_len
__lowerCAmelCase = action_tanh
__lowerCAmelCase = vocab_size
__lowerCAmelCase = n_positions
__lowerCAmelCase = n_layer
__lowerCAmelCase = n_head
__lowerCAmelCase = n_inner
__lowerCAmelCase = activation_function
__lowerCAmelCase = resid_pdrop
__lowerCAmelCase = embd_pdrop
__lowerCAmelCase = attn_pdrop
__lowerCAmelCase = layer_norm_epsilon
__lowerCAmelCase = initializer_range
__lowerCAmelCase = scale_attn_weights
__lowerCAmelCase = use_cache
__lowerCAmelCase = scale_attn_by_inverse_layer_idx
__lowerCAmelCase = reorder_and_upcast_attn
__lowerCAmelCase = bos_token_id
__lowerCAmelCase = eos_token_id
super().__init__(bos_token_id=_A , eos_token_id=_A , **_A )
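# Usage sketch via the released transformers class (equivalent to the config
# above; argument values are examples): the attribute_map lets the canonical
# names resolve to the GPT-2-style n_* fields.
from transformers import DecisionTransformerConfig

cfg = DecisionTransformerConfig(state_dim=17, act_dim=4, hidden_size=128)
assert cfg.num_attention_heads == cfg.n_head
assert cfg.max_position_embeddings == cfg.n_positions
print(cfg.model_type)  # "decision_transformer"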
| 92 |
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def _SCREAMING_SNAKE_CASE (A ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = [
'''encoder.version''',
'''decoder.version''',
'''model.encoder.version''',
'''model.decoder.version''',
'''_float_tensor''',
'''decoder.output_projection.weight''',
]
for k in ignore_keys:
state_dict.pop(A , A )
def _SCREAMING_SNAKE_CASE (A ) -> List[str]:
"""simple docstring"""
lowercase__ ,lowercase__ = emb.weight.shape
lowercase__ = nn.Linear(A , A , bias=A )
lowercase__ = emb.weight.data
return lin_layer
def _SCREAMING_SNAKE_CASE (A , A="facebook/mbart-large-en-ro" , A=False , A=False ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = torch.load(A , map_location='''cpu''' )['''model''']
remove_ignore_keys_(A )
lowercase__ = state_dict['''encoder.embed_tokens.weight'''].shape[0]
lowercase__ = MBartConfig.from_pretrained(A , vocab_size=A )
if mbart_aa and finetuned:
lowercase__ = '''relu'''
lowercase__ = state_dict['''decoder.embed_tokens.weight''']
lowercase__ = MBartForConditionalGeneration(A )
model.model.load_state_dict(A )
if finetuned:
lowercase__ = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
lowerCamelCase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'fairseq_path', type=str, help='bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'
)
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--hf_config',
default='facebook/mbart-large-cc25',
type=str,
help='Which huggingface architecture to use: mbart-large',
)
    parser.add_argument('--mbart_50', action='store_true', help='whether the model is mBART-50 checkpoint')
parser.add_argument('--finetuned', action='store_true', help='whether the model is a fine-tuned checkpoint')
lowerCamelCase : Any = parser.parse_args()
lowerCamelCase : List[str] = convert_fairseq_mbart_checkpoint_from_disk(
args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa
)
model.save_pretrained(args.pytorch_dump_folder_path)
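# Cleaned-up standalone sketch (small assumed dimensions) of the weight tying
# that make_linear_from_emb performs above: the embedding matrix doubles as
# the output projection, so no new parameters are created.
import torch
from torch import nn

emb = nn.Embedding(num_embeddings=1000, embedding_dim=16)
vocab_size, emb_size = emb.weight.shape
lin_layer = nn.Linear(emb_size, vocab_size, bias=False)
lin_layer.weight.data = emb.weight.data  # both weights have shape (vocab_size, emb_size)
assert lin_layer.weight.data_ptr() == emb.weight.data_ptr()  # truly shared storage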
| 2 | 0 |
'''simple docstring'''
_lowercase : Union[str, Any] = "0.18.2"
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
TaFilmDecoder,
TransformeraDModel,
UNetaDModel,
UNetaDConditionModel,
UNetaDModel,
UNetaDConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
KDPMaAncestralDiscreteScheduler,
KDPMaDiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
AltDiffusionImgaImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
KandinskyImgaImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaControlnetPipeline,
KandinskyVaaImgaImgPipeline,
KandinskyVaaInpaintPipeline,
KandinskyVaaPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
KandinskyVaaPriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
ShapEImgaImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImgaImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
StableDiffusionImgaImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
StableDiffusionInstructPixaPixPipeline,
StableDiffusionLatentUpscalePipeline,
StableDiffusionLDMaDPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
StableDiffusionPixaPixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
StableUnCLIPImgaImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
OnnxStableDiffusionImgaImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
FlaxStableDiffusionImgaImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
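# Generic sketch (names below are illustrative, not diffusers APIs) of the
# guard pattern repeated throughout this __init__: when a backend is missing,
# dummy placeholders are exported that raise a helpful error only when used.
def _make_dummy(name, backend):
    class _Dummy:
        def __init__(self, *args, **kwargs):
            raise ImportError(f"{name} requires {backend}; install it to use this class.")
    _Dummy.__name__ = name
    return _Dummy

try:
    import torch  # noqa: F401
    _torch_available = True
except ImportError:
    _torch_available = False

if not _torch_available:
    UNet2DModel = _make_dummy("UNet2DModel", "torch")  # fails loudly only on use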
| 93 |
'''simple docstring'''
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
lowerCamelCase : List[Any] = logging.getLogger(__name__)
class __lowerCAmelCase (lowercase_ ):
'''simple docstring'''
def __init__(self : Optional[Any] , UpperCamelCase : Any=-1 ):
'''simple docstring'''
lowercase__ = label_idx
def UpperCamelCase__ (self : Union[str, Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : Union[Split, str] ):
'''simple docstring'''
if isinstance(UpperCamelCase , UpperCamelCase ):
lowercase__ = mode.value
lowercase__ = os.path.join(UpperCamelCase , f"{mode}.txt" )
lowercase__ = 1
lowercase__ = []
with open(UpperCamelCase , encoding='''utf-8''' ) as f:
lowercase__ = []
lowercase__ = []
for line in f:
if line.startswith('''-DOCSTART-''' ) or line == "" or line == "\n":
if words:
examples.append(InputExample(guid=f"{mode}-{guid_index}" , words=UpperCamelCase , labels=UpperCamelCase ) )
guid_index += 1
lowercase__ = []
lowercase__ = []
else:
lowercase__ = line.split(''' ''' )
words.append(splits[0] )
if len(UpperCamelCase ) > 1:
labels.append(splits[self.label_idx].replace('''\n''' , '''''' ) )
else:
# Examples could have no label for mode = "test"
labels.append('''O''' )
if words:
examples.append(InputExample(guid=f"{mode}-{guid_index}" , words=UpperCamelCase , labels=UpperCamelCase ) )
return examples
def UpperCamelCase__ (self : Optional[int] , UpperCamelCase : TextIO , UpperCamelCase : TextIO , UpperCamelCase : List ):
'''simple docstring'''
lowercase__ = 0
for line in test_input_reader:
if line.startswith('''-DOCSTART-''' ) or line == "" or line == "\n":
writer.write(UpperCamelCase )
if not preds_list[example_id]:
example_id += 1
elif preds_list[example_id]:
lowercase__ = line.split()[0] + ''' ''' + preds_list[example_id].pop(0 ) + '''\n'''
writer.write(UpperCamelCase )
else:
logger.warning('''Maximum sequence length exceeded: No prediction for \'%s\'.''' , line.split()[0] )
def UpperCamelCase__ (self : Union[str, Any] , UpperCamelCase : str ):
'''simple docstring'''
if path:
with open(UpperCamelCase , '''r''' ) as f:
lowercase__ = f.read().splitlines()
if "O" not in labels:
lowercase__ = ['''O'''] + labels
return labels
else:
return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class __lowerCAmelCase (lowercase_ ):
'''simple docstring'''
def __init__(self : List[Any] ):
'''simple docstring'''
super().__init__(label_idx=-2 )
def UpperCamelCase__ (self : List[Any] , UpperCamelCase : str ):
'''simple docstring'''
if path:
with open(UpperCamelCase , '''r''' ) as f:
lowercase__ = f.read().splitlines()
if "O" not in labels:
lowercase__ = ['''O'''] + labels
return labels
else:
return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class __lowerCAmelCase (lowercase_ ):
'''simple docstring'''
def UpperCamelCase__ (self : Tuple , UpperCamelCase : int , UpperCamelCase : Union[Split, str] ):
'''simple docstring'''
if isinstance(UpperCamelCase , UpperCamelCase ):
lowercase__ = mode.value
lowercase__ = os.path.join(UpperCamelCase , f"{mode}.txt" )
lowercase__ = 1
lowercase__ = []
with open(UpperCamelCase , encoding='''utf-8''' ) as f:
for sentence in parse_incr(UpperCamelCase ):
lowercase__ = []
lowercase__ = []
for token in sentence:
words.append(token['''form'''] )
labels.append(token['''upos'''] )
assert len(UpperCamelCase ) == len(UpperCamelCase )
if words:
examples.append(InputExample(guid=f"{mode}-{guid_index}" , words=UpperCamelCase , labels=UpperCamelCase ) )
guid_index += 1
return examples
def UpperCamelCase__ (self : Tuple , UpperCamelCase : TextIO , UpperCamelCase : TextIO , UpperCamelCase : List ):
'''simple docstring'''
lowercase__ = 0
for sentence in parse_incr(UpperCamelCase ):
lowercase__ = preds_list[example_id]
lowercase__ = ''''''
for token in sentence:
out += f"{token['form']} ({token['upos']}|{s_p.pop(0 )}) "
out += "\n"
writer.write(UpperCamelCase )
example_id += 1
def UpperCamelCase__ (self : Union[str, Any] , UpperCamelCase : str ):
'''simple docstring'''
if path:
with open(UpperCamelCase , '''r''' ) as f:
return f.read().splitlines()
else:
return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
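# Self-contained sketch (invented sample data) of the CoNLL reading loop
# implemented above: blank lines and -DOCSTART- delimit sentences; column 0
# is the word and a later column carries the label.
def read_conll(lines, label_idx=-1):
    examples, words, labels = [], [], []
    for line in lines:
        if line.startswith("-DOCSTART-") or line.strip() == "":
            if words:
                examples.append((words, labels))
                words, labels = [], []
        else:
            cols = line.split()
            words.append(cols[0])
            labels.append(cols[label_idx] if len(cols) > 1 else "O")
    if words:
        examples.append((words, labels))
    return examples

sample = ["EU NNP B-ORG", "rejects VBZ O", "", "German JJ B-MISC"]
print(read_conll(sample))
# [(['EU', 'rejects'], ['B-ORG', 'O']), (['German'], ['B-MISC'])]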
| 2 | 0 |
import os
import random
import sys
from . import cryptomath_module as cryptoMath # noqa: N812
from . import rabin_miller as rabinMiller # noqa: N812
def __lowerCamelCase ( ):
"""simple docstring"""
print('''Making key files...''' )
make_key_files('''rsa''' , 1024 )
print('''Key files generation successful.''' )
def __lowerCamelCase ( UpperCAmelCase_ : int ):
"""simple docstring"""
print('''Generating prime p...''' )
a :Dict = rabinMiller.generate_large_prime(UpperCAmelCase_ )
print('''Generating prime q...''' )
a :Optional[Any] = rabinMiller.generate_large_prime(UpperCAmelCase_ )
a :int = p * q
print('''Generating e that is relatively prime to (p - 1) * (q - 1)...''' )
while True:
a :Optional[Any] = random.randrange(2 ** (key_size - 1) , 2 ** (key_size) )
if cryptoMath.gcd(UpperCAmelCase_ , (p - 1) * (q - 1) ) == 1:
break
print('''Calculating d that is mod inverse of e...''' )
a :Union[str, Any] = cryptoMath.find_mod_inverse(UpperCAmelCase_ , (p - 1) * (q - 1) )
a :Tuple = (n, e)
a :Optional[int] = (n, d)
return (public_key, private_key)
def __lowerCamelCase ( UpperCAmelCase_ : str , UpperCAmelCase_ : int ):
"""simple docstring"""
if os.path.exists(F'''{name}_pubkey.txt''' ) or os.path.exists(F'''{name}_privkey.txt''' ):
print('''\nWARNING:''' )
print(
F'''"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'''
'''Use a different name or delete these files and re-run this program.''' )
sys.exit()
a , a :List[str] = generate_key(UpperCAmelCase_ )
print(F'''\nWriting public key to file {name}_pubkey.txt...''' )
with open(F'''{name}_pubkey.txt''' , '''w''' ) as out_file:
out_file.write(F'''{key_size},{public_key[0]},{public_key[1]}''' )
print(F'''Writing private key to file {name}_privkey.txt...''' )
with open(F'''{name}_privkey.txt''' , '''w''' ) as out_file:
out_file.write(F'''{key_size},{private_key[0]},{private_key[1]}''' )
if __name__ == "__main__":
main()
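# Tiny round-trip check (toy 8-bit primes, NOT secure; requires Python 3.8+
# for pow with a negative exponent) of the invariants the key generator above
# relies on: e*d == 1 (mod (p-1)*(q-1)), so decrypting recovers the message.
p, q = 61, 53
n = p * q
phi = (p - 1) * (q - 1)
e = 17                   # chosen coprime with phi
d = pow(e, -1, phi)      # modular inverse of e
message = 42
cipher = pow(message, e, n)
assert pow(cipher, d, n) == message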
| 94 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : Dict = logging.get_logger(__name__)
lowerCamelCase : Union[str, Any] = {
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class __lowerCAmelCase (lowercase_ ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = """megatron-bert"""
def __init__(self : Tuple , UpperCamelCase : Optional[int]=29056 , UpperCamelCase : Optional[Any]=1024 , UpperCamelCase : Any=24 , UpperCamelCase : int=16 , UpperCamelCase : Optional[int]=4096 , UpperCamelCase : int="gelu" , UpperCamelCase : int=0.1 , UpperCamelCase : Tuple=0.1 , UpperCamelCase : Any=512 , UpperCamelCase : int=2 , UpperCamelCase : Dict=0.02 , UpperCamelCase : Dict=1E-12 , UpperCamelCase : List[Any]=0 , UpperCamelCase : Optional[int]="absolute" , UpperCamelCase : List[Any]=True , **UpperCamelCase : str , ):
'''simple docstring'''
super().__init__(pad_token_id=UpperCamelCase , **UpperCamelCase )
lowercase__ = vocab_size
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = hidden_act
lowercase__ = intermediate_size
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = max_position_embeddings
lowercase__ = type_vocab_size
lowercase__ = initializer_range
lowercase__ = layer_norm_eps
lowercase__ = position_embedding_type
lowercase__ = use_cache
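# Usage sketch via the released transformers class (equivalent to the config
# above); the default vocab_size matches the constructor signature shown here.
from transformers import MegatronBertConfig

cfg = MegatronBertConfig(hidden_size=1024, num_hidden_layers=24, num_attention_heads=16)
assert cfg.vocab_size == 29056  # Megatron-BERT's padded vocabulary
print(cfg.model_type)           # "megatron-bert"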
| 2 | 0 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=UpperCamelCase__)
class __lowerCAmelCase ( UpperCamelCase__):
# `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
_lowercase : str = field(default="""question-answering-extractive""" , metadata={"""include_in_asdict_even_if_is_default""": True})
_lowercase : ClassVar[Features] = Features({"""question""": Value("""string"""), """context""": Value("""string""")})
_lowercase : ClassVar[Features] = Features(
{
"""answers""": Sequence(
{
"""text""": Value("""string"""),
"""answer_start""": Value("""int32"""),
})
})
_lowercase : str = "question"
_lowercase : str = "context"
_lowercase : str = "answers"
@property
def _lowercase ( self ) -> Dict[str, str]:
'''simple docstring'''
return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
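# Sketch (toy record invented for illustration) of what the column mapping
# above is for: renaming a dataset's own columns onto the canonical
# question/context/answers schema declared by the task template.
from datasets import Features, Sequence, Value

input_schema = Features({"question": Value("string"), "context": Value("string")})
label_schema = Features(
    {"answers": Sequence({"text": Value("string"), "answer_start": Value("int32")})}
)
column_mapping = {"my_question": "question", "my_context": "context", "my_answers": "answers"}
record = {
    "my_question": "Who?",
    "my_context": "Ada wrote it.",
    "my_answers": {"text": ["Ada"], "answer_start": [0]},
}
canonical = {column_mapping[k]: v for k, v in record.items()}
print(sorted(canonical))  # ['answers', 'context', 'question']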
| 95 |
'''simple docstring'''
# Lint as: python3
import itertools
import os
import re
lowerCamelCase : Any = re.compile(R'([A-Z]+)([A-Z][a-z])')
lowerCamelCase : str = re.compile(R'([a-z\d])([A-Z])')
lowerCamelCase : Optional[int] = re.compile(R'(?<!_)_(?!_)')
lowerCamelCase : List[Any] = re.compile(R'(_{2,})')
lowerCamelCase : str = R'^\w+(\.\w+)*$'
lowerCamelCase : Dict = R'<>:/\|?*'
def _SCREAMING_SNAKE_CASE (A ) -> Any:
"""simple docstring"""
lowercase__ = _uppercase_uppercase_re.sub(R'''\1_\2''' , A )
lowercase__ = _lowercase_uppercase_re.sub(R'''\1_\2''' , A )
return name.lower()
def _SCREAMING_SNAKE_CASE (A ) -> Tuple:
"""simple docstring"""
lowercase__ = _single_underscore_re.split(A )
lowercase__ = [_multiple_underscores_re.split(A ) for n in name]
return "".join(n.capitalize() for n in itertools.chain.from_iterable(A ) if n != '''''' )
def _SCREAMING_SNAKE_CASE (A ) -> Tuple:
"""simple docstring"""
if os.path.basename(A ) != name:
raise ValueError(f"Should be a dataset name, not a path: {name}" )
return camelcase_to_snakecase(A )
def _SCREAMING_SNAKE_CASE (A , A ) -> Optional[Any]:
"""simple docstring"""
if os.path.basename(A ) != name:
raise ValueError(f"Should be a dataset name, not a path: {name}" )
if not re.match(_split_re , A ):
raise ValueError(f"Split name should match '{_split_re}'' but got '{split}'." )
return f"{filename_prefix_for_name(A )}-{split}"
def _SCREAMING_SNAKE_CASE (A , A , A , A=None ) -> List[str]:
"""simple docstring"""
lowercase__ = filename_prefix_for_split(A , A )
if filetype_suffix:
prefix += f".{filetype_suffix}"
lowercase__ = os.path.join(A , A )
return f"{filepath}*"
def _SCREAMING_SNAKE_CASE (A , A , A , A=None , A=None ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = filename_prefix_for_split(A , A )
lowercase__ = os.path.join(A , A )
if shard_lengths:
lowercase__ = len(A )
lowercase__ = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(A )]
if filetype_suffix:
lowercase__ = [filename + f".{filetype_suffix}" for filename in filenames]
return filenames
else:
lowercase__ = prefix
if filetype_suffix:
filename += f".{filetype_suffix}"
return [filename]
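# Standalone demo (regexes re-declared here so the snippet runs alone) of the
# camel-case to snake-case conversion implemented at the top of this file.
import re

_upper_upper = re.compile(r"([A-Z]+)([A-Z][a-z])")
_lower_upper = re.compile(r"([a-z\d])([A-Z])")

def to_snakecase(name: str) -> str:
    name = _upper_upper.sub(r"\1_\2", name)
    name = _lower_upper.sub(r"\1_\2", name)
    return name.lower()

print(to_snakecase("HTMLParser"))  # html_parser
print(to_snakecase("SquadV2"))     # squad_v2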
| 2 | 0 |
"""simple docstring"""
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = """▁"""
lowercase__ = {"""vocab_file""": """vocab.txt""", """sentencepiece_model_ckpt""": """sentencepiece.bpe.model"""}
lowercase__ = {
"""sentencepiece_model_file""": """sentencepiece.bpe.model""",
"""vocab_file""": """vocab.txt""",
}
lowercase__ = {
"""vocab_file""": {
"""ernie-m-base""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt""",
"""ernie-m-large""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt""",
},
"""sentencepiece_model_file""": {
"""ernie-m-base""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model""",
"""ernie-m-large""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model""",
},
}
lowercase__ = {
"""ernie-m-base""": 514,
"""ernie-m-large""": 514,
}
lowercase__ = {
"""ernie-m-base""": {"""do_lower_case""": False},
"""ernie-m-large""": {"""do_lower_case""": False},
}
class lowerCAmelCase__ ( lowercase ):
'''simple docstring'''
lowerCamelCase__ = ["input_ids"]
lowerCamelCase__ = VOCAB_FILES_NAMES
lowerCamelCase__ = PRETRAINED_INIT_CONFIGURATION
lowerCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ = RESOURCE_FILES_NAMES
def __init__( self , lowercase , lowercase=None , lowercase=False , lowercase="utf8" , lowercase="[UNK]" , lowercase="[SEP]" , lowercase="[PAD]" , lowercase="[CLS]" , lowercase="[MASK]" , lowercase = None , **lowercase , ):
# Mask token behave like a normal word, i.e. include the space before it and
# is included in the raw text, there should be a match in a non-normalized sentence.
_lowerCamelCase : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=lowercase , unk_token=lowercase , sep_token=lowercase , pad_token=lowercase , cls_token=lowercase , mask_token=lowercase , vocab_file=lowercase , encoding=lowercase , sp_model_kwargs=self.sp_model_kwargs , **lowercase , )
_lowerCamelCase : str = do_lower_case
_lowerCamelCase : Optional[Any] = sentencepiece_model_ckpt
_lowerCamelCase : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(lowercase )
# to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
if vocab_file is not None:
_lowerCamelCase : Dict = self.load_vocab(filepath=lowercase )
else:
_lowerCamelCase : Optional[int] = {self.sp_model.id_to_piece(lowercase ): id for id in range(self.sp_model.get_piece_size() )}
_lowerCamelCase : int = {v: k for k, v in self.vocab.items()}
def A_ ( self , lowercase ):
if text is None:
return None
_lowerCamelCase : Tuple = self.tokenize(lowercase )
_lowerCamelCase, _lowerCamelCase : Any = '', []
for i, ch in enumerate(lowercase ):
if ch in self.SP_CHAR_MAPPING:
_lowerCamelCase : List[str] = self.SP_CHAR_MAPPING.get(lowercase )
else:
_lowerCamelCase : Dict = unicodedata.normalize('NFKC' , lowercase )
if self.is_whitespace(lowercase ):
continue
normalized_text += ch
char_mapping.extend([i] * len(lowercase ) )
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Union[str, Any] = normalized_text, [], 0
if self.do_lower_case:
_lowerCamelCase : List[str] = text.lower()
for token in split_tokens:
if token[:1] == "▁":
_lowerCamelCase : List[str] = token[1:]
_lowerCamelCase : Union[str, Any] = text[offset:].index(lowercase ) + offset
_lowerCamelCase : Optional[int] = start + len(lowercase )
token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) )
_lowerCamelCase : Optional[Any] = end
return token_mapping
@property
def A_ ( self ):
return len(self.vocab )
def A_ ( self ):
return dict(self.vocab , **self.added_tokens_encoder )
def __getstate__( self ):
_lowerCamelCase : str = self.__dict__.copy()
_lowerCamelCase : Optional[int] = None
return state
def __setstate__( self , lowercase ):
_lowerCamelCase : Union[str, Any] = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
_lowerCamelCase : List[str] = {}
_lowerCamelCase : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.sentencepiece_model_ckpt )
def A_ ( self , lowercase ):
return "".join((self.SP_CHAR_MAPPING.get(lowercase , lowercase ) for c in text) )
def A_ ( self , lowercase , lowercase=False , lowercase=64 , lowercase=0.1 ):
if self.sp_model_kwargs.get('enable_sampling' ) is True:
_lowerCamelCase : Optional[int] = True
if self.sp_model_kwargs.get('alpha' ) is not None:
_lowerCamelCase : Tuple = self.sp_model_kwargs.get('alpha' )
if self.sp_model_kwargs.get('nbest_size' ) is not None:
_lowerCamelCase : Tuple = self.sp_model_kwargs.get('nbest_size' )
if not enable_sampling:
_lowerCamelCase : Optional[Any] = self.sp_model.EncodeAsPieces(lowercase )
else:
_lowerCamelCase : List[str] = self.sp_model.SampleEncodeAsPieces(lowercase , lowercase , lowercase )
_lowerCamelCase : Tuple = []
for pi, piece in enumerate(lowercase ):
if piece == SPIECE_UNDERLINE:
if not pieces[pi + 1].startswith(lowercase ) and pi != 0:
new_pieces.append(lowercase )
continue
else:
continue
_lowerCamelCase : Tuple = 0
for i, chunk in enumerate(lowercase ):
if chunk == SPIECE_UNDERLINE:
continue
if self.is_ch_char(lowercase ) or self.is_punct(lowercase ):
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
new_pieces.append(lowercase )
_lowerCamelCase : int = i + 1
elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
_lowerCamelCase : Any = i
elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
_lowerCamelCase : Tuple = i
if len(lowercase ) > lst_i:
new_pieces.append(piece[lst_i:] )
return new_pieces
def A_ ( self , lowercase ):
_lowerCamelCase : Optional[int] = ''.join(lowercase ).replace(lowercase , ' ' ).strip()
return out_string
def A_ ( self , lowercase ):
_lowerCamelCase : Union[str, Any] = self.convert_ids_to_tokens(lowercase )
_lowerCamelCase : Any = ''.join(lowercase ).replace(lowercase , ' ' ).strip()
return out_string
def A_ ( self , lowercase ):
return self.vocab.get(lowercase , self.vocab.get(self.unk_token ) )
def A_ ( self , lowercase ):
return self.reverse_vocab.get(lowercase , self.unk_token )
def A_ ( self , lowercase , lowercase=None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_lowerCamelCase : List[Any] = [self.cls_token_id]
_lowerCamelCase : Optional[Any] = [self.sep_token_id]
return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep
def A_ ( self , lowercase , lowercase=None ):
if offset_mapping_a is None:
return [(0, 0)] + offset_mapping_a + [(0, 0)]
return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + [(0, 0)]
def A_ ( self , lowercase , lowercase=None , lowercase=False ):
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'You should not supply a second sequence if the provided sequence of '
'ids is already formatted with special tokens for the model.' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(lowercase )) + [1, 1] + ([0] * len(lowercase )) + [1]
return [1] + ([0] * len(lowercase )) + [1]
def A_ ( self , lowercase , lowercase = None ):
# called when `add_special_tokens` is True, so align with `build_inputs_with_special_tokens` method
if token_ids_a is None:
# [CLS] X [SEP]
return (len(lowercase ) + 2) * [0]
# [CLS] A [SEP] [SEP] B [SEP]
return [0] * (len(lowercase ) + 1) + [1] * (len(lowercase ) + 3)
def A_ ( self , lowercase ):
if "\u4e00" <= char <= "\u9fff":
return True
return False
def A_ ( self , lowercase ):
if ("a" <= char <= "z") or ("A" <= char <= "Z"):
return True
return False
def A_ ( self , lowercase ):
if char in ",;:.?!~,;:。?!《》【】":
return True
return False
def A_ ( self , lowercase ):
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
if len(lowercase ) == 1:
_lowerCamelCase : Tuple = unicodedata.category(lowercase )
if cat == "Zs":
return True
return False
def A_ ( self , lowercase ):
_lowerCamelCase : Tuple = {}
with io.open(lowercase , 'r' , encoding='utf-8' ) as f:
for index, line in enumerate(lowercase ):
_lowerCamelCase : int = line.rstrip('\n' )
_lowerCamelCase : Optional[Any] = int(lowercase )
return token_to_idx
def A_ ( self , lowercase , lowercase = None ):
_lowerCamelCase : Union[str, Any] = 0
if os.path.isdir(lowercase ):
_lowerCamelCase : List[Any] = os.path.join(
lowercase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
else:
_lowerCamelCase : Optional[int] = (filename_prefix + '-' if filename_prefix else '') + save_directory
with open(lowercase , 'w' , encoding='utf-8' ) as writer:
            for token, token_index in sorted(self.vocab.items() , key=lambda lowercase : lowercase[1] ):
if index != token_index:
logger.warning(
F'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'''
' Please check that the vocabulary is not corrupted!' )
_lowerCamelCase : Optional[Any] = token_index
writer.write(token + '\n' )
index += 1
_lowerCamelCase : List[str] = os.path.join(lowercase , 'sentencepiece.bpe.model' )
with open(lowercase , 'wb' ) as fi:
_lowerCamelCase : Dict = self.sp_model.serialized_model_proto()
fi.write(lowercase )
        return (vocab_file,)
 | 96 |
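# Self-contained sketch (simplified, not the library code) of the
# offset-mapping idea from get_offset_mapping above: strip the "▁" word
# marker, then locate each piece in the original text to recover
# (start, end) character spans.
def offsets(text: str, pieces: list) -> list:
    spans, offset = [], 0
    for piece in pieces:
        token = piece[1:] if piece.startswith("\u2581") else piece
        start = text.index(token, offset)
        end = start + len(token)
        spans.append((start, end))
        offset = end
    return spans

print(offsets("hello world", ["\u2581hello", "\u2581world"]))  # [(0, 5), (6, 11)]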
'''simple docstring'''
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class __lowerCAmelCase :
'''simple docstring'''
def __init__(self : str , UpperCamelCase : Tuple , UpperCamelCase : Optional[int]=99 , UpperCamelCase : Optional[int]=13 , UpperCamelCase : Tuple=16 , UpperCamelCase : Union[str, Any]=7 , UpperCamelCase : List[Any]=True , UpperCamelCase : List[str]=True , UpperCamelCase : str=True , UpperCamelCase : Tuple=False , UpperCamelCase : str=True , UpperCamelCase : Tuple=2 , UpperCamelCase : Optional[int]=32 , UpperCamelCase : Any=4 , UpperCamelCase : Optional[int]=4 , UpperCamelCase : Tuple=30 , UpperCamelCase : str=0 , UpperCamelCase : Tuple=1 , UpperCamelCase : List[Any]=2 , UpperCamelCase : str=None , ):
'''simple docstring'''
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = decoder_seq_length
# For common tests
lowercase__ = self.decoder_seq_length
lowercase__ = is_training
lowercase__ = use_attention_mask
lowercase__ = use_labels
lowercase__ = vocab_size
lowercase__ = d_model
lowercase__ = d_model
lowercase__ = decoder_layers
lowercase__ = decoder_layers
lowercase__ = decoder_ffn_dim
lowercase__ = decoder_attention_heads
lowercase__ = decoder_attention_heads
lowercase__ = eos_token_id
lowercase__ = bos_token_id
lowercase__ = pad_token_id
lowercase__ = decoder_start_token_id
lowercase__ = use_cache
lowercase__ = max_position_embeddings
lowercase__ = None
lowercase__ = decoder_seq_length
lowercase__ = 2
lowercase__ = 1
def UpperCamelCase__ (self : str ):
'''simple docstring'''
lowercase__ = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
lowercase__ = None
if self.use_attention_mask:
lowercase__ = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 )
lowercase__ = None
if self.use_labels:
lowercase__ = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
lowercase__ = TrOCRConfig(
vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , )
return (config, input_ids, attention_mask, lm_labels)
def UpperCamelCase__ (self : Tuple , UpperCamelCase : List[Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : Tuple , UpperCamelCase : List[str] , ):
'''simple docstring'''
lowercase__ = True
lowercase__ = TrOCRDecoder(config=UpperCamelCase ).to(UpperCamelCase ).eval()
lowercase__ = input_ids[:2]
input_ids[input_ids == 0] += 1
# first forward pass
lowercase__ = model(UpperCamelCase , use_cache=UpperCamelCase )
lowercase__ = model(UpperCamelCase )
lowercase__ = model(UpperCamelCase , use_cache=UpperCamelCase )
self.parent.assertTrue(len(UpperCamelCase ) == len(UpperCamelCase ) )
self.parent.assertTrue(len(UpperCamelCase ) == len(UpperCamelCase ) + 1 )
lowercase__ = outputs['''past_key_values''']
# create hypothetical next token and extent to next_input_ids
lowercase__ = ids_tensor((2, 1) , config.vocab_size - 1 ) + 1
# append to next input_ids and
lowercase__ = torch.cat([input_ids, next_tokens] , dim=-1 )
lowercase__ = model(UpperCamelCase )['''last_hidden_state''']
lowercase__ = model(UpperCamelCase , past_key_values=UpperCamelCase )['''last_hidden_state''']
# select random slice
lowercase__ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowercase__ = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
lowercase__ = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
assert torch.allclose(UpperCamelCase , UpperCamelCase , atol=1E-3 )
def UpperCamelCase__ (self : Optional[Any] ):
'''simple docstring'''
lowercase__ = self.prepare_config_and_inputs()
lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ = config_and_inputs
lowercase__ = {'''input_ids''': input_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
@require_torch
class __lowerCAmelCase (lowercase_ , lowercase_ , lowercase_ , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
lowerCAmelCase__ : List[Any] = (TrOCRForCausalLM,) if is_torch_available() else ()
lowerCAmelCase__ : Optional[Any] = {"""text-generation""": TrOCRForCausalLM} if is_torch_available() else {}
lowerCAmelCase__ : Optional[Any] = True
lowerCAmelCase__ : List[str] = False
def UpperCamelCase__ (self : Any ):
'''simple docstring'''
lowercase__ = TrOCRStandaloneDecoderModelTester(self , is_training=UpperCamelCase )
lowercase__ = ConfigTester(self , config_class=UpperCamelCase )
def UpperCamelCase__ (self : List[str] ):
'''simple docstring'''
pass
def UpperCamelCase__ (self : Optional[int] ):
'''simple docstring'''
pass
def UpperCamelCase__ (self : Any ):
'''simple docstring'''
pass
def UpperCamelCase__ (self : Any ):
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCamelCase__ (self : Union[str, Any] ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past(*UpperCamelCase )
def UpperCamelCase__ (self : Optional[int] ):
'''simple docstring'''
return
@unittest.skip('''The model doesn\'t support left padding''' ) # and it's not used enough to be worth fixing :)
def UpperCamelCase__ (self : List[str] ):
'''simple docstring'''
pass
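# Shape of the cache-consistency assertion used in
# create_and_check_decoder_model_past above, shown on dummy tensors (no model;
# the tensors stand in for decoder hidden states): the last position of a
# full forward pass must match a single cached step on a random feature slice.
import torch

torch.manual_seed(0)
output_from_no_past = torch.randn(2, 4, 8)                 # full-sequence outputs
output_from_past = output_from_no_past[:, -1:, :].clone()  # stands in for the cached step
random_slice_idx = torch.randint(0, 8, (1,)).item()
assert torch.allclose(
    output_from_no_past[:, -1, random_slice_idx],
    output_from_past[:, 0, random_slice_idx],
    atol=1e-3,
)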
| 2 | 0 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class CLIPSegProcessorTest( unittest.TestCase ):
    """simple docstring"""
    def setUp(self ):
        '''simple docstring'''
        self.tmpdirname = tempfile.mkdtemp()
        # fmt: off
        vocab = ['''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>''']
        # fmt: on
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>''', '''''']
        self.special_tokens_map = {'''unk_token''': '''<unk>'''}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '''\n''' )
        with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write('''\n'''.join(merges ) )
        image_processor_map = {
            '''do_resize''': True,
            '''size''': 20,
            '''do_center_crop''': True,
            '''crop_size''': 18,
            '''do_normalize''': True,
            '''image_mean''': [0.48145466, 0.4578275, 0.40821073],
            '''image_std''': [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname , IMAGE_PROCESSOR_NAME )
        with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
            json.dump(image_processor_map , fp )
    def get_tokenizer(self , **kwargs ):
        '''simple docstring'''
        return CLIPTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_rust_tokenizer(self , **kwargs ):
        '''simple docstring'''
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **kwargs )
    def get_image_processor(self , **kwargs ):
        '''simple docstring'''
        return ViTImageProcessor.from_pretrained(self.tmpdirname , **kwargs )
    def tearDown(self ):
        '''simple docstring'''
        shutil.rmtree(self.tmpdirname )
    def prepare_image_inputs(self ):
        '''simple docstring'''
        # build a random uint8 image and convert it to PIL
        image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
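    # (prepare_image_inputs returns a single random 30x400 RGB PIL image; the
    # tests below reuse it wherever pixel inputs are required.)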
    def test_save_load_pretrained_default(self ):
        '''simple docstring'''
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()
        processor_slow = CLIPSegProcessor(tokenizer=tokenizer_slow , image_processor=image_processor )
        processor_slow.save_pretrained(self.tmpdirname )
        processor_slow = CLIPSegProcessor.from_pretrained(self.tmpdirname , use_fast=False )
        processor_fast = CLIPSegProcessor(tokenizer=tokenizer_fast , image_processor=image_processor )
        processor_fast.save_pretrained(self.tmpdirname )
        processor_fast = CLIPSegProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
        self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertIsInstance(processor_slow.tokenizer , CLIPTokenizer )
        self.assertIsInstance(processor_fast.tokenizer , CLIPTokenizerFast )
        self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertIsInstance(processor_slow.image_processor , ViTImageProcessor )
        self.assertIsInstance(processor_fast.image_processor , ViTImageProcessor )
    def test_save_load_pretrained_additional_features(self ):
        '''simple docstring'''
        processor = CLIPSegProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False , padding_value=1.0 )
        processor = CLIPSegProcessor.from_pretrained(
            self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=False , padding_value=1.0 )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , CLIPTokenizerFast )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , ViTImageProcessor )
    def test_image_processor(self ):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer , image_processor=image_processor )
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input , return_tensors='''np''' )
        input_processor = processor(images=image_input , return_tensors='''np''' )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
    def test_tokenizer(self ):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = '''lower newer'''
        encoded_processor = processor(text=input_str )
        encoded_tok = tokenizer(input_str )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    def test_processor(self ):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = '''lower newer'''
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
        self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''pixel_values'''] )
        # test if it raises when no input is passed
        with pytest.raises(ValueError ):
            processor()
    def test_visual_prompt(self ):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer , image_processor=image_processor )
        image_input = self.prepare_image_inputs()
        visual_prompt_input = self.prepare_image_inputs()
        inputs = processor(images=image_input , visual_prompt=visual_prompt_input )
        self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''conditional_pixel_values'''] )
        # test if it raises when no input is passed
        with pytest.raises(ValueError ):
            processor()
    def test_tokenizer_decode(self ):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer , image_processor=image_processor )
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids )
        decoded_tok = tokenizer.batch_decode(predicted_ids )
        self.assertListEqual(decoded_tok , decoded_processor )
| 97 |
'''simple docstring'''
def remove_digit(num: int ) -> int:
    """simple docstring"""
    if not isinstance(num , int ):
        raise TypeError('''only integers accepted as input''' )
    else:
        num_string = str(abs(num ) )
        num_transpositions = [list(num_string ) for _ in range(len(num_string ) )]
        for index in range(len(num_string ) ):
            num_transpositions[index].pop(index )
        return max(
            int(''''''.join(transposition ) ) for transposition in num_transpositions )
if __name__ == "__main__":
__import__('doctest').testmod()
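    # Worked example for the helper above: removing one digit from 9132 yields
    # 132, 932, 912 and 913, so the largest reachable value is 932.
    print(remove_digit(9132))  # 932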
| 2 | 0 |
"""simple docstring"""
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
class CursorInfo(ctypes.Structure ):
    """simple docstring"""
    # _fields_ is the layout hook that ctypes expects on a Structure subclass
    _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]
def hide_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11 )
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle , ctypes.byref(ci ) )
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle , ctypes.byref(ci ) )
    elif os.name == "posix":
        sys.stdout.write('\033[?25l' )
        sys.stdout.flush()
def show_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11 )
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle , ctypes.byref(ci ) )
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle , ctypes.byref(ci ) )
    elif os.name == "posix":
        sys.stdout.write('\033[?25h' )
        sys.stdout.flush()
@contextmanager
def hide():
    try:
        hide_cursor()
        yield
    finally:
        show_cursor()
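# A minimal usage sketch (assumes an interactive terminal; the cursor is
# restored even if the body raises):
#
#     with hide():
#         input("the cursor stays hidden while this prompt is shown")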
| 98 |
'''simple docstring'''
import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
import numpy as np
from . import residue_constants
FeatureDict = Mapping[str, np.ndarray]
ModelOutput = Mapping[str, Any]  # Is a nested dict.
PICO_TO_ANGSTROM = 0.01
@dataclasses.dataclass(frozen=True )
class Protein:
    '''simple docstring'''
    atom_positions: np.ndarray  # [num_res, num_atom_type, 3]
    # Amino-acid type for each residue represented as an integer between 0 and
    # 20, where 20 is 'X'.
    aatype: np.ndarray  # [num_res]
    # Binary float mask to indicate presence of a particular atom. 1.0 if an atom
    # is present and 0.0 if not. This should be used for loss masking.
    atom_mask: np.ndarray  # [num_res, num_atom_type]
    # Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
    residue_index: np.ndarray  # [num_res]
    # B-factors, or temperature factors, of each residue (in sq. angstroms units),
    # representing the displacement of the residue from its ground truth mean
    # value.
    b_factors: np.ndarray  # [num_res, num_atom_type]
    # Chain indices for multi-chain predictions
    chain_index: Optional[np.ndarray] = None
    # Optional remark about the protein. Included as a comment in output PDB
    # files
    remark: Optional[str] = None
    # Templates used to generate this protein (prediction-only)
    parents: Optional[Sequence[str]] = None
    # Chain corresponding to each parent
    parents_chain_index: Optional[Sequence[int]] = None
def from_proteinnet_string(proteinnet_str: str ) -> Protein:
    """simple docstring"""
    tag_re = R'''(\[[A-Z]+\]\n)'''
    tags = [tag.strip() for tag in re.split(tag_re , proteinnet_str ) if len(tag ) > 0]
    groups = zip(tags[0::2] , [l.split('''\n''' ) for l in tags[1::2]] )
    atoms = ["N", "CA", "C"]
    aatype = None
    atom_positions = None
    atom_mask = None
    for g in groups:
        if "[PRIMARY]" == g[0]:
            seq = g[1][0].strip()
            for i in range(len(seq ) ):
                if seq[i] not in residue_constants.restypes:
                    seq = seq[:i] + '''X''' + seq[i + 1 :]  # strings are immutable, so rebuild
            aatype = np.array(
                [residue_constants.restype_order.get(res_symbol , residue_constants.restype_num ) for res_symbol in seq] )
        elif "[TERTIARY]" == g[0]:
            tertiary = []
            for axis in range(3 ):
                tertiary.append(list(map(float , g[1][axis].split() ) ) )
            tertiary_np = np.array(tertiary )
            atom_positions = np.zeros((len(tertiary[0] ) // 3, residue_constants.atom_type_num, 3) ).astype(np.float32 )
            for i, atom in enumerate(atoms ):
                atom_positions[:, residue_constants.atom_order[atom], :] = np.transpose(tertiary_np[:, i::3] )
            atom_positions *= PICO_TO_ANGSTROM
        elif "[MASK]" == g[0]:
            mask = np.array(list(map({'''-''': 0, '''+''': 1}.get , g[1][0].strip() ) ) )
            atom_mask = np.zeros(
                (
                    len(mask ),
                    residue_constants.atom_type_num,
                ) ).astype(np.float32 )
            for i, atom in enumerate(atoms ):
                atom_mask[:, residue_constants.atom_order[atom]] = 1
            atom_mask *= mask[..., None]
    assert aatype is not None
    return Protein(
        atom_positions=atom_positions , atom_mask=atom_mask , aatype=aatype , residue_index=np.arange(len(aatype ) ) , b_factors=None , )
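# A sketch of the input the parser above expects (assumed, abbreviated
# ProteinNet layout): a [PRIMARY] block with the one-letter residue sequence,
# a [TERTIARY] block with three whitespace-separated coordinate rows
# (x, y and z, in picometers), and a [MASK] block of '+'/'-' flags.
# Any other tagged section is skipped.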
def get_pdb_headers(prot: Protein , chain_id: int = 0 ) -> List[str]:
    """simple docstring"""
    pdb_headers = []
    remark = prot.remark
    if remark is not None:
        pdb_headers.append(f"REMARK {remark}" )
    parents = prot.parents
    parents_chain_index = prot.parents_chain_index
    if parents is not None and parents_chain_index is not None:
        parents = [p for i, p in zip(parents , parents_chain_index ) if i == chain_id]
    if parents is None or len(parents ) == 0:
        parents = ['''N/A''']
    pdb_headers.append(f"PARENT {' '.join(parents )}" )
    return pdb_headers
def add_pdb_headers(prot: Protein , pdb_str: str ) -> str:
    """simple docstring"""
    out_pdb_lines = []
    lines = pdb_str.split('''\n''' )
    remark = prot.remark
    if remark is not None:
        out_pdb_lines.append(f"REMARK {remark}" )
    parents_per_chain: List[List[str]]
    if prot.parents is not None and len(prot.parents ) > 0:
        parents_per_chain = []
        if prot.parents_chain_index is not None:
            parent_dict = {}
            for p, i in zip(prot.parents , prot.parents_chain_index ):
                parent_dict.setdefault(str(i ) , [] )
                parent_dict[str(i )].append(p )
            max_idx = max([int(chain_idx ) for chain_idx in parent_dict] )
            for i in range(max_idx + 1 ):
                chain_parents = parent_dict.get(str(i ) , ['''N/A'''] )
                parents_per_chain.append(chain_parents )
        else:
            parents_per_chain.append(list(prot.parents ) )
    else:
        parents_per_chain = [['''N/A''']]
    def make_parent_line(p ) -> str:
        return f"PARENT {' '.join(p )}"
    out_pdb_lines.append(make_parent_line(parents_per_chain[0] ) )
    chain_counter = 0
    for i, l in enumerate(lines ):
        if "PARENT" not in l and "REMARK" not in l:
            out_pdb_lines.append(l )
        if "TER" in l and "END" not in lines[i + 1]:
            chain_counter += 1
            if not chain_counter >= len(parents_per_chain ):
                chain_parents = parents_per_chain[chain_counter]
            else:
                chain_parents = ['''N/A''']
            out_pdb_lines.append(make_parent_line(chain_parents ) )
    return "\n".join(out_pdb_lines )
def to_pdb(prot: Protein ) -> str:
    """simple docstring"""
    restypes = residue_constants.restypes + ['''X''']
    def res_1to3(r: int ) -> str:
        return residue_constants.restype_1to3.get(restypes[r] , '''UNK''' )
    atom_types = residue_constants.atom_types
    pdb_lines = []
    atom_mask = prot.atom_mask
    aatype = prot.aatype
    atom_positions = prot.atom_positions
    residue_index = prot.residue_index.astype(np.int32 )
    b_factors = prot.b_factors
    chain_index = prot.chain_index
    if np.any(aatype > residue_constants.restype_num ):
        raise ValueError('''Invalid aatypes.''' )
    headers = get_pdb_headers(prot )
    if len(headers ) > 0:
        pdb_lines.extend(headers )
    n = aatype.shape[0]
    atom_index = 1
    prev_chain_index = 0
    chain_tags = string.ascii_uppercase
    chain_tag = None
    # Add all atom sites.
    for i in range(n ):
        res_name_3 = res_1to3(aatype[i] )
        for atom_name, pos, mask, b_factor in zip(atom_types , atom_positions[i] , atom_mask[i] , b_factors[i] ):
            if mask < 0.5:
                continue
            record_type = '''ATOM'''
            name = atom_name if len(atom_name ) == 4 else f" {atom_name}"
            alt_loc = ''''''
            insertion_code = ''''''
            occupancy = 1.00
            element = atom_name[0]  # Protein supports only C, N, O, S, this works.
            charge = ''''''
            chain_tag = '''A'''
            if chain_index is not None:
                chain_tag = chain_tags[chain_index[i]]
            # PDB is a columnar format, every space matters here!
            atom_line = (
                f"{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}"
                f"{res_name_3:>3} {chain_tag:>1}"
                f"{residue_index[i]:>4}{insertion_code:>1}   "
                f"{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}"
                f"{occupancy:>6.2f}{b_factor:>6.2f}          "
                f"{element:>2}{charge:>2}"
            )
            pdb_lines.append(atom_line )
            atom_index += 1
        should_terminate = i == n - 1
        if chain_index is not None:
            if i != n - 1 and chain_index[i + 1] != prev_chain_index:
                should_terminate = True
                prev_chain_index = chain_index[i + 1]
        if should_terminate:
            # Close the chain.
            chain_end = '''TER'''
            chain_termination_line = (
                f"{chain_end:<6}{atom_index:>5}      {res_1to3(aatype[i] ):>3} {chain_tag:>1}{residue_index[i]:>4}"
            )
            pdb_lines.append(chain_termination_line )
            atom_index += 1
            if i != n - 1:
                # "prev" is a misnomer here. This happens at the beginning of
                # each new chain.
                pdb_lines.extend(get_pdb_headers(prot , prev_chain_index ) )
    pdb_lines.append('''END''' )
    pdb_lines.append('''''' )
    return "\n".join(pdb_lines )
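# For reference, an ATOM record emitted above looks roughly like this
# (fixed-width columns; values are illustrative only):
# ATOM      1  N   MET A   1      11.104   6.134  -6.504  1.00  0.00           N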
def ideal_atom_mask(prot: Protein ) -> np.ndarray:
    """simple docstring"""
    return residue_constants.STANDARD_ATOM_MASK[prot.aatype]
def from_prediction(features: FeatureDict , result: ModelOutput , b_factors: Optional[np.ndarray] = None , chain_index: Optional[np.ndarray] = None , remark: Optional[str] = None , parents: Optional[Sequence[str]] = None , parents_chain_index: Optional[Sequence[int]] = None , ) -> Protein:
    """simple docstring"""
    return Protein(
        aatype=features['''aatype'''] , atom_positions=result['''final_atom_positions'''] , atom_mask=result['''final_atom_mask'''] , residue_index=features['''residue_index'''] + 1 , b_factors=b_factors if b_factors is not None else np.zeros_like(result['''final_atom_mask'''] ) , chain_index=chain_index , remark=remark , parents=parents , parents_chain_index=parents_chain_index , )
| 2 | 0 |
def kinetic_energy(mass: float , velocity: float ) -> float:
    if mass < 0:
        raise ValueError('The mass of a body cannot be negative' )
    return 0.5 * mass * abs(velocity ) * abs(velocity )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
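    # Worked example: KE = 0.5 * m * v**2, so a 10 kg mass moving at 5 m/s
    # carries 0.5 * 10 * 5**2 = 125 J.
    print(kinetic_energy(10, 5))  # 125.0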
| 99 |
'''simple docstring'''
from __future__ import annotations
def generate_all_combinations(n: int , k: int ) -> list[list[int]]:
    """simple docstring"""
    result: list[list[int]] = []
    create_all_state(1 , n , k , [] , result )
    return result
def create_all_state(increment: int , total_number: int , level: int , current_list: list[int] , total_list: list[list[int]] , ) -> None:
    """simple docstring"""
    if level == 0:
        total_list.append(current_list[:] )
        return
    for i in range(increment , total_number - level + 2 ):
        current_list.append(i )
        create_all_state(i + 1 , total_number , level - 1 , current_list , total_list )
        current_list.pop()
def print_all_state(total_list: list[list[int]] ) -> None:
    """simple docstring"""
    for i in total_list:
        print(*i )
if __name__ == "__main__":
    n = 4
    k = 2
    total_list = generate_all_combinations(n, k)
    print_all_state(total_list)
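    # Expected output for n = 4, k = 2 -- the six 2-element combinations of 1..4:
    #   1 2 / 1 3 / 1 4 / 2 3 / 2 4 / 3 4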
| 2 | 0 |
"""simple docstring"""
import os
import unittest
from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class PhobertTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    """simple docstring"""
    tokenizer_class = PhobertTokenizer
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["""T@@""", """i""", """I""", """R@@""", """r""", """e@@"""]
        vocab_tokens = dict(zip(vocab , range(len(vocab))))
        merges = ["""#version: 0.2""", """l à</w>"""]
        self.special_tokens_map = {"""unk_token""": """<unk>"""}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""])
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""])
        with open(self.vocab_file , """w""" , encoding="""utf-8""") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")
        with open(self.merges_file , """w""" , encoding="""utf-8""") as fp:
            fp.write("""\n""".join(merges))
    def get_tokenizer(self , **kwargs):
        kwargs.update(self.special_tokens_map)
        return PhobertTokenizer.from_pretrained(self.tmpdirname , **kwargs)
    def get_input_output_texts(self , tokenizer):
        input_text = """Tôi là VinAI Research"""
        output_text = """T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>"""
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = PhobertTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map)
        text = """Tôi là VinAI Research"""
        bpe_tokens = """T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h""".split()
        tokens = tokenizer.tokenize(text)
        print(tokens)
        self.assertListEqual(tokens , bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens) , input_bpe_tokens)
| 100 |
'''simple docstring'''
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sol1 import PokerHand
SORTED_HANDS = (
'4S 3H 2C 7S 5H',
'9D 8H 2C 6S 7H',
'2D 6D 9D TH 7D',
'TC 8C 2S JH 6C',
'JH 8S TH AH QH',
'TS KS 5S 9S AC',
'KD 6S 9D TH AD',
'KS 8D 4D 9S 4S', # pair
'8C 4S KH JS 4D', # pair
'QH 8H KD JH 8S', # pair
'KC 4H KS 2H 8D', # pair
'KD 4S KC 3H 8S', # pair
'AH 8S AS KC JH', # pair
'3H 4C 4H 3S 2H', # 2 pairs
'5S 5D 2C KH KH', # 2 pairs
'3C KH 5D 5S KH', # 2 pairs
'AS 3C KH AD KH', # 2 pairs
'7C 7S 3S 7H 5S', # 3 of a kind
'7C 7S KH 2H 7H', # 3 of a kind
'AC KH QH AH AS', # 3 of a kind
'2H 4D 3C AS 5S', # straight (low ace)
'3C 5C 4C 2C 6H', # straight
'6S 8S 7S 5H 9H', # straight
'JS QS 9H TS KH', # straight
'QC KH TS JS AH', # straight (high ace)
'8C 9C 5C 3C TC', # flush
'3S 8S 9S 5S KS', # flush
'4C 5C 9C 8C KC', # flush
'JH 8H AH KH QH', # flush
'3D 2H 3H 2C 2D', # full house
'2H 2C 3S 3H 3D', # full house
'KH KC 3S 3H 3D', # full house
'JC 6H JS JD JH', # 4 of a kind
'JC 7H JS JD JH', # 4 of a kind
'JC KH JS JD JH', # 4 of a kind
'2S AS 4S 5S 3S', # straight flush (low ace)
'2D 6D 3D 4D 5D', # straight flush
'5C 6C 3C 7C 4C', # straight flush
'JH 9H TH KH QH', # straight flush
'JH AH TH KH QH', # royal flush (high ace straight flush)
)
TEST_COMPARE = (
('2H 3H 4H 5H 6H', 'KS AS TS QS JS', 'Loss'),
('2H 3H 4H 5H 6H', 'AS AD AC AH JD', 'Win'),
('AS AH 2H AD AC', 'JS JD JC JH 3D', 'Win'),
('2S AH 2H AS AC', 'JS JD JC JH AD', 'Loss'),
('2S AH 2H AS AC', '2H 3H 5H 6H 7H', 'Win'),
('AS 3S 4S 8S 2S', '2H 3H 5H 6H 7H', 'Win'),
('2H 3H 5H 6H 7H', '2S 3H 4H 5S 6C', 'Win'),
('2S 3H 4H 5S 6C', '3D 4C 5H 6H 2S', 'Tie'),
('2S 3H 4H 5S 6C', 'AH AC 5H 6H AS', 'Win'),
('2S 2H 4H 5S 4C', 'AH AC 5H 6H AS', 'Loss'),
('2S 2H 4H 5S 4C', 'AH AC 5H 6H 7S', 'Win'),
('6S AD 7H 4S AS', 'AH AC 5H 6H 7S', 'Loss'),
('2S AH 4H 5S KC', 'AH AC 5H 6H 7S', 'Loss'),
('2S 3H 6H 7S 9C', '7H 3C TH 6H 9S', 'Loss'),
('4S 5H 6H TS AC', '3S 5H 6H TS AC', 'Win'),
('2S AH 4H 5S 6C', 'AD 4C 5H 6H 2C', 'Tie'),
('AS AH 3H AD AC', 'AS AH 2H AD AC', 'Win'),
('AH AC 5H 5C QS', 'AH AC 5H 5C KS', 'Loss'),
('AH AC 5H 5C QS', 'KH KC 5H 5C QS', 'Win'),
('7C 7S KH 2H 7H', '3C 3S AH 2H 3H', 'Win'),
('3C 3S AH 2H 3H', '7C 7S KH 2H 7H', 'Loss'),
('6H 5H 4H 3H 2H', '5H 4H 3H 2H AH', 'Win'),
('5H 4H 3H 2H AH', '5H 4H 3H 2H AH', 'Tie'),
('5H 4H 3H 2H AH', '6H 5H 4H 3H 2H', 'Loss'),
('AH AD KS KC AC', 'AH KD KH AC KC', 'Win'),
('2H 4D 3C AS 5S', '2H 4D 3C 6S 5S', 'Loss'),
('2H 3S 3C 3H 2S', '3S 3C 2S 2H 2D', 'Win'),
('4D 6D 5D 2D JH', '3S 8S 3H TC KH', 'Loss'),
('4S 6C 8S 3S 7S', 'AD KS 2D 7D 7C', 'Loss'),
('6S 4C 7H 8C 3H', '5H JC AH 9D 9C', 'Loss'),
('9D 9H JH TC QH', '3C 2S JS 5C 7H', 'Win'),
('2H TC 8S AD 9S', '4H TS 7H 2C 5C', 'Win'),
('9D 3S 2C 7S 7C', 'JC TD 3C TC 9H', 'Loss'),
)
TEST_FLUSH = (
('2H 3H 4H 5H 6H', True),
('AS AH 2H AD AC', False),
('2H 3H 5H 6H 7H', True),
('KS AS TS QS JS', True),
('8H 9H QS JS TH', False),
('AS 3S 4S 8S 2S', True),
)
TEST_STRAIGHT = (
('2H 3H 4H 5H 6H', True),
('AS AH 2H AD AC', False),
('2H 3H 5H 6H 7H', False),
('KS AS TS QS JS', True),
('8H 9H QS JS TH', True),
)
TEST_FIVE_HIGH_STRAIGHT = (
('2H 4D 3C AS 5S', True, [5, 4, 3, 2, 14]),
('2H 5D 3C AS 5S', False, [14, 5, 5, 3, 2]),
('JH QD KC AS TS', False, [14, 13, 12, 11, 10]),
('9D 3S 2C 7S 7C', False, [9, 7, 7, 3, 2]),
)
TEST_KIND = (
('JH AH TH KH QH', 0),
('JH 9H TH KH QH', 0),
('JC KH JS JD JH', 7),
('KH KC 3S 3H 3D', 6),
('8C 9C 5C 3C TC', 0),
('JS QS 9H TS KH', 0),
('7C 7S KH 2H 7H', 3),
('3C KH 5D 5S KH', 2),
('QH 8H KD JH 8S', 1),
('2D 6D 9D TH 7D', 0),
)
TEST_TYPES = (
('JH AH TH KH QH', 23),
('JH 9H TH KH QH', 22),
('JC KH JS JD JH', 21),
('KH KC 3S 3H 3D', 20),
('8C 9C 5C 3C TC', 19),
('JS QS 9H TS KH', 18),
('7C 7S KH 2H 7H', 17),
('3C KH 5D 5S KH', 16),
('QH 8H KD JH 8S', 15),
('2D 6D 9D TH 7D', 14),
)
def generate_random_hand() -> tuple[str, str, str]:
    """simple docstring"""
    play, oppo = randrange(len(SORTED_HANDS ) ), randrange(len(SORTED_HANDS ) )
    expected = ['''Loss''', '''Tie''', '''Win'''][(play >= oppo) + (play > oppo)]
    hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected
def generate_random_hands(number_of_hands: int = 100 ):
    """simple docstring"""
    return (generate_random_hand() for _ in range(number_of_hands ))
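# An illustrative triple (hypothetical draw): pairing the lowest and highest
# entries of SORTED_HANDS would yield ('4S 3H 2C 7S 5H', 'JH AH TH KH QH', 'Loss').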
@pytest.mark.parametrize('''hand, expected''' , TEST_FLUSH )
def test_hand_is_flush(hand , expected ) -> None:
    """simple docstring"""
    assert PokerHand(hand )._is_flush() == expected
@pytest.mark.parametrize('''hand, expected''' , TEST_STRAIGHT )
def test_hand_is_straight(hand , expected ) -> None:
    """simple docstring"""
    assert PokerHand(hand )._is_straight() == expected
@pytest.mark.parametrize('''hand, expected, card_values''' , TEST_FIVE_HIGH_STRAIGHT )
def test_hand_is_five_high_straight(hand , expected , card_values ) -> None:
    """simple docstring"""
    player = PokerHand(hand )
    assert player._is_five_high_straight() == expected
    assert player._card_values == card_values
@pytest.mark.parametrize('''hand, expected''' , TEST_KIND )
def test_hand_is_same_kind(hand , expected ) -> None:
    """simple docstring"""
    assert PokerHand(hand )._is_same_kind() == expected
@pytest.mark.parametrize('''hand, expected''' , TEST_TYPES )
def test_hand_values(hand , expected ) -> None:
    """simple docstring"""
    assert PokerHand(hand )._hand_type == expected
@pytest.mark.parametrize('''hand, other, expected''' , TEST_COMPARE )
def test_hand_compare_with(hand , other , expected ) -> None:
    """simple docstring"""
    assert PokerHand(hand ).compare_with(PokerHand(other ) ) == expected
@pytest.mark.parametrize('''hand, other, expected''' , generate_random_hands() )
def test_hand_compare_with_random(hand , other , expected ) -> None:
    """simple docstring"""
    assert PokerHand(hand ).compare_with(PokerHand(other ) ) == expected
def test_hand_sorted() -> None:
    """simple docstring"""
    poker_hands = [PokerHand(hand ) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy )
    user_sorted = chain(sorted(list_copy ) )
    for index, hand in enumerate(user_sorted ):
        assert hand == poker_hands[index]
def test_custom_sort_five_high_straight() -> None:
    """simple docstring"""
    pokerhands = [PokerHand('''2D AC 3H 4H 5S''' ), PokerHand('''2S 3H 4H 5S 6C''' )]
    pokerhands.sort(reverse=True )
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def test_multiple_calls_five_high_straight() -> None:
    """simple docstring"""
    pokerhand = PokerHand('''2C 4S AS 3D 5C''' )
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
    for _ in range(10 ):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values
def test_euler_project() -> None:
    """simple docstring"""
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__ ) )
    poker_hands_path = os.path.join(script_dir , '''poker_hands.txt''' )
    with open(poker_hands_path ) as file_hand:
        for line in file_hand:
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player, opponent = PokerHand(player_hand ), PokerHand(opponent_hand )
            output = player.compare_with(opponent )
            if output == "Win":
                answer += 1
    assert answer == 376
| 2 | 0 |