"""simple docstring"""
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def custom_convert_ldm_vae_checkpoint(checkpoint, config):
    vae_state_dict = checkpoint

    new_checkpoint = {}

    new_checkpoint["encoder.conv_in.weight"] = vae_state_dict["encoder.conv_in.weight"]
    new_checkpoint["encoder.conv_in.bias"] = vae_state_dict["encoder.conv_in.bias"]
    new_checkpoint["encoder.conv_out.weight"] = vae_state_dict["encoder.conv_out.weight"]
    new_checkpoint["encoder.conv_out.bias"] = vae_state_dict["encoder.conv_out.bias"]
    new_checkpoint["encoder.conv_norm_out.weight"] = vae_state_dict["encoder.norm_out.weight"]
    new_checkpoint["encoder.conv_norm_out.bias"] = vae_state_dict["encoder.norm_out.bias"]

    new_checkpoint["decoder.conv_in.weight"] = vae_state_dict["decoder.conv_in.weight"]
    new_checkpoint["decoder.conv_in.bias"] = vae_state_dict["decoder.conv_in.bias"]
    new_checkpoint["decoder.conv_out.weight"] = vae_state_dict["decoder.conv_out.weight"]
    new_checkpoint["decoder.conv_out.bias"] = vae_state_dict["decoder.conv_out.bias"]
    new_checkpoint["decoder.conv_norm_out.weight"] = vae_state_dict["decoder.norm_out.weight"]
    new_checkpoint["decoder.conv_norm_out.bias"] = vae_state_dict["decoder.norm_out.bias"]

    new_checkpoint["quant_conv.weight"] = vae_state_dict["quant_conv.weight"]
    new_checkpoint["quant_conv.bias"] = vae_state_dict["quant_conv.bias"]
    new_checkpoint["post_quant_conv.weight"] = vae_state_dict["post_quant_conv.weight"]
    new_checkpoint["post_quant_conv.bias"] = vae_state_dict["post_quant_conv.bias"]

    # Retrieves the keys for the encoder down blocks only
    num_down_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "encoder.down" in layer})
    down_blocks = {
        layer_id: [key for key in vae_state_dict if f"down.{layer_id}" in key] for layer_id in range(num_down_blocks)
    }

    # Retrieves the keys for the decoder up blocks only
    num_up_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "decoder.up" in layer})
    up_blocks = {
        layer_id: [key for key in vae_state_dict if f"up.{layer_id}" in key] for layer_id in range(num_up_blocks)
    }

    for i in range(num_down_blocks):
        resnets = [key for key in down_blocks[i] if f"down.{i}" in key and f"down.{i}.downsample" not in key]

        if f"encoder.down.{i}.downsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.weight"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.weight"
            )
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.bias"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.bias"
            )

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"down.{i}.block", "new": f"down_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "encoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"encoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "encoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)

    for i in range(num_up_blocks):
        block_id = num_up_blocks - 1 - i
        resnets = [
            key for key in up_blocks[block_id] if f"up.{block_id}" in key and f"up.{block_id}.upsample" not in key
        ]

        if f"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.weight"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.weight"
            ]
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.bias"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.bias"
            ]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"up.{block_id}.block", "new": f"up_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "decoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"decoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "decoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)
    return new_checkpoint


def vae_pt_to_vae_diffuser(checkpoint_path: str, output_path: str):
    r = requests.get(
        "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml"
    )
    io_obj = io.BytesIO(r.content)

    original_config = OmegaConf.load(io_obj)
    image_size = 512
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if checkpoint_path.endswith("safetensors"):
        from safetensors import safe_open

        checkpoint = {}
        with safe_open(checkpoint_path, framework="pt", device="cpu") as f:
            for key in f.keys():
                checkpoint[key] = f.get_tensor(key)
    else:
        checkpoint = torch.load(checkpoint_path, map_location=device)["state_dict"]

    # Convert the VAE model.
    vae_config = create_vae_diffusers_config(original_config, image_size=image_size)
    converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(checkpoint, vae_config)

    vae = AutoencoderKL(**vae_config)
    vae.load_state_dict(converted_vae_checkpoint)
    vae.save_pretrained(output_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--vae_pt_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.")
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to store the converted model.")

    args = parser.parse_args()

    vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
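# Usage sketch (added note, not part of the original script; the script filename and
# paths below are assumed for illustration):
#
#   python convert_vae_pt_to_diffusers.py --vae_pt_path checkpoints/vae.pt --dump_path converted-vae
#
# The dumped folder can then be reloaded with `AutoencoderKL.from_pretrained("converted-vae")`.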
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {'configuration_deit': ['DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DeiTConfig', 'DeiTOnnxConfig']}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['feature_extraction_deit'] = ['DeiTFeatureExtractor']
    _import_structure['image_processing_deit'] = ['DeiTImageProcessor']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_deit'] = [
        'DEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'DeiTForImageClassification',
        'DeiTForImageClassificationWithTeacher',
        'DeiTForMaskedImageModeling',
        'DeiTModel',
        'DeiTPreTrainedModel',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_tf_deit'] = [
        'TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFDeiTForImageClassification',
        'TFDeiTForImageClassificationWithTeacher',
        'TFDeiTForMaskedImageModeling',
        'TFDeiTModel',
        'TFDeiTPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_deit import DeiTFeatureExtractor
        from .image_processing_deit import DeiTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_deit import (
            DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DeiTForImageClassification,
            DeiTForImageClassificationWithTeacher,
            DeiTForMaskedImageModeling,
            DeiTModel,
            DeiTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_deit import (
            TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDeiTForImageClassification,
            TFDeiTForImageClassificationWithTeacher,
            TFDeiTForMaskedImageModeling,
            TFDeiTModel,
            TFDeiTPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
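# Added note (not part of the original module): `_LazyModule` keeps `import` cheap.
# Submodules listed in `_import_structure` are only imported on first attribute access,
# e.g. (sketch):
#
#   from transformers.models.deit import DeiTConfig  # does not pull in torch
#   from transformers.models.deit import DeiTModel   # triggers the lazy torch-side import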
"""simple docstring"""
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
    from .pipeline_unclip import UnCLIPPipeline
    from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
    from .text_proj import UnCLIPTextProjModel
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    'configuration_squeezebert': [
        'SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'SqueezeBertConfig',
        'SqueezeBertOnnxConfig',
    ],
    'tokenization_squeezebert': ['SqueezeBertTokenizer'],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['tokenization_squeezebert_fast'] = ['SqueezeBertTokenizerFast']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_squeezebert'] = [
        'SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'SqueezeBertForMaskedLM',
        'SqueezeBertForMultipleChoice',
        'SqueezeBertForQuestionAnswering',
        'SqueezeBertForSequenceClassification',
        'SqueezeBertForTokenClassification',
        'SqueezeBertModel',
        'SqueezeBertModule',
        'SqueezeBertPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_squeezebert import (
        SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SqueezeBertConfig,
        SqueezeBertOnnxConfig,
    )
    from .tokenization_squeezebert import SqueezeBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_squeezebert import (
            SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            SqueezeBertForMaskedLM,
            SqueezeBertForMultipleChoice,
            SqueezeBertForQuestionAnswering,
            SqueezeBertForSequenceClassification,
            SqueezeBertForTokenClassification,
            SqueezeBertModel,
            SqueezeBertModule,
            SqueezeBertPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
    Examples:
        ```py
        >>> from PIL import Image
        >>> import torch
        >>> from diffusers import DiffusionPipeline
        >>> from diffusers.utils import export_to_gif, load_image

        >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

        >>> repo = "openai/shap-e-img2img"
        >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)
        >>> pipe = pipe.to(device)

        >>> guidance_scale = 3.0
        >>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"
        >>> image = load_image(image_url).convert("RGB")

        >>> images = pipe(
        ...     image,
        ...     guidance_scale=guidance_scale,
        ...     num_inference_steps=64,
        ...     frame_size=256,
        ... ).images

        >>> gif_path = export_to_gif(images[0], "corgi_3d.gif")
        ```
"""
@dataclass
class ShapEPipelineOutput(BaseOutput):
    """
    Output class for the Shap-E pipelines.

    Args:
        images: a list of images for 3D rendering.
    """

    images: Union[PIL.Image.Image, np.ndarray]


class ShapEImg2ImgPipeline(DiffusionPipeline):
    def __init__(
        self,
        prior: PriorTransformer,
        image_encoder: CLIPVisionModel,
        image_processor: CLIPImageProcessor,
        scheduler: HeunDiscreteScheduler,
        renderer: ShapERenderer,
    ):
        super().__init__()
        self.register_modules(
            prior=prior,
            image_encoder=image_encoder,
            image_processor=image_processor,
            scheduler=scheduler,
            renderer=renderer,
        )

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents

    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [self.image_encoder, self.prior]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    @property
    def _execution_device(self):
        if self.device != torch.device("meta") or not hasattr(self.image_encoder, "_hf_hook"):
            return self.device
        for module in self.image_encoder.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    def _encode_image(self, image, device, num_images_per_prompt, do_classifier_free_guidance):
        if isinstance(image, list) and isinstance(image[0], torch.Tensor):
            image = torch.cat(image, axis=0) if image[0].ndim == 4 else torch.stack(image, axis=0)

        if not isinstance(image, torch.Tensor):
            image = self.image_processor(image, return_tensors="pt").pixel_values[0].unsqueeze(0)

        image = image.to(dtype=self.image_encoder.dtype, device=device)

        image_embeds = self.image_encoder(image)["last_hidden_state"]
        image_embeds = image_embeds[:, 1:, :].contiguous()  # batch_size, dim, 256
        image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            negative_image_embeds = torch.zeros_like(image_embeds)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and conditional embeddings into a single batch
            # to avoid doing two forward passes
            image_embeds = torch.cat([negative_image_embeds, image_embeds])

        return image_embeds

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image,
        num_images_per_prompt: int = 1,
        num_inference_steps: int = 25,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        guidance_scale: float = 4.0,
        frame_size: int = 64,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        elif isinstance(image, list) and isinstance(image[0], (torch.Tensor, PIL.Image.Image)):
            batch_size = len(image)
        else:
            raise ValueError(
                f"`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(image)}"
            )

        device = self._execution_device
        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0
        image_embeds = self._encode_image(image, device, num_images_per_prompt, do_classifier_free_guidance)

        # prior
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        num_embeddings = self.prior.config.num_embeddings
        embedding_dim = self.prior.config.embedding_dim

        latents = self.prepare_latents(
            (batch_size, num_embeddings * embedding_dim),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
        latents = latents.reshape(latents.shape[0], num_embeddings, embedding_dim)

        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            scaled_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            noise_pred = self.prior(
                scaled_model_input,
                timestep=t,
                proj_embedding=image_embeds,
            ).predicted_image_embedding

            # remove the variance
            noise_pred, _ = noise_pred.split(
                scaled_model_input.shape[2], dim=2
            )  # batch_size, num_embeddings, embedding_dim

            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)

            latents = self.scheduler.step(
                noise_pred,
                timestep=t,
                sample=latents,
            ).prev_sample

        if output_type == "latent":
            return ShapEPipelineOutput(images=latents)

        images = []
        for i, latent in enumerate(latents):
            image = self.renderer.decode(
                latent[None, :],
                device,
                size=frame_size,
                ray_batch_size=4096,
                n_coarse_samples=64,
                n_fine_samples=128,
            )
            images.append(image)

        images = torch.stack(images)

        if output_type not in ["np", "pil"]:
            raise ValueError(f"Only the output types `pil` and `np` are supported not output_type={output_type}")

        images = images.cpu().numpy()

        if output_type == "pil":
            images = [self.numpy_to_pil(image) for image in images]

        # Offload last model to CPU
        if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
            self.final_offload_hook.offload()

        if not return_dict:
            return (images,)

        return ShapEPipelineOutput(images=images)
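# Added note (not part of the original pipeline): the classifier-free guidance update in
# `__call__` is
#     noise_pred = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
# e.g. with uncond = 0.2, cond = 0.8 and guidance_scale = 4.0 the guided value is
# 0.2 + 4.0 * 0.6 = 2.6, while guidance_scale = 1.0 reproduces the conditional prediction.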
"""simple docstring"""
alphabet_size = 256
# Modulus to hash a string
modulus = 1_000_003


def rabin_karp(pattern: str, text: str) -> bool:
    """Returns True if `pattern` occurs in `text`, using a Rabin-Karp rolling hash."""
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False

    p_hash = 0
    text_hash = 0
    modulus_power = 1

    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus

    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False


def test_rabin_karp() -> None:
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text1) and not rabin_karp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)

    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)

    print("Success.")
if __name__ == "__main__":
test_rabin_karp()
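# Added sketch (not part of the original module): a small self-check of the rolling-hash
# recurrence used above. Dropping the leading character and appending the next one must
# match a from-scratch hash of the new window.
def _rolling_hash_sanity_check() -> None:
    def naive_hash(s: str) -> int:
        h = 0
        for ch in s:
            h = (ord(ch) + h * alphabet_size) % modulus
        return h

    text, window = "abcdef", 3
    power = pow(alphabet_size, window - 1, modulus)
    text_hash = naive_hash(text[:window])
    for i in range(len(text) - window):
        text_hash = ((text_hash - ord(text[i]) * power) * alphabet_size + ord(text[i + window])) % modulus
        assert text_hash == naive_hash(text[i + 1 : i + 1 + window])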
"""simple docstring"""
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256


def get_min_hash(tokens: List[str]) -> Optional[MinHash]:
    """Compute the MinHash of a code snippet."""
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash


def get_tokens(code: str) -> Set[str]:
    """Tokenize a code snippet."""
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}


class DuplicationIndex:
    def __init__(
        self,
        *,
        duplication_jaccard_threshold: float = 0.85,
    ):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)

        self._duplicate_clusters = defaultdict(set)

    def add(self, code_key: Tuple, min_hash: MinHash) -> None:
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f"Duplicate key {code_key}")
            return

        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)

    def get_duplicate_clusters(self) -> List[List[Dict]]:
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def save(self, filepath) -> None:
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, "w") as f:
            json.dump(duplicate_clusters, f)


def _compute_min_hash(element):
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash


def minhash_iter(dataset_iterator: Type[Dataset]):
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash,
            ThreadedIterator(dataset_iterator, max_queue_size=10000),
            chunksize=100,
        ):
            if data is not None:
                yield data


def make_duplicate_clusters(dataset_iterator: Type[Dataset], jaccard_threshold: float):
    """Find duplicate clusters in the dataset: minhash every file, then query the LSH index."""
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)

    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)

    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()


def jaccard_similarity(code1: str, code2: str) -> float:
    """Compute the Jaccard similarity of two code snippets."""
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)


_shared_dataset = None


def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["base_index"]]["content"]
        for element2 in extremes:
            code2 = _shared_dataset[element2["base_index"]]["content"]
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1)
    return extremes


def find_extremes(cluster_list, dataset, jaccard_threshold):
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                f,
                cluster_list,
            ),
            total=len(cluster_list),
        ):
            extremes_list.append(extremes)
    return extremes_list


def deduplicate_dataset(
    dataset: Type[Dataset], jaccard_threshold: float = 0.85
) -> Tuple[Type[Dataset], List[List[Dict]]]:
    """Deduplicate the dataset and return it along with the duplicate clusters."""
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)

    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]

    print(f"Original dataset size: {len(dataset)}")
    print(f"Number of duplicate clusters: {len(duplicate_clusters)}")
    print(f"Files in duplicate cluster: {len(duplicate_indices)}")
    print(f"Unique files in duplicate cluster: {len(extreme_dict)}")
    print(f"Filtered dataset size: {len(ds_filter)}")

    return ds_filter, duplicate_clusters
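# Added sketch (not part of the original script): token-level Jaccard similarity as
# computed by `jaccard_similarity` above. Two renamed copies of the same function share
# only {def, add, return} out of 7 distinct tokens, far below the default 0.85 threshold.
if __name__ == "__main__":
    snippet_a = "def add(a, b): return a + b"
    snippet_b = "def add(x, y): return x + y"
    print(jaccard_similarity(snippet_a, snippet_b))  # 3 / 7 ~= 0.43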
"""simple docstring"""
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    local_sgd_steps = int(args.local_sgd_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
        raise NotImplementedError("LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)")
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        with LocalSGD(
            accelerator=accelerator, model=model, local_sgd_steps=local_sgd_steps, enabled=local_sgd_steps is not None
        ) as local_sgd:
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                # New code #
                # We use the new `accumulate` context manager to perform gradient accumulation
                # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
                with accelerator.accumulate(model):
                    output = model(**batch)
                    loss = output.loss
                    accelerator.backward(loss)
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                    # LocalSGD-specific line
                    local_sgd.step()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument(
        "--local_sgd_steps", type=int, default=8, help="Number of local SGD steps or None to disable local SGD"
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
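# Added note (not part of the original example; script filename assumed): a typical
# invocation via the accelerate CLI, using the flags defined in `main` above:
#
#   accelerate launch local_sgd.py --local_sgd_steps 8 --gradient_accumulation_steps 2
#
# LocalSGD synchronizes model parameters only every `local_sgd_steps` optimizer steps,
# trading a little statistical efficiency for less communication in data-parallel runs.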
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_config(model_name):
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    conv_layer = "std_conv" if "bit" in model_name else False

    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    config = BitConfig(
        conv_layer=conv_layer,
        num_labels=1000,
        id2label=id2label,
        label2id=label2id,
    )

    return config


def rename_key(name):
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "head.fc" in name:
        name = name.replace("head.fc", "classifier.1")
    if name.startswith("norm"):
        name = "bit." + name
    if "bit" not in name and "classifier" not in name:
        name = "bit.encoder." + name

    return name


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_bit_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    config = get_config(model_name)

    # load original model from timm
    timm_model = create_model(model_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model
    state_dict = timm_model.state_dict()
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val.squeeze() if "head" in key else val

    # load HuggingFace model
    model = BitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)

    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print("Logits:", logits[0, :3])
    print("Predicted class:", model.config.id2label[logits.argmax(-1).item()])
    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model {model_name} and processor to the hub")
        model.push_to_hub(f"ybelkada/{model_name}")
        processor.push_to_hub(f"ybelkada/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='resnetv2_50x1_bitm',
type=str,
help='Name of the BiT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model to the hub.',
)
    args = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
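# Added note (not part of the original script; filename assumed): example invocation
# with the default timm checkpoint:
#
#   python convert_bit_to_pytorch.py --model_name resnetv2_50x1_bitm \
#       --pytorch_dump_folder_path ./bit-base --push_to_hub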
"""simple docstring"""
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
def evaluation_loop(accelerator, model, eval_dataloader, metric):
    model.eval()
    samples_seen = 0
    for step, batch in enumerate(eval_dataloader):
        # We could avoid this line since we set the accelerator with `device_placement=True`.
        batch.to(accelerator.device)
        with torch.no_grad():
            outputs = model(**batch)
        predictions = outputs.logits.argmax(dim=-1)
        # It is slightly faster to call this once, than multiple times
        predictions, references = accelerator.gather(
            (predictions, batch["labels"])
        )  # If we are in a multiprocess environment, the last batch has duplicates
        if accelerator.use_distributed:
            if step == len(eval_dataloader) - 1:
                predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                references = references[: len(eval_dataloader.dataset) - samples_seen]
            else:
                samples_seen += references.shape[0]
        metric.add_batch(
            predictions=predictions,
            references=references,
        )

    eval_metric = metric.compute()
    return eval_metric["accuracy"]
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=0,
            num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0
    metric = evaluate.load("glue", "mrpc")
    ending_epoch = num_epochs

    if args.partial_train_epoch is not None:
        ending_epoch = args.partial_train_epoch

    if args.resume_from_checkpoint:
        accelerator.load_state(args.resume_from_checkpoint)
        epoch_string = args.resume_from_checkpoint.split("epoch_")[1]
        state_epoch_num = ""
        for char in epoch_string:
            if char.isdigit():
                state_epoch_num += char
            else:
                break
        starting_epoch = int(state_epoch_num) + 1
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        accelerator.print("resumed checkpoint performance:", accuracy)
        accelerator.print("resumed checkpoint's scheduler's lr:", lr_scheduler.get_lr()[0])
        accelerator.print("resumed optimizers's lr:", optimizer.param_groups[0]["lr"])
        with open(os.path.join(args.output_dir, f"state_{starting_epoch - 1}.json"), "r") as f:
            resumed_state = json.load(f)
            assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
            assert (
                resumed_state["lr"] == lr_scheduler.get_lr()[0]
            ), "Scheduler learning rate mismatch, loading from checkpoint failed"
            assert (
                resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
            ), "Optimizer learning rate mismatch, loading from checkpoint failed"
            assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
            return

    # Now we train the model
    state = {}
    for epoch in range(starting_epoch, ending_epoch):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            overall_step += 1
        output_dir = f"epoch_{epoch}"
        output_dir = os.path.join(args.output_dir, output_dir)
        accelerator.save_state(output_dir)
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        state["accuracy"] = accuracy
        state["lr"] = lr_scheduler.get_lr()[0]
        state["optimizer_lr"] = optimizer.param_groups[0]["lr"]
        state["epoch"] = epoch
        state["overall_step"] = overall_step
        accelerator.print(f"epoch {epoch}:", state)

        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            with open(os.path.join(args.output_dir, f"state_{epoch}.json"), "w") as f:
                json.dump(state, f)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help="If the training should continue from a checkpoint folder.",
    )
    parser.add_argument(
        "--partial_train_epoch",
        type=int,
        default=None,
        help="If passed, the training will stop after this number of epochs.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=2,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
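# Added note (not part of the original script; filename assumed): the checkpoint
# round-trip this script exercises:
#
#   python test_checkpointing.py --output_dir ckpts                  # writes epoch_N/ and state_N.json
#   python test_checkpointing.py --output_dir ckpts --resume_from_checkpoint ckpts/epoch_0
#
# The resumed run reloads epoch_0 and asserts that accuracy and learning rates match
# the values recorded in state_0.json.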
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class DatasetListTest(TestCase):
    def _create_example_records(self):
        return [
            {"col_1": 3, "col_2": "a"},
            {"col_1": 2, "col_2": "b"},
            {"col_1": 1, "col_2": "c"},
            {"col_1": 0, "col_2": "d"},
        ]

    def _create_example_dict(self):
        data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
        return Dataset.from_dict(data)

    def test_create(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        self.assertListEqual(dset.column_names, ["col_1", "col_2"])
        for i, r in enumerate(dset):
            self.assertDictEqual(r, example_records[i])

    def test_list_dict_equivalent(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        dset_from_dict = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]})
        self.assertEqual(dset.info, dset_from_dict.info)

    def test_uneven_records(self):  # checks what happens with missing columns
        uneven_records = [{"col_1": 1}, {"col_2": "x"}]
        dset = Dataset.from_list(uneven_records)
        self.assertDictEqual(dset[0], {"col_1": 1})
        self.assertDictEqual(dset[1], {"col_1": None})  # NB: first record is used for columns

    def test_variable_list_records(self):  # checks if the type can be inferred from the second record
        list_records = [{"col_1": []}, {"col_1": [1, 2]}]
        dset = Dataset.from_list(list_records)
        self.assertEqual(dset.info.features["col_1"], Sequence(Value("int64")))

    def test_create_empty(self):
        dset = Dataset.from_list([])
        self.assertEqual(len(dset), 0)
        self.assertListEqual(dset.column_names, [])
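# Added note (not part of the original test file): `Dataset.from_list` infers the schema
# from the first record, which is what the uneven-records test above relies on (sketch):
#
#   >>> from datasets import Dataset
#   >>> ds = Dataset.from_list([{"col_1": 3, "col_2": "a"}, {"col_1": 2, "col_2": "b"}])
#   >>> ds.column_names
#   ['col_1', 'col_2']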
"""simple docstring"""
import base64
import io
import json
import os
from copy import deepcopy
from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler
class _SCREAMING_SNAKE_CASE :
def __init__( self , __A ) -> Union[str, Any]:
if isinstance(__A , __A ):
# Don't modify user's data should they want to reuse it (e.g. in tests), because once we
# modified it, it will not be accepted here again, since `auto` values would have been overridden
lowerCAmelCase_ :Tuple = deepcopy(__A )
elif os.path.exists(__A ):
with io.open(__A , """r""" , encoding="""utf-8""" ) as f:
lowerCAmelCase_ :str = json.load(__A )
else:
try:
lowerCAmelCase_ :Dict = baseaa.urlsafe_baadecode(__A ).decode("""utf-8""" )
lowerCAmelCase_ :int = json.loads(__A )
except (UnicodeDecodeError, AttributeError, ValueError):
raise ValueError(
f"""Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}""" )
lowerCAmelCase_ :Optional[Any] = config
self.set_stage_and_offload()
def __lowerCAmelCase ( self ) -> Tuple:
# zero stage - this is done as early as possible, before model is created, to allow
# ``is_deepspeed_zero3_enabled`` query and getting to the early deepspeed config object
# during ``zero.Init()`` which needs to know the dtype, and some other hparams.
lowerCAmelCase_ :Tuple = self.get_value("""zero_optimization.stage""" , -1 )
# offload
lowerCAmelCase_ :Dict = False
if self.is_zeroa() or self.is_zeroa():
lowerCAmelCase_ :Optional[int] = set(["""cpu""", """nvme"""] )
lowerCAmelCase_ :Union[str, Any] = set(
[
self.get_value("""zero_optimization.offload_optimizer.device""" ),
self.get_value("""zero_optimization.offload_param.device""" ),
] )
if len(offload_devices & offload_devices_valid ) > 0:
lowerCAmelCase_ :Optional[int] = True
def __lowerCAmelCase ( self , __A ) -> Optional[Any]:
lowerCAmelCase_ :str = self.config
# find the config node of interest if it exists
lowerCAmelCase_ :Tuple = ds_key_long.split(""".""" )
lowerCAmelCase_ :List[str] = nodes.pop()
for node in nodes:
lowerCAmelCase_ :Tuple = config.get(__A )
if config is None:
return None, ds_key
return config, ds_key
def __lowerCAmelCase ( self , __A , __A=None ) -> Optional[Any]:
lowerCAmelCase_ , lowerCAmelCase_ :Optional[Any] = self.find_config_node(__A )
if config is None:
return default
return config.get(__A , __A )
def __lowerCAmelCase ( self , __A , __A=False ) -> Optional[Any]:
lowerCAmelCase_ :Tuple = self.config
# find the config node of interest if it exists
lowerCAmelCase_ :Union[str, Any] = ds_key_long.split(""".""" )
for node in nodes:
lowerCAmelCase_ :int = config
lowerCAmelCase_ :Any = config.get(__A )
if config is None:
if must_exist:
raise ValueError(f"""Can't find {ds_key_long} entry in the config: {self.config}""" )
else:
return
# if found remove it
if parent_config is not None:
parent_config.pop(__A )
def __lowerCAmelCase ( self , __A ) -> Union[str, Any]:
lowerCAmelCase_ :Optional[int] = self.get_value(__A )
return False if value is None else bool(__A )
def __lowerCAmelCase ( self , __A ) -> Optional[int]:
lowerCAmelCase_ :List[str] = self.get_value(__A )
return False if value is None else not bool(__A )
def __lowerCAmelCase ( self ) -> str:
return self._stage == 2
def __lowerCAmelCase ( self ) -> Union[str, Any]:
return self._stage == 3
def __lowerCAmelCase ( self ) -> Union[str, Any]:
return self._offload
class _SCREAMING_SNAKE_CASE :
def __init__( self , __A ) -> Optional[int]:
lowerCAmelCase_ :Dict = engine
def __lowerCAmelCase ( self , __A , **__A ) -> str:
# runs backpropagation and handles mixed precision
self.engine.backward(__A , **__A )
# Deepspeed's `engine.step` performs the following operations:
# - gradient accumulation check
# - gradient clipping
# - optimizer step
# - zero grad
# - checking overflow
# - lr_scheduler step (only if engine.lr_scheduler is not None)
self.engine.step()
# and this plugin overrides the above calls with no-ops when Accelerate runs under
# Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple
# training loop that works transparently under many training regimes.
class _SCREAMING_SNAKE_CASE ( A__ ):
def __init__( self , __A ) -> List[str]:
super().__init__(__A , device_placement=__A , scaler=__A )
lowerCAmelCase_ :List[str] = hasattr(self.optimizer , """overflow""" )
def __lowerCAmelCase ( self , __A=None ) -> Optional[Any]:
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
def __lowerCAmelCase ( self ) -> List[Any]:
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
@property
def __lowerCAmelCase ( self ) -> int:
if self.__has_overflow__:
return self.optimizer.overflow
return False
class _SCREAMING_SNAKE_CASE ( A__ ):
def __init__( self , __A , __A ) -> Optional[int]:
super().__init__(__A , __A )
def __lowerCAmelCase ( self ) -> Any:
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
class _SCREAMING_SNAKE_CASE :
def __init__( self , __A , __A=0.0_0_1 , __A=0 , **__A ) -> List[Any]:
lowerCAmelCase_ :str = params
lowerCAmelCase_ :Any = lr
lowerCAmelCase_ :List[Any] = weight_decay
lowerCAmelCase_ :Any = kwargs
class _SCREAMING_SNAKE_CASE :
def __init__( self , __A , __A=None , __A=0 , **__A ) -> List[str]:
lowerCAmelCase_ :Optional[int] = optimizer
lowerCAmelCase_ :int = total_num_steps
lowerCAmelCase_ :List[Any] = warmup_num_steps
lowerCAmelCase_ :int = kwargs
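# A de-obfuscated sketch of the dotted-key lookup the config helper above
# performs; `find_node`/`get_value` are illustrative names, not the
# library's API.
def find_node(config: dict, ds_key_long: str):
    nodes = ds_key_long.split(".")
    key = nodes.pop()
    for node in nodes:
        config = config.get(node)
        if config is None:
            return None, key
    return config, key

def get_value(config: dict, ds_key_long: str, default=None):
    node, key = find_node(config, ds_key_long)
    return default if node is None else node.get(key, default)

_cfg = {"zero_optimization": {"stage": 3, "offload_param": {"device": "cpu"}}}
assert get_value(_cfg, "zero_optimization.stage") == 3
assert get_value(_cfg, "zero_optimization.offload_optimizer.device", "none") == "none"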
| 1 | 0 |
"""simple docstring"""
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
__UpperCAmelCase = '\\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n'
__UpperCAmelCase = '\\nGLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n'
__UpperCAmelCase = '\nCompute GLUE evaluation metric associated to each GLUE dataset.\nArgs:\n predictions: list of predictions to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\nReturns: depending on the GLUE subset, one or several of:\n "accuracy": Accuracy\n "f1": F1 score\n "pearson": Pearson Correlation\n "spearmanr": Spearman Correlation\n "matthews_correlation": Matthew Correlation\nExamples:\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'sst2\') # \'sst2\' or any of ["mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'mrpc\') # \'mrpc\' or \'qqp\'\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'stsb\')\n >>> references = [0., 1., 2., 3., 4., 5.]\n >>> predictions = [0., 1., 2., 3., 4., 5.]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print({"pearson": round(results["pearson"], 2), "spearmanr": round(results["spearmanr"], 2)})\n {\'pearson\': 1.0, \'spearmanr\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'cola\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'matthews_correlation\': 1.0}\n'
def _snake_case ( lowercase__ : Any , lowercase__ : List[str] ) -> Dict:
'''simple docstring'''
return float((preds == labels).mean() )
def _snake_case ( lowercase__ : Union[str, Any] , lowercase__ : str ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase_ :Any = simple_accuracy(lowerCamelCase__ , lowerCamelCase__ )
    lowerCAmelCase_ :Optional[int] = float(f1_score(y_true=lowerCamelCase__ , y_pred=lowerCamelCase__ ) )
return {
"accuracy": acc,
"f1": fa,
}
def _snake_case ( lowercase__ : int , lowercase__ : int ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ :int = float(pearsonr(lowerCamelCase__ , lowerCamelCase__ )[0] )
lowerCAmelCase_ :int = float(spearmanr(lowerCamelCase__ , lowerCamelCase__ )[0] )
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _SCREAMING_SNAKE_CASE ( datasets.Metric ):
def __lowerCAmelCase ( self ) -> Dict:
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"sst2\", \"mnli\", \"mnli_mismatched\", \"mnli_matched\", """
"""\"cola\", \"stsb\", \"mrpc\", \"qqp\", \"qnli\", \"rte\", \"wnli\", \"hans\"]""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""int64""" if self.config_name != """stsb""" else """float32""" ),
"""references""": datasets.Value("""int64""" if self.config_name != """stsb""" else """float32""" ),
} ) , codebase_urls=[] , reference_urls=[] , format="""numpy""" , )
def __lowerCAmelCase ( self , __A , __A ) -> Dict:
if self.config_name == "cola":
return {"matthews_correlation": matthews_corrcoef(__A , __A )}
elif self.config_name == "stsb":
return pearson_and_spearman(__A , __A )
elif self.config_name in ["mrpc", "qqp"]:
return acc_and_fa(__A , __A )
elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
return {"accuracy": simple_accuracy(__A , __A )}
else:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"sst2\", \"mnli\", \"mnli_mismatched\", \"mnli_matched\", """
"""\"cola\", \"stsb\", \"mrpc\", \"qqp\", \"qnli\", \"rte\", \"wnli\", \"hans\"]""" )
| 352 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class _SCREAMING_SNAKE_CASE ( A__ ):
UpperCAmelCase_ :Dict = (
"This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."
"It takes two arguments named `image` which should be the original image, and `label` which should be a text "
"describing the elements what should be identified in the segmentation mask. The tool returns the mask."
)
UpperCAmelCase_ :List[str] = "CIDAS/clipseg-rd64-refined"
UpperCAmelCase_ :List[Any] = "image_segmenter"
UpperCAmelCase_ :Optional[int] = CLIPSegForImageSegmentation
UpperCAmelCase_ :Tuple = ["image", "text"]
UpperCAmelCase_ :Dict = ["image"]
def __init__( self , *__A , **__A ) -> Optional[Any]:
requires_backends(self , ["""vision"""] )
super().__init__(*__A , **__A )
def __lowerCAmelCase ( self , __A , __A ) -> Any:
return self.pre_processor(text=[label] , images=[image] , padding=__A , return_tensors="""pt""" )
def __lowerCAmelCase ( self , __A ) -> Tuple:
with torch.no_grad():
lowerCAmelCase_ :Dict = self.model(**__A ).logits
return logits
def __lowerCAmelCase ( self , __A ) -> Tuple:
lowerCAmelCase_ :Optional[int] = outputs.cpu().detach().numpy()
lowerCAmelCase_ :List[str] = 0
lowerCAmelCase_ :str = 1
        return Image.fromarray((array * 255).astype(np.uint8 ) )
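# A sketch of the decode step above: threshold the logits at zero and render
# a binary PIL mask; the (64, 64) shape is illustrative.
def logits_to_mask(logits: np.ndarray) -> Image.Image:
    array = logits.copy()
    array[array <= 0] = 0  # background
    array[array > 0] = 1   # pixels matching the label
    return Image.fromarray((array * 255).astype(np.uint8))

print(logits_to_mask(np.random.randn(64, 64)).mode)  # 'L', 8-bit grayscale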
| 1 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCAmelCase = {
'configuration_luke': ['LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LukeConfig'],
'tokenization_luke': ['LukeTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'LUKE_PRETRAINED_MODEL_ARCHIVE_LIST',
'LukeForEntityClassification',
'LukeForEntityPairClassification',
'LukeForEntitySpanClassification',
'LukeForMultipleChoice',
'LukeForQuestionAnswering',
'LukeForSequenceClassification',
'LukeForTokenClassification',
'LukeForMaskedLM',
'LukeModel',
'LukePreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
from .tokenization_luke import LukeTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_luke import (
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeForMultipleChoice,
LukeForQuestionAnswering,
LukeForSequenceClassification,
LukeForTokenClassification,
LukeModel,
LukePreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
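# A minimal sketch of what _LazyModule achieves, using PEP 562 module-level
# __getattr__; the submodule names below are illustrative and must exist next
# to the module for the deferred import to succeed.
import importlib

_lazy_structure = {
    "configuration_luke": ["LukeConfig"],
    "tokenization_luke": ["LukeTokenizer"],
}
_attr_to_module = {a: m for m, attrs in _lazy_structure.items() for a in attrs}

def __getattr__(name):
    if name in _attr_to_module:
        module = importlib.import_module("." + _attr_to_module[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")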
| 353 |
"""simple docstring"""
def _snake_case ( lowercase__ : list , lowercase__ : list , lowercase__ : int , lowercase__ : int , lowercase__ : int ) -> int:
'''simple docstring'''
if index == number_of_items:
return 0
lowerCAmelCase_ :Any = 0
lowerCAmelCase_ :str = 0
lowerCAmelCase_ :Dict = knapsack(lowercase__ , lowercase__ , lowercase__ , lowercase__ , index + 1 )
if weights[index] <= max_weight:
lowerCAmelCase_ :str = values[index] + knapsack(
lowercase__ , lowercase__ , lowercase__ , max_weight - weights[index] , index + 1 )
return max(lowercase__ , lowercase__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
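# A de-obfuscated sketch of the 0/1 knapsack recursion above: at each index
# either skip the item, or take it (if it fits) and recurse on the reduced
# capacity.
def knapsack_clean(weights, values, number_of_items, max_weight, index=0):
    if index == number_of_items:
        return 0
    skip = knapsack_clean(weights, values, number_of_items, max_weight, index + 1)
    take = 0
    if weights[index] <= max_weight:
        take = values[index] + knapsack_clean(
            weights, values, number_of_items, max_weight - weights[index], index + 1
        )
    return max(skip, take)

assert knapsack_clean([1, 2, 4, 5], [5, 4, 8, 6], 4, 5) == 13  # take items 0 and 2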
| 1 | 0 |
"""simple docstring"""
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class _SCREAMING_SNAKE_CASE :
UpperCAmelCase_ :Optional[Union[str, Path]] = None
UpperCAmelCase_ :bool = False
UpperCAmelCase_ :bool = False
UpperCAmelCase_ :bool = False
UpperCAmelCase_ :Optional[Dict] = None
UpperCAmelCase_ :Optional[str] = None
UpperCAmelCase_ :bool = False
UpperCAmelCase_ :bool = False
UpperCAmelCase_ :bool = False
UpperCAmelCase_ :bool = True
UpperCAmelCase_ :Optional[int] = None
UpperCAmelCase_ :int = 1
UpperCAmelCase_ :Optional[Union[str, bool]] = None
UpperCAmelCase_ :bool = False
UpperCAmelCase_ :Optional[Dict] = None
UpperCAmelCase_ :Optional[str] = None
def __lowerCAmelCase ( self ) -> "DownloadConfig":
return self.__class__(**{k: copy.deepcopy(_SCREAMING_SNAKE_CASE ) for k, v in self.__dict__.items()} )
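# A sketch of the copy pattern above: rebuild the dataclass from a deep copy
# of its own __dict__ so mutable fields (dicts, lists) are not shared. The
# field names here are illustrative.
import copy as _copy
from dataclasses import dataclass as _dataclass, field as _field

@_dataclass
class _ConfigSketch:
    num_proc: int = 1
    storage_options: dict = _field(default_factory=dict)

    def copy(self) -> "_ConfigSketch":
        return self.__class__(**{k: _copy.deepcopy(v) for k, v in self.__dict__.items()})

_a = _ConfigSketch(storage_options={"anon": True})
_b = _a.copy()
_b.storage_options["anon"] = False
assert _a.storage_options["anon"] is True  # the copy does not alias the dict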
| 354 |
"""simple docstring"""
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def _snake_case ( lowercase__ : bool = True , *lowercase__ : Optional[int] , **lowercase__ : str ) -> Optional[Any]:
'''simple docstring'''
if not is_tqdm_available():
raise ImportError("""Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.""" )
lowerCAmelCase_ :Tuple = False
if main_process_only:
lowerCAmelCase_ :Dict = PartialState().local_process_index == 0
return _tqdm(*lowercase__ , **lowercase__ , disable=lowercase__ )
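# A framework-free sketch of a main-process-only progress bar; reading
# LOCAL_RANK is an assumed convention (torchrun sets it), not part of the
# snippet above.
import os
from tqdm.auto import tqdm as _tqdm_sketch

def rank_zero_tqdm(*args, main_process_only=True, **kwargs):
    disable = main_process_only and int(os.environ.get("LOCAL_RANK", "0")) != 0
    return _tqdm_sketch(*args, disable=disable, **kwargs)

for _ in rank_zero_tqdm(range(3), desc="steps"):
    pass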
| 1 | 0 |
"""simple docstring"""
import os
from typing import Dict, List, Tuple, TypeVar, Union
__UpperCAmelCase = TypeVar('T')
__UpperCAmelCase = Union[List[T], Tuple[T, ...]]
__UpperCAmelCase = Union[T, List[T], Dict[str, T]]
__UpperCAmelCase = Union[str, bytes, os.PathLike]
| 355 |
"""simple docstring"""
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
__UpperCAmelCase = get_tests_dir('fixtures/dummy-config.json')
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __lowerCAmelCase ( self ) -> Dict:
lowerCAmelCase_ :int = 0
def __lowerCAmelCase ( self ) -> List[str]:
self.assertIsNotNone(transformers.models.auto.__spec__ )
self.assertIsNotNone(importlib.util.find_spec("""transformers.models.auto""" ) )
def __lowerCAmelCase ( self ) -> Tuple:
lowerCAmelCase_ :Tuple = AutoConfig.from_pretrained("""bert-base-uncased""" )
self.assertIsInstance(__A , __A )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
lowerCAmelCase_ :int = AutoConfig.from_pretrained(__A )
self.assertIsInstance(__A , __A )
def __lowerCAmelCase ( self ) -> Any:
lowerCAmelCase_ :Any = AutoConfig.from_pretrained(__A )
self.assertIsInstance(__A , __A )
def __lowerCAmelCase ( self ) -> Dict:
lowerCAmelCase_ :int = AutoConfig.for_model("""roberta""" )
self.assertIsInstance(__A , __A )
def __lowerCAmelCase ( self ) -> Tuple:
with tempfile.TemporaryDirectory() as tmp_dir:
# This model name contains bert and roberta, but roberta ends up being picked.
lowerCAmelCase_ :int = os.path.join(__A , """fake-roberta""" )
os.makedirs(__A , exist_ok=__A )
with open(os.path.join(__A , """config.json""" ) , """w""" ) as f:
f.write(json.dumps({} ) )
lowerCAmelCase_ :Any = AutoConfig.from_pretrained(__A )
self.assertEqual(type(__A ) , __A )
def __lowerCAmelCase ( self ) -> Optional[int]:
try:
AutoConfig.register("""custom""" , __A )
# Wrong model type will raise an error
with self.assertRaises(__A ):
AutoConfig.register("""model""" , __A )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__A ):
AutoConfig.register("""bert""" , __A )
# Now that the config is registered, it can be used as any other config with the auto-API
lowerCAmelCase_ :Union[str, Any] = CustomConfig()
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__A )
lowerCAmelCase_ :Optional[int] = AutoConfig.from_pretrained(__A )
self.assertIsInstance(__A , __A )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
def __lowerCAmelCase ( self ) -> Tuple:
with self.assertRaisesRegex(
__A , """bert-base is not a local folder and is not a valid model identifier""" ):
lowerCAmelCase_ :List[str] = AutoConfig.from_pretrained("""bert-base""" )
def __lowerCAmelCase ( self ) -> Any:
with self.assertRaisesRegex(
__A , r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
lowerCAmelCase_ :Dict = AutoConfig.from_pretrained(__A , revision="""aaaaaa""" )
def __lowerCAmelCase ( self ) -> int:
with self.assertRaisesRegex(
__A , """hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.""" , ):
lowerCAmelCase_ :Union[str, Any] = AutoConfig.from_pretrained("""hf-internal-testing/no-config-test-repo""" )
def __lowerCAmelCase ( self ) -> Tuple:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(__A ):
lowerCAmelCase_ :Tuple = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__A ):
lowerCAmelCase_ :List[str] = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" , trust_remote_code=__A )
lowerCAmelCase_ :str = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" , trust_remote_code=__A )
self.assertEqual(config.__class__.__name__ , """NewModelConfig""" )
# Test config can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__A )
lowerCAmelCase_ :Dict = AutoConfig.from_pretrained(__A , trust_remote_code=__A )
self.assertEqual(reloaded_config.__class__.__name__ , """NewModelConfig""" )
def __lowerCAmelCase ( self ) -> int:
class _SCREAMING_SNAKE_CASE ( A__ ):
UpperCAmelCase_ :int = "new-model"
try:
AutoConfig.register("""new-model""" , __A )
# If remote code is not set, the default is to use local
lowerCAmelCase_ :Any = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" )
self.assertEqual(config.__class__.__name__ , """NewModelConfigLocal""" )
# If remote code is disabled, we load the local one.
lowerCAmelCase_ :Union[str, Any] = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" , trust_remote_code=__A )
self.assertEqual(config.__class__.__name__ , """NewModelConfigLocal""" )
# If remote is enabled, we load from the Hub
lowerCAmelCase_ :Optional[Any] = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" , trust_remote_code=__A )
self.assertEqual(config.__class__.__name__ , """NewModelConfig""" )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
| 1 | 0 |
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
| 356 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase ):
UpperCAmelCase_ :str = GPTSanJapaneseTokenizer
UpperCAmelCase_ :Optional[int] = False
UpperCAmelCase_ :Optional[int] = {"do_clean_text": False, "add_prefix_space": False}
def __lowerCAmelCase ( self ) -> Tuple:
super().setUp()
# fmt: off
lowerCAmelCase_ :Dict = ["""こん""", """こんに""", """にちは""", """ばんは""", """世界,㔺界""", """、""", """。""", """<BR>""", """<SP>""", """<TAB>""", """<URL>""", """<EMAIL>""", """<TEL>""", """<DATE>""", """<PRICE>""", """<BLOCK>""", """<KIGOU>""", """<U2000U2BFF>""", """<|emoji1|>""", """<unk>""", """<|bagoftoken|>""", """<|endoftext|>"""]
# fmt: on
lowerCAmelCase_ :List[str] = {"""emoji""": {"""\ud83d\ude00""": """<|emoji1|>"""}, """emoji_inv""": {"""<|emoji1|>""": """\ud83d\ude00"""}} # 😀
lowerCAmelCase_ :int = {"""unk_token""": """<unk>"""}
lowerCAmelCase_ :Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
lowerCAmelCase_ :int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""emoji_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
with open(self.emoji_file , """w""" ) as emoji_writer:
emoji_writer.write(json.dumps(__A ) )
def __lowerCAmelCase ( self , **__A ) -> int:
kwargs.update(self.special_tokens_map )
return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **__A )
def __lowerCAmelCase ( self , __A ) -> Dict:
lowerCAmelCase_ :List[Any] = """こんにちは、世界。 \nこんばんは、㔺界。😀"""
lowerCAmelCase_ :Optional[int] = """こんにちは、世界。 \nこんばんは、世界。😀"""
return input_text, output_text
def __lowerCAmelCase ( self , __A ) -> str:
lowerCAmelCase_ , lowerCAmelCase_ :Tuple = self.get_input_output_texts(__A )
lowerCAmelCase_ :List[str] = tokenizer.encode(__A , add_special_tokens=__A )
lowerCAmelCase_ :str = tokenizer.decode(__A , clean_up_tokenization_spaces=__A )
return text, ids
def __lowerCAmelCase ( self ) -> str:
pass # TODO add if relevant
def __lowerCAmelCase ( self ) -> Dict:
pass # TODO add if relevant
def __lowerCAmelCase ( self ) -> int:
pass # TODO add if relevant
def __lowerCAmelCase ( self ) -> Dict:
lowerCAmelCase_ :Union[str, Any] = self.get_tokenizer()
# Testing tokenization
lowerCAmelCase_ :Optional[int] = """こんにちは、世界。 こんばんは、㔺界。"""
lowerCAmelCase_ :Any = ["""こん""", """にちは""", """、""", """世界""", """。""", """<SP>""", """こん""", """ばんは""", """、""", """㔺界""", """。"""]
lowerCAmelCase_ :Tuple = tokenizer.tokenize(__A )
self.assertListEqual(__A , __A )
# Testing conversion to ids without special tokens
lowerCAmelCase_ :List[Any] = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
lowerCAmelCase_ :List[str] = tokenizer.convert_tokens_to_ids(__A )
self.assertListEqual(__A , __A )
# Testing conversion to ids with special tokens
lowerCAmelCase_ :Any = tokens + [tokenizer.unk_token]
lowerCAmelCase_ :Union[str, Any] = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
lowerCAmelCase_ :Union[str, Any] = tokenizer.convert_tokens_to_ids(__A )
self.assertListEqual(__A , __A )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
lowerCAmelCase_ :int = self.get_tokenizer()
# Testing tokenization
lowerCAmelCase_ :Optional[int] = """こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"""
lowerCAmelCase_ :str = """こんにちは、、、、世界。こんばんは、、、、世界。"""
lowerCAmelCase_ :str = tokenizer.encode(__A )
lowerCAmelCase_ :Dict = tokenizer.decode(__A )
self.assertEqual(__A , __A )
@slow
def __lowerCAmelCase ( self ) -> int:
lowerCAmelCase_ :Tuple = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
# Testing tokenization
lowerCAmelCase_ :Optional[Any] = """こんにちは、世界。"""
lowerCAmelCase_ :Any = """こんばんは、㔺界。😀"""
lowerCAmelCase_ :Optional[Any] = """こんにちは、世界。こんばんは、世界。😀"""
lowerCAmelCase_ :List[Any] = tokenizer.encode(prefix_text + input_text )
lowerCAmelCase_ :List[str] = tokenizer.encode("""""" , prefix_text=prefix_text + input_text )
lowerCAmelCase_ :int = tokenizer.encode(__A , prefix_text=__A )
lowerCAmelCase_ :int = tokenizer.decode(__A )
lowerCAmelCase_ :Dict = tokenizer.decode(__A )
lowerCAmelCase_ :Tuple = tokenizer.decode(__A )
self.assertEqual(__A , __A )
self.assertEqual(__A , __A )
self.assertEqual(__A , __A )
@slow
def __lowerCAmelCase ( self ) -> Any:
lowerCAmelCase_ :int = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
# Testing tokenization
lowerCAmelCase_ :List[Any] = """こんにちは、世界。"""
lowerCAmelCase_ :Optional[int] = """こんばんは、㔺界。😀"""
lowerCAmelCase_ :List[str] = len(tokenizer.encode(__A ) ) - 2
lowerCAmelCase_ :Dict = len(tokenizer.encode(__A ) ) - 2
lowerCAmelCase_ :int = [1] + [0] * (len_prefix + len_text + 1)
lowerCAmelCase_ :List[Any] = [1] * (len_prefix + len_text + 1) + [0]
lowerCAmelCase_ :Dict = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
lowerCAmelCase_ :List[Any] = tokenizer(prefix_text + input_text ).token_type_ids
lowerCAmelCase_ :List[str] = tokenizer("""""" , prefix_text=prefix_text + input_text ).token_type_ids
lowerCAmelCase_ :List[Any] = tokenizer(__A , prefix_text=__A ).token_type_ids
self.assertListEqual(__A , __A )
self.assertListEqual(__A , __A )
self.assertListEqual(__A , __A )
@slow
def __lowerCAmelCase ( self ) -> int:
lowerCAmelCase_ :Dict = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
lowerCAmelCase_ :int = tokenizer.encode("""あンいワ""" )
lowerCAmelCase_ :Optional[Any] = tokenizer.encode("""""" , prefix_text="""あンいワ""" )
lowerCAmelCase_ :int = tokenizer.encode("""いワ""" , prefix_text="""あン""" )
self.assertEqual(tokenizer.decode(__A ) , tokenizer.decode(__A ) )
self.assertEqual(tokenizer.decode(__A ) , tokenizer.decode(__A ) )
self.assertNotEqual(__A , __A )
self.assertNotEqual(__A , __A )
self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token
self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token
@slow
def __lowerCAmelCase ( self ) -> Optional[Any]:
lowerCAmelCase_ :Union[str, Any] = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
lowerCAmelCase_ :int = [["""武田信玄""", """は、"""], ["""織田信長""", """の配下の、"""]]
lowerCAmelCase_ :Dict = tokenizer(__A , padding=__A )
lowerCAmelCase_ :Any = tokenizer.batch_encode_plus(__A , padding=__A )
# fmt: off
lowerCAmelCase_ :int = [[3_5993, 8640, 2_5948, 3_5998, 3_0647, 3_5675, 3_5999, 3_5999], [3_5993, 1_0382, 9868, 3_5998, 3_0646, 9459, 3_0646, 3_5675]]
lowerCAmelCase_ :List[str] = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
lowerCAmelCase_ :int = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
# fmt: on
self.assertListEqual(x_token.input_ids , __A )
self.assertListEqual(x_token.token_type_ids , __A )
self.assertListEqual(x_token.attention_mask , __A )
self.assertListEqual(x_token_a.input_ids , __A )
self.assertListEqual(x_token_a.token_type_ids , __A )
self.assertListEqual(x_token_a.attention_mask , __A )
def __lowerCAmelCase ( self ) -> Tuple:
# Intentionally convert some words to accommodate character fluctuations unique to Japanese
pass
def __lowerCAmelCase ( self ) -> str:
# tokenizer has no padding token
pass
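# A sketch of the prefix-LM mask pattern the token_type_ids test above
# checks: the leading separator and the prefix are marked 1, the trailing
# text 0. This mirrors only the third expected pattern in that test.
def prefix_mask(len_prefix: int, len_text: int) -> list:
    return [1] + [1] * len_prefix + [0] * (len_text + 1)

print(prefix_mask(2, 3))  # [1, 1, 1, 0, 0, 0, 0]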
| 1 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__UpperCAmelCase = {
'configuration_electra': ['ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ElectraConfig', 'ElectraOnnxConfig'],
'tokenization_electra': ['ElectraTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ['ElectraTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'ElectraForCausalLM',
'ElectraForMaskedLM',
'ElectraForMultipleChoice',
'ElectraForPreTraining',
'ElectraForQuestionAnswering',
'ElectraForSequenceClassification',
'ElectraForTokenClassification',
'ElectraModel',
'ElectraPreTrainedModel',
'load_tf_weights_in_electra',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFElectraForMaskedLM',
'TFElectraForMultipleChoice',
'TFElectraForPreTraining',
'TFElectraForQuestionAnswering',
'TFElectraForSequenceClassification',
'TFElectraForTokenClassification',
'TFElectraModel',
'TFElectraPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'FlaxElectraForCausalLM',
'FlaxElectraForMaskedLM',
'FlaxElectraForMultipleChoice',
'FlaxElectraForPreTraining',
'FlaxElectraForQuestionAnswering',
'FlaxElectraForSequenceClassification',
'FlaxElectraForTokenClassification',
'FlaxElectraModel',
'FlaxElectraPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 357 |
"""simple docstring"""
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
__UpperCAmelCase = pd.read_csv(
'https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/'
'position_salaries.csv'
)
__UpperCAmelCase = dataset.iloc[:, 1:2].values
__UpperCAmelCase = dataset.iloc[:, 2].values
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = train_test_split(X, y, test_size=0.2, random_state=0)
__UpperCAmelCase = PolynomialFeatures(degree=4)
__UpperCAmelCase = poly_reg.fit_transform(X)
__UpperCAmelCase = LinearRegression()
pol_reg.fit(X_poly, y)
def _snake_case ( ) -> str:
'''simple docstring'''
plt.scatter(lowercase__ , lowercase__ , color="""red""" )
plt.plot(lowercase__ , pol_reg.predict(poly_reg.fit_transform(lowercase__ ) ) , color="""blue""" )
plt.title("""Truth or Bluff (Linear Regression)""" )
plt.xlabel("""Position level""" )
plt.ylabel("""Salary""" )
plt.show()
if __name__ == "__main__":
viz_polymonial()
# Predicting a new result with Polymonial Regression
pol_reg.predict(poly_reg.fit_transform([[5.5]]))
# output should be 132148.43750003
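# The same degree-4 polynomial fit expressed as a scikit-learn Pipeline on
# synthetic data, so it runs without the remote CSV.
import numpy as np
from sklearn.pipeline import make_pipeline

X_syn = np.arange(1, 11, dtype=float).reshape(-1, 1)
y_syn = 0.5 * X_syn.ravel() ** 4 + 3.0  # quartic target the model can recover
model = make_pipeline(PolynomialFeatures(degree=4), LinearRegression())
model.fit(X_syn, y_syn)
print(model.predict([[5.5]]))  # ~460.53 == 0.5 * 5.5**4 + 3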
| 1 | 0 |
__UpperCAmelCase = 'Tobias Carryer'
from time import time
class _SCREAMING_SNAKE_CASE :
def __init__( self , __A , __A , __A , __A=int(time() ) ) -> Union[str, Any]: # noqa: B008
lowerCAmelCase_ :Tuple = multiplier
lowerCAmelCase_ :Union[str, Any] = increment
lowerCAmelCase_ :Any = modulo
lowerCAmelCase_ :Optional[int] = seed
def __lowerCAmelCase ( self ) -> Any:
lowerCAmelCase_ :Any = (self.multiplier * self.seed + self.increment) % self.modulo
return self.seed
if __name__ == "__main__":
# Show the LCG in action.
__UpperCAmelCase = LinearCongruentialGenerator(1_66_45_25, 10_13_90_42_23, 2 << 31)
while True:
print(lcg.next_number())
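# A de-obfuscated sketch of the generator above: x' = (a * x + c) mod m with
# the classic Numerical Recipes constants; note that 2 << 31 == 2**32.
from time import time as _time

class LCGSketch:
    def __init__(self, multiplier, increment, modulo, seed=None):
        self.multiplier, self.increment, self.modulo = multiplier, increment, modulo
        self.seed = int(_time()) if seed is None else seed

    def next_number(self):
        self.seed = (self.multiplier * self.seed + self.increment) % self.modulo
        return self.seed

_gen = LCGSketch(1664525, 1013904223, 2**32, seed=42)
print([_gen.next_number() for _ in range(3)])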
| 358 |
"""simple docstring"""
from __future__ import annotations
__UpperCAmelCase = 1.6021e-19 # units = C
def _snake_case ( lowercase__ : float , lowercase__ : float , lowercase__ : float , ) -> tuple[str, float]:
'''simple docstring'''
if (conductivity, electron_conc, mobility).count(0 ) != 1:
raise ValueError("""You cannot supply more or less than 2 values""" )
elif conductivity < 0:
raise ValueError("""Conductivity cannot be negative""" )
elif electron_conc < 0:
raise ValueError("""Electron concentration cannot be negative""" )
elif mobility < 0:
raise ValueError("""mobility cannot be negative""" )
elif conductivity == 0:
return (
"conductivity",
mobility * electron_conc * ELECTRON_CHARGE,
)
elif electron_conc == 0:
return (
"electron_conc",
conductivity / (mobility * ELECTRON_CHARGE),
)
else:
return (
"mobility",
conductivity / (electron_conc * ELECTRON_CHARGE),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
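# Usage sketch of the relation above, conductivity = electron_conc *
# ELECTRON_CHARGE * mobility, solved here for mobility; the numbers are
# illustrative, not a real material.
def solve_mobility(conductivity: float, electron_conc: float) -> float:
    return conductivity / (electron_conc * ELECTRON_CHARGE)

print(solve_mobility(5.12, 1e20))  # ~0.3196 m^2/(V*s)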
| 1 | 0 |
def _snake_case ( lowercase__ : int ) -> int:
'''simple docstring'''
lowerCAmelCase_ :Union[str, Any] = 0
while num > 0:
digit_sum += num % 1_0
num //= 1_0
return digit_sum
def _snake_case ( lowercase__ : int = 1_0_0 ) -> int:
'''simple docstring'''
lowerCAmelCase_ :Tuple = 1
lowerCAmelCase_ :Any = 2
for i in range(2 , max_n + 1 ):
lowerCAmelCase_ :Dict = pre_numerator
lowerCAmelCase_ :List[Any] = 2 * i // 3 if i % 3 == 0 else 1
lowerCAmelCase_ :Tuple = cur_numerator
lowerCAmelCase_ :Any = e_cont * pre_numerator + temp
return sum_digits(a__ )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 359 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
__UpperCAmelCase = logging.get_logger(__name__)
class _SCREAMING_SNAKE_CASE ( A__ ):
def __init__( self , *__A , **__A ) -> None:
warnings.warn(
"""The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use CLIPImageProcessor instead.""" , __A , )
super().__init__(*__A , **__A )
| 1 | 0 |
"""simple docstring"""
def _snake_case ( lowercase__ : int ) -> Any:
'''simple docstring'''
lowerCAmelCase_ :Optional[int] = [[0 for _ in range(lowercase__ )] for _ in range(m + 1 )]
for i in range(m + 1 ):
lowerCAmelCase_ :List[str] = 1
for n in range(m + 1 ):
for k in range(1 , lowercase__ ):
memo[n][k] += memo[n][k - 1]
if n - k > 0:
memo[n][k] += memo[n - k - 1][k]
return memo[m][m - 1]
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
__UpperCAmelCase = int(input('Enter a number: ').strip())
print(partition(n))
except ValueError:
print('Please enter a number.')
else:
try:
__UpperCAmelCase = int(sys.argv[1])
print(partition(n))
except ValueError:
print('Please pass a number.')
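# A de-obfuscated sketch of the partition table above: memo[n][k] counts
# partitions of n into parts of size at most k + 1 (memo[i][0] = 1 seeds the
# all-ones partition), and the answer for m is memo[m][m - 1].
def partition_clean(m: int) -> int:
    memo = [[0] * m for _ in range(m + 1)]
    for i in range(m + 1):
        memo[i][0] = 1
    for n in range(m + 1):
        for k in range(1, m):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]
    return memo[m][m - 1]

assert partition_clean(5) == 7  # 5, 4+1, 3+2, 3+1+1, 2+2+1, 2+1+1+1, 1*5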
| 360 |
"""simple docstring"""
from itertools import zip_longest
import requests
from bs4 import BeautifulSoup
from pandas import DataFrame
def _snake_case ( lowercase__ : str = "laptop" ) -> DataFrame:
'''simple docstring'''
lowerCAmelCase_ :Dict = f"""https://www.amazon.in/laptop/s?k={product}"""
lowerCAmelCase_ :List[str] = {
"""User-Agent""": """Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36
(KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36""",
"""Accept-Language""": """en-US, en;q=0.5""",
}
lowerCAmelCase_ :List[Any] = BeautifulSoup(requests.get(lowercase__ , headers=lowercase__ ).text )
# Initialize a Pandas dataframe with the column titles
lowerCAmelCase_ :Union[str, Any] = DataFrame(
columns=[
"""Product Title""",
"""Product Link""",
"""Current Price of the product""",
"""Product Rating""",
"""MRP of the product""",
"""Discount""",
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
"""div""" , attrs={"""class""": """s-result-item""", """data-component-type""": """s-search-result"""} , ) , soup.find_all("""div""" , attrs={"""class""": """a-row a-size-base a-color-base"""} ) , ):
try:
            lowerCAmelCase_ :str = item.h2.text
            lowerCAmelCase_ :Dict = """https://www.amazon.in/""" + item.h2.a["""href"""]
lowerCAmelCase_ :int = item.find("""span""" , attrs={"""class""": """a-offscreen"""} ).text
try:
lowerCAmelCase_ :Dict = item.find("""span""" , attrs={"""class""": """a-icon-alt"""} ).text
except AttributeError:
lowerCAmelCase_ :int = """Not available"""
try:
lowerCAmelCase_ :str = (
"""₹"""
+ item.find(
"""span""" , attrs={"""class""": """a-price a-text-price"""} ).text.split("""₹""" )[1]
)
except AttributeError:
lowerCAmelCase_ :Optional[Any] = """"""
try:
lowerCAmelCase_ :str = float(
(
(
float(product_mrp.strip("""₹""" ).replace(""",""" , """""" ) )
- float(product_price.strip("""₹""" ).replace(""",""" , """""" ) )
)
/ float(product_mrp.strip("""₹""" ).replace(""",""" , """""" ) )
)
* 1_0_0 )
except ValueError:
lowerCAmelCase_ :Union[str, Any] = float("""nan""" )
except AttributeError:
pass
lowerCAmelCase_ :Any = [
product_title,
product_link,
product_price,
product_rating,
product_mrp,
discount,
]
lowerCAmelCase_ :List[Any] = """ """
lowerCAmelCase_ :Tuple = """ """
data_frame.index += 1
return data_frame
if __name__ == "__main__":
__UpperCAmelCase = 'headphones'
get_amazon_product_data(product).to_csv(F"""Amazon Product Data for {product}.csv""")
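# The discount computation above, isolated from the scraping so it can be
# tested without network access.
def discount_percent(product_mrp: str, product_price: str) -> float:
    def _to_float(rupees: str) -> float:
        return float(rupees.strip("₹").replace(",", ""))
    mrp = _to_float(product_mrp)
    return (mrp - _to_float(product_price)) / mrp * 100

print(round(discount_percent("₹1,000", "₹750"), 1))  # 25.0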
| 1 | 0 |
"""simple docstring"""
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __init__( self , __A , __A = True , __A = None , __A = 32 , __A = True , __A = 1 / 255 , __A = True , __A = True , __A = [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3] , __A = [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1] , __A = True , __A=7 , __A=30 , __A=400 , __A=3 , ) -> Tuple:
lowerCAmelCase_ :Tuple = parent
lowerCAmelCase_ :int = do_resize
lowerCAmelCase_ :Dict = size if size is not None else {"""shortest_edge""": 288}
lowerCAmelCase_ :Any = size_divisor
lowerCAmelCase_ :Tuple = do_rescale
lowerCAmelCase_ :Optional[Any] = rescale_factor
lowerCAmelCase_ :Dict = do_normalize
lowerCAmelCase_ :Dict = do_center_crop
lowerCAmelCase_ :List[str] = image_mean
lowerCAmelCase_ :Union[str, Any] = image_std
lowerCAmelCase_ :Tuple = do_pad
lowerCAmelCase_ :Dict = batch_size
lowerCAmelCase_ :Optional[Any] = num_channels
lowerCAmelCase_ :Any = min_resolution
lowerCAmelCase_ :int = max_resolution
def __lowerCAmelCase ( self ) -> List[str]:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
def __lowerCAmelCase ( self , __A , __A=False ) -> List[Any]:
if not batched:
lowerCAmelCase_ :Dict = self.size["""shortest_edge"""]
lowerCAmelCase_ :List[str] = image_inputs[0]
if isinstance(_a , Image.Image ):
lowerCAmelCase_ :Union[str, Any] = image.size
else:
lowerCAmelCase_ :Optional[Any] = image.shape[1], image.shape[2]
lowerCAmelCase_ :List[Any] = size / min(_a , _a )
if h < w:
lowerCAmelCase_ :str = size, scale * w
else:
lowerCAmelCase_ :str = scale * h, size
lowerCAmelCase_ :str = int((1333 / 800) * size )
if max(_a , _a ) > max_size:
lowerCAmelCase_ :List[str] = max_size / max(_a , _a )
lowerCAmelCase_ :Any = newh * scale
lowerCAmelCase_ :Union[str, Any] = neww * scale
lowerCAmelCase_ :Dict = int(newh + 0.5 ), int(neww + 0.5 )
lowerCAmelCase_ :int = (
newh // self.size_divisor * self.size_divisor,
neww // self.size_divisor * self.size_divisor,
)
else:
lowerCAmelCase_ :Dict = []
for image in image_inputs:
lowerCAmelCase_ :str = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
lowerCAmelCase_ :Tuple = max(_a , key=lambda __A : item[0] )[0]
lowerCAmelCase_ :Dict = max(_a , key=lambda __A : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class _SCREAMING_SNAKE_CASE ( UpperCamelCase_ , unittest.TestCase ):
UpperCAmelCase_ :Dict = BridgeTowerImageProcessor if is_vision_available() else None
def __lowerCAmelCase ( self ) -> List[str]:
lowerCAmelCase_ :str = BridgeTowerImageProcessingTester(self )
@property
def __lowerCAmelCase ( self ) -> Union[str, Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def __lowerCAmelCase ( self ) -> Optional[Any]:
lowerCAmelCase_ :List[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_a , """image_mean""" ) )
self.assertTrue(hasattr(_a , """image_std""" ) )
self.assertTrue(hasattr(_a , """do_normalize""" ) )
self.assertTrue(hasattr(_a , """do_resize""" ) )
self.assertTrue(hasattr(_a , """size""" ) )
self.assertTrue(hasattr(_a , """size_divisor""" ) )
def __lowerCAmelCase ( self ) -> str:
pass
def __lowerCAmelCase ( self ) -> Tuple:
lowerCAmelCase_ :Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCAmelCase_ :List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a )
for image in image_inputs:
self.assertIsInstance(_a , Image.Image )
# Test not batched input
lowerCAmelCase_ :List[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
lowerCAmelCase_ :Any = self.image_processor_tester.get_expected_values(_a )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCAmelCase_ :List[str] = image_processing(_a , return_tensors="""pt""" ).pixel_values
lowerCAmelCase_ :List[Any] = self.image_processor_tester.get_expected_values(_a , batched=_a )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __lowerCAmelCase ( self ) -> Dict:
lowerCAmelCase_ :int = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCAmelCase_ :Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , numpify=_a )
for image in image_inputs:
self.assertIsInstance(_a , np.ndarray )
# Test not batched input
lowerCAmelCase_ :str = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
lowerCAmelCase_ :Union[str, Any] = self.image_processor_tester.get_expected_values(_a )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCAmelCase_ :List[str] = image_processing(_a , return_tensors="""pt""" ).pixel_values
lowerCAmelCase_ :int = self.image_processor_tester.get_expected_values(_a , batched=_a )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __lowerCAmelCase ( self ) -> int:
lowerCAmelCase_ :str = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCAmelCase_ :Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , torchify=_a )
for image in image_inputs:
self.assertIsInstance(_a , torch.Tensor )
# Test not batched input
lowerCAmelCase_ :int = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
lowerCAmelCase_ :Optional[Any] = self.image_processor_tester.get_expected_values(_a )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCAmelCase_ :Dict = image_processing(_a , return_tensors="""pt""" ).pixel_values
lowerCAmelCase_ :Optional[Any] = self.image_processor_tester.get_expected_values(_a , batched=_a )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
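# A standalone sketch of the expected-size computation in the tester above:
# scale the shorter side to `size`, cap the longer side at (1333 / 800) *
# size, then round both sides down to a multiple of `size_divisor`.
def expected_size(h, w, size=288, size_divisor=32):
    scale = size / min(h, w)
    newh, neww = (size, scale * w) if h < w else (scale * h, size)
    max_size = int((1333 / 800) * size)
    if max(newh, neww) > max_size:
        rescale = max_size / max(newh, neww)
        newh, neww = newh * rescale, neww * rescale
    newh, neww = int(newh + 0.5), int(neww + 0.5)
    return newh // size_divisor * size_divisor, neww // size_divisor * size_divisor

print(expected_size(480, 640))  # (288, 384), both divisible by 32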
| 361 |
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __lowerCAmelCase ( self ) -> Optional[int]:
lowerCAmelCase_ :Any = """laion/clap-htsat-unfused"""
lowerCAmelCase_ :Optional[Any] = tempfile.mkdtemp()
def __lowerCAmelCase ( self , **__A ) -> List[Any]:
return RobertaTokenizer.from_pretrained(self.checkpoint , **__A )
def __lowerCAmelCase ( self , **__A ) -> Tuple:
return ClapFeatureExtractor.from_pretrained(self.checkpoint , **__A )
def __lowerCAmelCase ( self ) -> int:
shutil.rmtree(self.tmpdirname )
def __lowerCAmelCase ( self ) -> List[str]:
lowerCAmelCase_ :Optional[Any] = self.get_tokenizer()
lowerCAmelCase_ :Optional[Any] = self.get_feature_extractor()
lowerCAmelCase_ :Optional[int] = ClapProcessor(tokenizer=__A , feature_extractor=__A )
processor.save_pretrained(self.tmpdirname )
lowerCAmelCase_ :Optional[Any] = ClapProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , __A )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , __A )
def __lowerCAmelCase ( self ) -> Tuple:
lowerCAmelCase_ :Dict = ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() )
processor.save_pretrained(self.tmpdirname )
lowerCAmelCase_ :str = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
lowerCAmelCase_ :Dict = self.get_feature_extractor(do_normalize=__A , padding_value=1.0 )
lowerCAmelCase_ :Union[str, Any] = ClapProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=__A , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __A )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.feature_extractor , __A )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
lowerCAmelCase_ :Dict = self.get_feature_extractor()
lowerCAmelCase_ :str = self.get_tokenizer()
lowerCAmelCase_ :List[Any] = ClapProcessor(tokenizer=__A , feature_extractor=__A )
lowerCAmelCase_ :Optional[Any] = floats_list((3, 1000) )
lowerCAmelCase_ :Optional[Any] = feature_extractor(__A , return_tensors="""np""" )
lowerCAmelCase_ :str = processor(audios=__A , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __lowerCAmelCase ( self ) -> List[str]:
lowerCAmelCase_ :List[Any] = self.get_feature_extractor()
lowerCAmelCase_ :Any = self.get_tokenizer()
lowerCAmelCase_ :Optional[int] = ClapProcessor(tokenizer=__A , feature_extractor=__A )
lowerCAmelCase_ :List[Any] = """This is a test string"""
lowerCAmelCase_ :Dict = processor(text=__A )
lowerCAmelCase_ :List[str] = tokenizer(__A )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __lowerCAmelCase ( self ) -> int:
lowerCAmelCase_ :int = self.get_feature_extractor()
lowerCAmelCase_ :Tuple = self.get_tokenizer()
lowerCAmelCase_ :Optional[Any] = ClapProcessor(tokenizer=__A , feature_extractor=__A )
lowerCAmelCase_ :int = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCAmelCase_ :Tuple = processor.batch_decode(__A )
lowerCAmelCase_ :Optional[Any] = tokenizer.batch_decode(__A )
self.assertListEqual(__A , __A )
def __lowerCAmelCase ( self ) -> List[Any]:
lowerCAmelCase_ :Optional[Any] = self.get_feature_extractor()
lowerCAmelCase_ :Any = self.get_tokenizer()
lowerCAmelCase_ :Optional[int] = ClapProcessor(tokenizer=__A , feature_extractor=__A )
self.assertListEqual(
processor.model_input_names[2:] , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , )
| 1 | 0 |
"""simple docstring"""
import os
import string
import sys
__UpperCAmelCase = 1 << 8
__UpperCAmelCase = {
'tab': ord('\t'),
'newline': ord('\r'),
'esc': 27,
'up': 65 + ARROW_KEY_FLAG,
'down': 66 + ARROW_KEY_FLAG,
'right': 67 + ARROW_KEY_FLAG,
'left': 68 + ARROW_KEY_FLAG,
'mod_int': 91,
'undefined': sys.maxsize,
'interrupt': 3,
'insert': 50,
'delete': 51,
'pg_up': 53,
'pg_down': 54,
}
KEYMAP['arrow_begin'] = KEYMAP['up']
KEYMAP['arrow_end'] = KEYMAP['left']
if sys.platform == "win32":
__UpperCAmelCase = []
__UpperCAmelCase = {
b'\xe0H': KEYMAP['up'] - ARROW_KEY_FLAG,
b'\x00H': KEYMAP['up'] - ARROW_KEY_FLAG,
b'\xe0P': KEYMAP['down'] - ARROW_KEY_FLAG,
b'\x00P': KEYMAP['down'] - ARROW_KEY_FLAG,
b'\xe0M': KEYMAP['right'] - ARROW_KEY_FLAG,
b'\x00M': KEYMAP['right'] - ARROW_KEY_FLAG,
b'\xe0K': KEYMAP['left'] - ARROW_KEY_FLAG,
b'\x00K': KEYMAP['left'] - ARROW_KEY_FLAG,
}
for i in range(10):
__UpperCAmelCase = ord(str(i))
def _snake_case ( ) -> Optional[Any]:
'''simple docstring'''
if os.name == "nt":
import msvcrt
lowerCAmelCase_ :Dict = """mbcs"""
# Flush the keyboard buffer
while msvcrt.kbhit():
msvcrt.getch()
if len(lowercase__ ) == 0:
# Read the keystroke
lowerCAmelCase_ :str = msvcrt.getch()
# If it is a prefix char, get second part
if ch in (b"\x00", b"\xe0"):
lowerCAmelCase_ :Optional[Any] = ch + msvcrt.getch()
# Translate actual Win chars to bullet char types
try:
lowerCAmelCase_ :List[Any] = chr(WIN_KEYMAP[cha] )
WIN_CH_BUFFER.append(chr(KEYMAP["""mod_int"""] ) )
WIN_CH_BUFFER.append(lowercase__ )
if ord(lowercase__ ) in (
KEYMAP["insert"] - 1 << 9,
KEYMAP["delete"] - 1 << 9,
KEYMAP["pg_up"] - 1 << 9,
KEYMAP["pg_down"] - 1 << 9,
):
WIN_CH_BUFFER.append(chr(1_2_6 ) )
lowerCAmelCase_ :List[Any] = chr(KEYMAP["""esc"""] )
except KeyError:
lowerCAmelCase_ :str = cha[1]
else:
lowerCAmelCase_ :str = ch.decode(lowercase__ )
else:
lowerCAmelCase_ :int = WIN_CH_BUFFER.pop(0 )
elif os.name == "posix":
import termios
import tty
lowerCAmelCase_ :Any = sys.stdin.fileno()
lowerCAmelCase_ :List[str] = termios.tcgetattr(lowercase__ )
try:
tty.setraw(lowercase__ )
lowerCAmelCase_ :str = sys.stdin.read(1 )
finally:
termios.tcsetattr(lowercase__ , termios.TCSADRAIN , lowercase__ )
return ch
def _snake_case ( ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase_ :int = get_raw_chars()
if ord(lowercase__ ) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
return char
elif ord(lowercase__ ) == KEYMAP["esc"]:
lowerCAmelCase_ :Union[str, Any] = get_raw_chars()
if ord(lowercase__ ) == KEYMAP["mod_int"]:
lowerCAmelCase_ :Optional[Any] = get_raw_chars()
if ord(lowercase__ ) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(lowercase__ ) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
return chr(ord(lowercase__ ) + ARROW_KEY_FLAG )
else:
return KEYMAP["undefined"]
else:
return get_raw_chars()
else:
if char in string.printable:
return char
else:
return KEYMAP["undefined"]
| 362 |
"""simple docstring"""
import os
from math import log10
def _snake_case ( lowercase__ : str = "base_exp.txt" ) -> int:
'''simple docstring'''
lowerCAmelCase_ :float = 0
lowerCAmelCase_ :Union[str, Any] = 0
for i, line in enumerate(open(os.path.join(os.path.dirname(lowercase__ ) , lowercase__ ) ) ):
lowerCAmelCase_ , lowerCAmelCase_ :Union[str, Any] = list(map(lowercase__ , line.split(""",""" ) ) )
        if x * log10(lowercase__ ) > largest:
            lowerCAmelCase_ :Any = x * log10(lowercase__ )
lowerCAmelCase_ :List[Any] = i + 1
return result
if __name__ == "__main__":
print(solution())
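# A de-obfuscated sketch of the comparison above (Project Euler 99 style):
# compare base**exp pairs via exp * log10(base) instead of evaluating the
# huge powers.
def largest_line(pairs) -> int:
    largest, result = 0.0, 0
    for i, (base, exponent) in enumerate(pairs):
        if exponent * log10(base) > largest:
            largest, result = exponent * log10(base), i + 1
    return result

print(largest_line([(2, 11), (3, 7), (632382, 518061)]))  # line 3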
| 1 | 0 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
UpperCAmelCase_ :Union[str, Any] = StableDiffusionInpaintPipeline
UpperCAmelCase_ :Dict = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
UpperCAmelCase_ :Optional[int] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
UpperCAmelCase_ :Tuple = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
UpperCAmelCase_ :Optional[Any] = frozenset([] )
def __lowerCAmelCase ( self ) -> Optional[int]:
torch.manual_seed(0 )
        lowerCAmelCase_ :List[Any] = UNet2DConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=_SCREAMING_SNAKE_CASE , )
lowerCAmelCase_ :Optional[Any] = PNDMScheduler(skip_prk_steps=_SCREAMING_SNAKE_CASE )
torch.manual_seed(0 )
lowerCAmelCase_ :Dict = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
lowerCAmelCase_ :Tuple = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="""gelu""" , projection_dim=512 , )
lowerCAmelCase_ :List[Any] = CLIPTextModel(_SCREAMING_SNAKE_CASE )
lowerCAmelCase_ :Any = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
lowerCAmelCase_ :Tuple = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def __lowerCAmelCase ( self , __A , __A=0 ) -> Optional[int]:
lowerCAmelCase_ :str = floats_tensor((1, 3, 32, 32) , rng=random.Random(_SCREAMING_SNAKE_CASE ) ).to(_SCREAMING_SNAKE_CASE )
lowerCAmelCase_ :Any = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        lowerCAmelCase_ :List[Any] = Image.fromarray(np.uint8(_SCREAMING_SNAKE_CASE ) ).convert("""RGB""" ).resize((64, 64) )
        lowerCAmelCase_ :Union[str, Any] = Image.fromarray(np.uint8(image + 4 ) ).convert("""RGB""" ).resize((64, 64) )
if str(_SCREAMING_SNAKE_CASE ).startswith("""mps""" ):
lowerCAmelCase_ :Dict = torch.manual_seed(_SCREAMING_SNAKE_CASE )
else:
lowerCAmelCase_ :Tuple = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(_SCREAMING_SNAKE_CASE )
lowerCAmelCase_ :List[Any] = {
"prompt": "A painting of a squirrel eating a burger",
"image": init_image,
"mask_image": mask_image,
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
    def test_stable_diffusion_inpaint( self ):
        device = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInpaintPipeline(**components )
        sd_pipe = sd_pipe.to(device )
        sd_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        image = sd_pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
    def test_inference_batch_single_identical( self ):
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    def tearDown( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_stable_diffusion_inpaint_pipeline( self ):
        init_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/sd2-inpaint/init_image.png""" )
        mask_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
        expected_image = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"""
            """/yellow_cat_sitting_on_a_park_bench.npy""" )
        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id , safety_checker=None )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()
        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0 )
        output = pipe(
            prompt=prompt , image=init_image , mask_image=mask_image , generator=generator , output_type="""np""" , )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image ).max() < 9E-3
    def test_stable_diffusion_inpaint_pipeline_fp16( self ):
        init_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/sd2-inpaint/init_image.png""" )
        mask_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
        expected_image = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"""
            """/yellow_cat_sitting_on_a_park_bench_fp16.npy""" )
        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id , torch_dtype=torch.float16 , safety_checker=None , )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()
        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0 )
        output = pipe(
            prompt=prompt , image=init_image , mask_image=mask_image , generator=generator , output_type="""np""" , )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image ).max() < 5E-1
    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading( self ):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        init_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/sd2-inpaint/init_image.png""" )
        mask_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
        model_id = "stabilityai/stable-diffusion-2-inpainting"
        scheduler = PNDMScheduler.from_pretrained(model_id , subfolder="""scheduler""" )
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id , safety_checker=None , scheduler=scheduler , torch_dtype=torch.float16 , )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing(1 )
        pipe.enable_sequential_cpu_offload()
        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0 )
        _ = pipe(
            prompt=prompt , image=init_image , mask_image=mask_image , generator=generator , num_inference_steps=2 , output_type="""np""" , )
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 10**9
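# Standalone usage sketch (hedged): the slow tests above exercise the standard
# diffusers inpainting workflow, which outside a test harness reduces to:
#   pipe = StableDiffusionInpaintPipeline.from_pretrained(
#       "stabilityai/stable-diffusion-2-inpainting", safety_checker=None
#   )
#   pipe = pipe.to("cuda")
#   result = pipe(prompt=prompt, image=init_image, mask_image=mask_image).images[0]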
| 363 |
"""simple docstring"""
import itertools
import math
def is_prime ( number : int ) -> bool:
    '''simple docstring'''
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def prime_generator ( ):
    '''simple docstring'''
    num = 2
    while True:
        if is_prime(num ):
            yield num
        num += 1
def solution ( nth : int = 1_0_0_0_1 ) -> int:
    '''simple docstring'''
    return next(itertools.islice(prime_generator() , nth - 1 , nth ) )
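# Cheap sanity check (runs at import time on tiny input only): the primes in
# order are 2, 3, 5, 7, 11, 13, so the 6th prime is 13. The default
# solution(10001) yields 104743, the widely cited Project Euler 7 answer, but
# is left uncomputed here to keep imports fast.
assert solution(6 ) == 13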
if __name__ == "__main__":
print(F"""{solution() = }""")
| 1 | 0 |
"""simple docstring"""
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
"""files""" , [
["""full:README.md""", """dataset_infos.json"""],
["""empty:README.md""", """dataset_infos.json"""],
["""dataset_infos.json"""],
["""full:README.md"""],
] , )
def test_from_dir ( files , tmp_path_factory ):
    '''simple docstring'''
    dataset_infos_dir = tmp_path_factory.mktemp("""dset_infos_dir""" )
    if "full:README.md" in files:
        with open(dataset_infos_dir / """README.md""" , """w""" ) as f:
            f.write("""---\ndataset_info:\n dataset_size: 42\n---""" )
    if "empty:README.md" in files:
        with open(dataset_infos_dir / """README.md""" , """w""" ) as f:
            f.write("""""" )
    # we want to support dataset_infos.json for backward compatibility
    if "dataset_infos.json" in files:
        with open(dataset_infos_dir / """dataset_infos.json""" , """w""" ) as f:
            f.write("""{\"default\": {\"dataset_size\": 42}}""" )
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir )
    assert dataset_infos
    assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
"""dataset_info""" , [
DatasetInfo(),
DatasetInfo(
description="""foo""" , features=Features({"""a""": Value("""int32""" )} ) , builder_name="""builder""" , config_name="""config""" , version="""1.0.0""" , splits=[{"""name""": """train"""}] , download_size=4_2 , ),
] , )
def test_dataset_info_dump_and_reload ( tmp_path , dataset_info ):
    '''simple docstring'''
    tmp_path = str(tmp_path )
    dataset_info.write_to_directory(tmp_path )
    reloaded = DatasetInfo.from_directory(tmp_path )
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(tmp_path , """dataset_info.json""" ) )
def test_dataset_info_to_yaml_dict ( ):
    '''simple docstring'''
    dataset_info = DatasetInfo(
        description="""foo""" , citation="""bar""" , homepage="""https://foo.bar""" , license="""CC0""" , features=Features({"""a""": Value("""int32""" )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name="""builder""" , config_name="""config""" , version="""1.0.0""" , splits=[{"""name""": """train""", """num_examples""": 42}] , download_checksums={} , download_size=1337 , post_processing_size=442 , dataset_size=1234 , size_in_bytes=1337 + 442 + 1234 , )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML )
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) )
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict )
    reloaded = yaml.safe_load(dataset_info_yaml )
    assert dataset_info_yaml_dict == reloaded
def test_dataset_info_to_yaml_dict_empty ( ):
    '''simple docstring'''
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
"""dataset_infos_dict""" , [
DatasetInfosDict(),
DatasetInfosDict({"""default""": DatasetInfo()} ),
DatasetInfosDict({"""my_config_name""": DatasetInfo()} ),
DatasetInfosDict(
{
"""default""": DatasetInfo(
description="""foo""" , features=Features({"""a""": Value("""int32""" )} ) , builder_name="""builder""" , config_name="""config""" , version="""1.0.0""" , splits=[{"""name""": """train"""}] , download_size=4_2 , )
} ),
DatasetInfosDict(
{
"""v1""": DatasetInfo(dataset_size=4_2 ),
"""v2""": DatasetInfo(dataset_size=1_3_3_7 ),
} ),
] , )
def test_dataset_infos_dict_dump_and_reload ( tmp_path , dataset_infos_dict ):
    '''simple docstring'''
    tmp_path = str(tmp_path )
    dataset_infos_dict.write_to_directory(tmp_path )
    reloaded = DatasetInfosDict.from_directory(tmp_path )
    # the config_name of the dataset_infos_dict takes precedence over the DatasetInfo attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() )
    assert dataset_infos_dict == reloaded
    if dataset_infos_dict:
        assert os.path.exists(os.path.join(tmp_path , """README.md""" ) )
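# Minimal round-trip sketch (hedged; uses only the `datasets` APIs the tests
# above already exercise, and is deliberately left as a comment so pytest
# collection stays side-effect free):
#   info = DatasetInfo(description="foo", dataset_size=42)
#   info.write_to_directory("/tmp/dset_info_dir")
#   assert DatasetInfo.from_directory("/tmp/dset_info_dir") == info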
| 364 |
"""simple docstring"""
def solution ( length : int = 50 ) -> int:
    '''simple docstring'''
    # ways_number[k] counts the fillings of a row of length k; rows shorter
    # than 3 admit only the all-grey filling, hence the initial 1s.
    ways_number = [1] * (length + 1)
    for row_length in range(3 , length + 1 ):
        for block_length in range(3 , row_length + 1 ):
            # place the leftmost red block at `block_start`, then count the
            # fillings of whatever remains after one separating grey square
            for block_start in range(row_length - block_length ):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]
            # the block can also sit flush against the right end of the row
            ways_number[row_length] += 1
    return ways_number[length]
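# Cheap sanity check (runs at import time): Project Euler 114 states that a
# row of length 7 admits exactly 17 fillings, which pins down the recurrence.
assert solution(7 ) == 17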
if __name__ == "__main__":
print(F"""{solution() = }""")
| 1 | 0 |
"""simple docstring"""
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
__UpperCAmelCase = 16
__UpperCAmelCase = 32
def get_dataloaders ( accelerator : Accelerator , batch_size : int = 16 , model_name : str = "bert-base-cased" ):
    '''simple docstring'''
    tokenizer = AutoTokenizer.from_pretrained(model_name )
    datasets = load_dataset("""glue""" , """mrpc""" )
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=True , max_length=None )
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function , batched=True , remove_columns=["""idx""", """sentence1""", """sentence2"""] , load_from_cache_file=False )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("""label""" , """labels""" )
    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples , padding="""max_length""" , max_length=128 , return_tensors="""pt""" )
        return tokenizer.pad(examples , padding="""longest""" , return_tensors="""pt""" )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["""train"""] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets["""validation"""] , shuffle=False , collate_fn=collate_fn , batch_size=batch_size )
    return train_dataloader, eval_dataloader
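# Usage note (sketch): `training_function` below consumes this helper as
#   train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)
# so a driver only needs an `Accelerator` instance plus the two hyperparameters.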
def evaluation_loop ( accelerator , model , eval_dataloader , metric ):
    '''simple docstring'''
    model.eval()
    samples_seen = 0
    for step, batch in enumerate(eval_dataloader ):
        # We could avoid this line since we set the accelerator with `device_placement=True`.
        batch.to(accelerator.device )
        with torch.no_grad():
            outputs = model(**batch )
        predictions = outputs.logits.argmax(dim=-1 )
        # It is slightly faster to call this once, than multiple times
        predictions, references = accelerator.gather((predictions, batch["""labels"""]) )
        # If we are in a multiprocess environment, the last batch has duplicates
        if accelerator.use_distributed:
            if step == len(eval_dataloader ) - 1:
                predictions = predictions[: len(eval_dataloader.dataset ) - samples_seen]
                references = references[: len(eval_dataloader.dataset ) - samples_seen]
            else:
                samples_seen += references.shape[0]
        metric.add_batch(
            predictions=predictions , references=references , )
    eval_metric = metric.compute()
    return eval_metric["accuracy"]
def training_function ( config , args ):
    '''simple docstring'''
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["""lr"""]
    num_epochs = int(config["""num_epochs"""] )
    seed = int(config["""seed"""] )
    batch_size = int(config["""batch_size"""] )
    model_name = args.model_name_or_path
    set_seed(seed )
    train_dataloader, eval_dataloader = get_dataloaders(accelerator , batch_size , model_name )
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name , return_dict=True )
    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters() , lr=lr )
    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            """gradient_accumulation_steps"""
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader ) * num_epochs) // gradient_accumulation_steps
    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer , num_warmup_steps=0 , num_training_steps=max_training_steps , )
    else:
        lr_scheduler = DummyScheduler(optimizer , total_num_steps=max_training_steps , warmup_num_steps=0 )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0
    metric = evaluate.load("""glue""" , """mrpc""" )
    ending_epoch = num_epochs
    if args.partial_train_epoch is not None:
        ending_epoch = args.partial_train_epoch
    if args.resume_from_checkpoint:
        accelerator.load_state(args.resume_from_checkpoint )
        epoch_string = args.resume_from_checkpoint.split("""epoch_""" )[1]
        state_epoch_num = """"""
        for char in epoch_string:
            if char.isdigit():
                state_epoch_num += char
            else:
                break
        starting_epoch = int(state_epoch_num ) + 1
        accuracy = evaluation_loop(accelerator , model , eval_dataloader , metric )
        accelerator.print("""resumed checkpoint performance:""" , accuracy )
        accelerator.print("""resumed checkpoint's scheduler's lr:""" , lr_scheduler.get_lr()[0] )
        accelerator.print("""resumed optimizer's lr:""" , optimizer.param_groups[0]["""lr"""] )
        with open(os.path.join(args.output_dir , f"""state_{starting_epoch-1}.json""" ) , """r""" ) as f:
            resumed_state = json.load(f )
        assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
        assert (
            resumed_state["lr"] == lr_scheduler.get_lr()[0]
        ), "Scheduler learning rate mismatch, loading from checkpoint failed"
        assert (
            resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
        ), "Optimizer learning rate mismatch, loading from checkpoint failed"
        assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
        return
    # Now we train the model
    state = {}
    for epoch in range(starting_epoch , ending_epoch ):
        model.train()
        for step, batch in enumerate(train_dataloader ):
            outputs = model(**batch )
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss )
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1
        output_dir = f"""epoch_{epoch}"""
        output_dir = os.path.join(args.output_dir , output_dir )
        accelerator.save_state(output_dir )
        accuracy = evaluation_loop(accelerator , model , eval_dataloader , metric )
        state["accuracy"] = accuracy
        state["lr"] = lr_scheduler.get_lr()[0]
        state["optimizer_lr"] = optimizer.param_groups[0]["""lr"""]
        state["epoch"] = epoch
        state["overall_step"] = overall_step
        accelerator.print(f"""epoch {epoch}:""" , state )
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            with open(os.path.join(args.output_dir , f"""state_{epoch}.json""" ) , """w""" ) as f:
                json.dump(state , f )
def main ( ):
    '''simple docstring'''
    parser = argparse.ArgumentParser(description="""Simple example of training script tracking peak GPU memory usage.""" )
    parser.add_argument(
        """--model_name_or_path""" , type=str , default="""bert-base-cased""" , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=False , )
    parser.add_argument(
        """--output_dir""" , type=str , default=""".""" , help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""" , )
    parser.add_argument(
        """--resume_from_checkpoint""" , type=str , default=None , help="""If the training should continue from a checkpoint folder.""" , )
    parser.add_argument(
        """--partial_train_epoch""" , type=int , default=None , help="""If passed, the training will stop after this number of epochs.""" , )
    parser.add_argument(
        """--num_epochs""" , type=int , default=2 , help="""Number of train epochs.""" , )
    args = parser.parse_args()
    config = {"""lr""": 2e-5, """num_epochs""": args.num_epochs, """seed""": 42, """batch_size""": 16}
    training_function(config , args )
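# Example invocation (hypothetical paths/flags, assuming `accelerate config`
# has already been run):
#   accelerate launch this_script.py --model_name_or_path bert-base-cased \
#       --output_dir ./checkpoints --num_epochs 2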
if __name__ == "__main__":
main()
| 365 |
"""simple docstring"""
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
StableDiffusionControlNetImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class _SCREAMING_SNAKE_CASE ( A__ , A__ , A__ , unittest.TestCase ):
UpperCAmelCase_ :int = StableDiffusionControlNetImgaImgPipeline
UpperCAmelCase_ :str = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
UpperCAmelCase_ :Tuple = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
UpperCAmelCase_ :List[str] = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"} )
UpperCAmelCase_ :Optional[Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS
def __lowerCAmelCase ( self ) -> List[str]:
torch.manual_seed(0 )
lowerCAmelCase_ :Tuple = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
torch.manual_seed(0 )
lowerCAmelCase_ :List[Any] = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
torch.manual_seed(0 )
lowerCAmelCase_ :Optional[Any] = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=__A , set_alpha_to_one=__A , )
torch.manual_seed(0 )
lowerCAmelCase_ :List[str] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
lowerCAmelCase_ :Union[str, Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
lowerCAmelCase_ :List[Any] = CLIPTextModel(__A )
lowerCAmelCase_ :int = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
lowerCAmelCase_ :Union[str, Any] = {
"""unet""": unet,
"""controlnet""": controlnet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
    def get_dummy_inputs( self , device , seed=0 ):
        if str(device ).startswith("""mps""" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        controlnet_embedder_scale_factor = 2
        control_image = randn_tensor(
            (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=generator , device=torch.device(device ) , )
        image = floats_tensor(control_image.shape , rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        image = Image.fromarray(np.uint8(image ) ).convert("""RGB""" ).resize((64, 64) )
        inputs = {
            """prompt""": """A painting of a squirrel eating a burger""",
            """generator""": generator,
            """num_inference_steps""": 2,
            """guidance_scale""": 6.0,
            """output_type""": """numpy""",
            """image""": image,
            """control_image""": control_image,
        }
        return inputs
def __lowerCAmelCase ( self ) -> int:
return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 )
def __lowerCAmelCase ( self ) -> List[str]:
self._test_inference_batch_single_identical(expected_max_diff=2E-3 )
class _SCREAMING_SNAKE_CASE ( A__ , A__ , unittest.TestCase ):
UpperCAmelCase_ :List[str] = StableDiffusionControlNetImgaImgPipeline
UpperCAmelCase_ :int = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
UpperCAmelCase_ :str = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    UpperCAmelCase_ :int = frozenset([] )  # TODO: add image_params once the pipeline is refactored to use VaeImageProcessor.preprocess
def __lowerCAmelCase ( self ) -> Optional[int]:
torch.manual_seed(0 )
lowerCAmelCase_ :Dict = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
torch.manual_seed(0 )
        def init_weights(m ):
            if isinstance(m , torch.nn.Convad ):
                torch.nn.init.normal(m.weight )
                m.bias.data.fill_(1.0 )
lowerCAmelCase_ :List[Any] = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
controlneta.controlnet_down_blocks.apply(__A )
torch.manual_seed(0 )
lowerCAmelCase_ :Optional[Any] = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
controlneta.controlnet_down_blocks.apply(__A )
torch.manual_seed(0 )
lowerCAmelCase_ :Optional[Any] = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=__A , set_alpha_to_one=__A , )
torch.manual_seed(0 )
lowerCAmelCase_ :Optional[int] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
lowerCAmelCase_ :Optional[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
lowerCAmelCase_ :str = CLIPTextModel(__A )
lowerCAmelCase_ :str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
lowerCAmelCase_ :Optional[Any] = MultiControlNetModel([controlneta, controlneta] )
lowerCAmelCase_ :List[Any] = {
"""unet""": unet,
"""controlnet""": controlnet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
    def get_dummy_inputs( self , device , seed=0 ):
        if str(device ).startswith("""mps""" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        controlnet_embedder_scale_factor = 2
        control_image = [
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=generator , device=torch.device(device ) , ),
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=generator , device=torch.device(device ) , ),
        ]
        image = floats_tensor(control_image[0].shape , rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        image = Image.fromarray(np.uint8(image ) ).convert("""RGB""" ).resize((64, 64) )
        inputs = {
            """prompt""": """A painting of a squirrel eating a burger""",
            """generator""": generator,
            """num_inference_steps""": 2,
            """guidance_scale""": 6.0,
            """output_type""": """numpy""",
            """image""": image,
            """control_image""": control_image,
        }
        return inputs
def __lowerCAmelCase ( self ) -> Optional[Any]:
lowerCAmelCase_ :List[str] = self.get_dummy_components()
lowerCAmelCase_ :Tuple = self.pipeline_class(**__A )
pipe.to(__A )
lowerCAmelCase_ :Union[str, Any] = 1_0.0
lowerCAmelCase_ :Union[str, Any] = 4
lowerCAmelCase_ :Tuple = self.get_dummy_inputs(__A )
lowerCAmelCase_ :List[str] = steps
lowerCAmelCase_ :int = scale
lowerCAmelCase_ :Union[str, Any] = pipe(**__A )[0]
lowerCAmelCase_ :Any = self.get_dummy_inputs(__A )
lowerCAmelCase_ :str = steps
lowerCAmelCase_ :str = scale
lowerCAmelCase_ :Tuple = pipe(**__A , control_guidance_start=0.1 , control_guidance_end=0.2 )[0]
lowerCAmelCase_ :Optional[Any] = self.get_dummy_inputs(__A )
lowerCAmelCase_ :Union[str, Any] = steps
lowerCAmelCase_ :Union[str, Any] = scale
lowerCAmelCase_ :str = pipe(**__A , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7] )[0]
lowerCAmelCase_ :List[str] = self.get_dummy_inputs(__A )
lowerCAmelCase_ :Optional[int] = steps
lowerCAmelCase_ :Tuple = scale
lowerCAmelCase_ :str = pipe(**__A , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8] )[0]
# make sure that all outputs are different
assert np.sum(np.abs(output_a - output_a ) ) > 1E-3
assert np.sum(np.abs(output_a - output_a ) ) > 1E-3
assert np.sum(np.abs(output_a - output_a ) ) > 1E-3
def __lowerCAmelCase ( self ) -> Dict:
return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def __lowerCAmelCase ( self ) -> Tuple:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 )
def __lowerCAmelCase ( self ) -> Optional[int]:
self._test_inference_batch_single_identical(expected_max_diff=2E-3 )
def __lowerCAmelCase ( self ) -> List[str]:
lowerCAmelCase_ :str = self.get_dummy_components()
lowerCAmelCase_ :Tuple = self.pipeline_class(**__A )
pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
with tempfile.TemporaryDirectory() as tmpdir:
try:
# save_pretrained is not implemented for Multi-ControlNet
pipe.save_pretrained(__A )
except NotImplementedError:
pass
@slow
@require_torch_gpu
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    def tearDown( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_canny( self ):
        controlnet = ControlNetModel.from_pretrained("""lllyasviel/sd-controlnet-canny""" )
        pipe = StableDiffusionControlNetImgaImgPipeline.from_pretrained(
            """runwayml/stable-diffusion-v1-5""" , safety_checker=None , controlnet=controlnet )
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None )
        generator = torch.Generator(device="""cpu""" ).manual_seed(0 )
        prompt = """evil space-punk bird"""
        control_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png""" ).resize((512, 512) )
        image = load_image(
            """https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png""" ).resize((512, 512) )
        output = pipe(
            prompt , image , control_image=control_image , generator=generator , output_type="""np""" , num_inference_steps=50 , strength=0.6 , )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        expected_image = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy""" )
        assert np.abs(expected_image - image ).max() < 9E-2
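# Standalone sketch of the workflow above (hedged): note this dump spells the
# pipeline `StableDiffusionControlNetImgaImgPipeline`, while the released
# diffusers class is `StableDiffusionControlNetImg2ImgPipeline`:
#   controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
#   pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
#       "runwayml/stable-diffusion-v1-5", controlnet=controlnet, safety_checker=None
#   )
#   out = pipe(prompt, image=image, control_image=canny_image).images[0]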
| 1 | 0 |
"""simple docstring"""
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch ( task , reset_position_index_per_cell , tf_checkpoint_path , tapas_config_file , pytorch_dump_path ):
    '''simple docstring'''
    config = TapasConfig.from_json_file(tapas_config_file )
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell
    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config )
    elif task == "WTQ":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.664694
        config.cell_selection_preference = 0.207951
        config.huber_loss_delta = 0.121194
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.0352513
        model = TapasForQuestionAnswering(config=config )
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 36.4519
        config.cell_selection_preference = 0.903421
        config.huber_loss_delta = 222.088
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.763141
        model = TapasForQuestionAnswering(config=config )
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config )
    elif task == "MLM":
        model = TapasForMaskedLM(config=config )
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config )
    else:
        raise ValueError(f"""Task {task} not supported.""" )
    print(f"""Building PyTorch model from configuration: {config}""" )
    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model , config , tf_checkpoint_path )
    # Save pytorch-model (weights and configuration)
    print(f"""Save PyTorch model to {pytorch_dump_path}""" )
    model.save_pretrained(pytorch_dump_path )
    # Save tokenizer files
    print(f"""Save tokenizer files to {pytorch_dump_path}""" )
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + """vocab.txt""" , model_max_length=512 )
    tokenizer.save_pretrained(pytorch_dump_path )
    print("""Used relative position embeddings:""" , model.config.reset_position_index_per_cell )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--task', default='SQA', type=str, help='Model task for which to convert a checkpoint. Defaults to SQA.'
)
parser.add_argument(
'--reset_position_index_per_cell',
default=False,
action='store_true',
help='Whether to use relative position embeddings or not. Defaults to True.',
)
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--tapas_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained TAPAS model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
__UpperCAmelCase = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
| 366 |
"""simple docstring"""
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class _SCREAMING_SNAKE_CASE ( A__ , A__ , A__ ):
UpperCAmelCase_ :List[str] = [r"h\.\d+\.attn\.bias", r"h\.\d+\.attn\.masked_bias"]
@register_to_config
def __init__( self , __A , __A , __A = None , __A = 5_0257 , __A = 1024 , __A = 768 , __A = 12 , __A = 12 , __A = None , __A = "gelu_new" , __A = 0.1 , __A = 0.1 , __A = 0.1 , __A = 1E-5 , __A = 0.0_2 , __A = True , __A = True , __A = False , __A = False , ) -> Optional[Any]:
super().__init__()
lowerCAmelCase_ :List[str] = prefix_length
if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
raise ValueError(
f"""`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and"""
f""" `n_embd`: {n_embd} are not equal.""" )
lowerCAmelCase_ :Union[str, Any] = prefix_inner_dim
lowerCAmelCase_ :str = prefix_hidden_dim
lowerCAmelCase_ :str = (
nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim )
if self.prefix_hidden_dim is not None
else nn.Identity()
)
lowerCAmelCase_ :List[Any] = (
nn.Linear(self.prefix_hidden_dim , __A ) if self.prefix_hidden_dim is not None else nn.Identity()
)
lowerCAmelCase_ :Any = GPTaConfig(
vocab_size=__A , n_positions=__A , n_embd=__A , n_layer=__A , n_head=__A , n_inner=__A , activation_function=__A , resid_pdrop=__A , embd_pdrop=__A , attn_pdrop=__A , layer_norm_epsilon=__A , initializer_range=__A , scale_attn_weights=__A , use_cache=__A , scale_attn_by_inverse_layer_idx=__A , reorder_and_upcast_attn=__A , )
lowerCAmelCase_ :Any = GPTaLMHeadModel(__A )
def __lowerCAmelCase ( self , __A , __A , __A = None , __A = None , ) -> List[str]:
lowerCAmelCase_ :str = self.transformer.transformer.wte(__A )
lowerCAmelCase_ :Any = self.encode_prefix(__A )
lowerCAmelCase_ :Optional[Any] = self.decode_prefix(__A )
lowerCAmelCase_ :Optional[int] = torch.cat((prefix_embeds, embedding_text) , dim=1 )
if labels is not None:
lowerCAmelCase_ :int = self.get_dummy_token(input_ids.shape[0] , input_ids.device )
lowerCAmelCase_ :Optional[Any] = torch.cat((dummy_token, input_ids) , dim=1 )
lowerCAmelCase_ :Tuple = self.transformer(inputs_embeds=__A , labels=__A , attention_mask=__A )
if self.prefix_hidden_dim is not None:
return out, hidden
else:
return out
def __lowerCAmelCase ( self , __A , __A ) -> torch.Tensor:
return torch.zeros(__A , self.prefix_length , dtype=torch.intaa , device=__A )
def __lowerCAmelCase ( self , __A ) -> Optional[int]:
return self.encode_prefix(__A )
@torch.no_grad()
def __lowerCAmelCase ( self , __A , __A , __A ) -> Optional[int]:
lowerCAmelCase_ :Tuple = torch.split(__A , 1 , dim=0 )
lowerCAmelCase_ :Optional[int] = []
lowerCAmelCase_ :List[str] = []
for feature in features:
lowerCAmelCase_ :Tuple = self.decode_prefix(feature.to(__A ) ) # back to the clip feature
# Only support beam search for now
lowerCAmelCase_ , lowerCAmelCase_ :Optional[Any] = self.generate_beam(
input_embeds=__A , device=__A , eos_token_id=__A )
generated_tokens.append(output_tokens[0] )
generated_seq_lengths.append(seq_lengths[0] )
lowerCAmelCase_ :Tuple = torch.stack(__A )
lowerCAmelCase_ :int = torch.stack(__A )
return generated_tokens, generated_seq_lengths
@torch.no_grad()
def __lowerCAmelCase ( self , __A=None , __A=None , __A=None , __A = 5 , __A = 67 , __A = 1.0 , __A = None , ) -> Union[str, Any]:
lowerCAmelCase_ :Optional[int] = eos_token_id
lowerCAmelCase_ :Optional[int] = None
lowerCAmelCase_ :Any = None
lowerCAmelCase_ :int = torch.ones(__A , device=__A , dtype=torch.int )
lowerCAmelCase_ :Optional[int] = torch.zeros(__A , device=__A , dtype=torch.bool )
if input_embeds is not None:
lowerCAmelCase_ :List[str] = input_embeds
else:
lowerCAmelCase_ :Union[str, Any] = self.transformer.transformer.wte(__A )
for i in range(__A ):
lowerCAmelCase_ :Optional[int] = self.transformer(inputs_embeds=__A )
lowerCAmelCase_ :str = outputs.logits
lowerCAmelCase_ :str = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
lowerCAmelCase_ :Dict = logits.softmax(-1 ).log()
if scores is None:
lowerCAmelCase_ , lowerCAmelCase_ :Any = logits.topk(__A , -1 )
lowerCAmelCase_ :Union[str, Any] = generated.expand(__A , *generated.shape[1:] )
lowerCAmelCase_ , lowerCAmelCase_ :List[str] = next_tokens.permute(1 , 0 ), scores.squeeze(0 )
if tokens is None:
lowerCAmelCase_ :List[str] = next_tokens
else:
lowerCAmelCase_ :List[Any] = tokens.expand(__A , *tokens.shape[1:] )
lowerCAmelCase_ :Any = torch.cat((tokens, next_tokens) , dim=1 )
else:
lowerCAmelCase_ :List[Any] = -float(np.inf )
lowerCAmelCase_ :int = 0
lowerCAmelCase_ :Optional[int] = scores[:, None] + logits
seq_lengths[~is_stopped] += 1
lowerCAmelCase_ :List[Any] = scores_sum / seq_lengths[:, None]
lowerCAmelCase_ , lowerCAmelCase_ :Tuple = scores_sum_average.view(-1 ).topk(__A , -1 )
lowerCAmelCase_ :Optional[Any] = next_tokens // scores_sum.shape[1]
lowerCAmelCase_ :Dict = seq_lengths[next_tokens_source]
lowerCAmelCase_ :Tuple = next_tokens % scores_sum.shape[1]
lowerCAmelCase_ :Optional[Any] = next_tokens.unsqueeze(1 )
lowerCAmelCase_ :str = tokens[next_tokens_source]
lowerCAmelCase_ :List[Any] = torch.cat((tokens, next_tokens) , dim=1 )
lowerCAmelCase_ :Dict = generated[next_tokens_source]
lowerCAmelCase_ :Dict = scores_sum_average * seq_lengths
lowerCAmelCase_ :Tuple = is_stopped[next_tokens_source]
lowerCAmelCase_ :str = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 )
lowerCAmelCase_ :List[Any] = torch.cat((generated, next_token_embed) , dim=1 )
lowerCAmelCase_ :Optional[int] = is_stopped + next_tokens.eq(__A ).squeeze()
if is_stopped.all():
break
lowerCAmelCase_ :str = scores / seq_lengths
lowerCAmelCase_ :Optional[int] = scores.argsort(descending=__A )
# tokens tensors are already padded to max_seq_length
lowerCAmelCase_ :Optional[Any] = [tokens[i] for i in order]
lowerCAmelCase_ :Dict = torch.stack(__A , dim=0 )
lowerCAmelCase_ :Tuple = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype )
return output_texts, seq_lengths
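# Scoring note on the beam search above: `scores` accumulates per-beam sums of
# token log-probabilities and `seq_lengths` counts emitted tokens, and beams
# are ranked by the length-normalized average scores_sum / seq_lengths. A tiny
# worked example: with log-prob sums [-2.0, -3.3] and lengths [2, 4], the
# averages are [-1.0, -0.825], so the longer beam ranks first.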
| 1 | 0 |
"""simple docstring"""
import doctest
from collections import deque
import numpy as np
class _SCREAMING_SNAKE_CASE :
    def __init__( self ) -> None:
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]
    def __lowerCAmelCase ( self ) -> list:
        length_first_signal = len(self.first_signal )
        length_second_signal = len(self.second_signal )
        max_length = max(length_first_signal , length_second_signal )
        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length )]
        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)
        for i in range(max_length ):
            rotated_signal = deque(self.second_signal )
            rotated_signal.rotate(i )
            for j, item in enumerate(rotated_signal ):
                matrix[i][j] += item
        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix ) , np.transpose(self.first_signal ) )
        # rounding-off to two decimal places
        return [round(i , 2 ) for i in final_signal]
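# Worked example (hand-computed from the circular-convolution definition
# y[n] = sum_m x[m] * h[(n - m) mod N]): for the default signals
# x = [2, 1, 2, -1] and h = [1, 2, 3, 4], the method above should return
# [10, 10, 6, 14].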
if __name__ == "__main__":
doctest.testmod()
| 367 |
"""simple docstring"""
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'facebook/detr-resnet-50': 'https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json',
# See all DETR models at https://huggingface.co/models?filter=detr
}
class _SCREAMING_SNAKE_CASE ( A__ ):
UpperCAmelCase_ :str = "detr"
UpperCAmelCase_ :str = ["past_key_values"]
UpperCAmelCase_ :Tuple = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
    def __init__( self , use_timm_backbone=True , backbone_config=None , num_channels=3 , num_queries=100 , encoder_layers=6 , encoder_ffn_dim=2048 , encoder_attention_heads=8 , decoder_layers=6 , decoder_ffn_dim=2048 , decoder_attention_heads=8 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , is_encoder_decoder=True , activation_function="relu" , d_model=256 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , init_xavier_std=1.0 , auxiliary_loss=False , position_embedding_type="sine" , backbone="resnet50" , use_pretrained_backbone=True , dilation=False , class_cost=1 , bbox_cost=5 , giou_cost=2 , mask_loss_coefficient=1 , dice_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.1 , **kwargs , ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" )
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
                backbone_config = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] )
            elif isinstance(backbone_config , dict ):
                backbone_model_type = backbone_config.get("""model_type""" )
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config )
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder , **kwargs )
@property
def __lowerCAmelCase ( self ) -> int:
return self.encoder_attention_heads
@property
def __lowerCAmelCase ( self ) -> int:
return self.d_model
@classmethod
    def from_backbone_config ( cls , backbone_config , **kwargs ):
        return cls(backbone_config=backbone_config , **kwargs )
    def to_dict ( self ) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__ )
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class _SCREAMING_SNAKE_CASE ( A__ ):
UpperCAmelCase_ :List[Any] = version.parse("1.11" )
@property
def __lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
("""pixel_mask""", {0: """batch"""}),
] )
@property
def __lowerCAmelCase ( self ) -> float:
return 1E-5
@property
def __lowerCAmelCase ( self ) -> int:
return 12
| 1 | 0 |
"""simple docstring"""
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class _SCREAMING_SNAKE_CASE ( pl.LightningModule ):
def __init__( self , __A ) -> List[Any]:
super().__init__()
lowerCAmelCase_ :int = model
lowerCAmelCase_ :int = 2
lowerCAmelCase_ :int = nn.Linear(self.model.config.hidden_size , self.num_labels )
def __lowerCAmelCase ( self ) -> List[str]:
pass
def convert_longformer_qa_checkpoint_to_pytorch ( longformer_model , longformer_question_answering_ckpt_path , pytorch_dump_folder_path ):
    '''simple docstring'''
    # load the longformer model from the model identifier
    longformer = LongformerModel.from_pretrained(longformer_model )
    lightning_model = LightningModel(longformer )
    ckpt = torch.load(longformer_question_answering_ckpt_path , map_location=torch.device("""cpu""" ) )
    lightning_model.load_state_dict(ckpt["""state_dict"""] )
    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model )
    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict() )
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict() )
    longformer_for_qa.eval()
    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path )
    print(f"""Conversion successful. Model saved under {pytorch_dump_folder_path}""" )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--longformer_model',
default=None,
type=str,
required=True,
help='model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.',
)
parser.add_argument(
'--longformer_question_answering_ckpt_path',
default=None,
type=str,
required=True,
help='Path the official PyTorch Lightning Checkpoint.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
__UpperCAmelCase = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
| 368 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {'configuration_deit': ['DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DeiTConfig', 'DeiTOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_deit'] = ['DeiTFeatureExtractor']
    _import_structure['image_processing_deit'] = ['DeiTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_deit'] = [
'DEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'DeiTForImageClassification',
'DeiTForImageClassificationWithTeacher',
'DeiTForMaskedImageModeling',
'DeiTModel',
'DeiTPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_deit'] = [
'TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDeiTForImageClassification',
'TFDeiTForImageClassificationWithTeacher',
'TFDeiTForMaskedImageModeling',
'TFDeiTModel',
'TFDeiTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
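# Lazy-import sketch (hedged): with the `_LazyModule` wiring above, submodules
# are imported only on first attribute access, e.g.
#   from transformers import DeiTModel
#   model = DeiTModel.from_pretrained("facebook/deit-base-distilled-patch16-224")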
| 1 | 0 |
"""simple docstring"""
import math
def prime_sieve ( n : int ) -> list:
    '''simple docstring'''
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    for i in range(3 , int(n**0.5 + 1 ) , 2 ):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3 , n , 2 ):
        if is_prime[i]:
            primes.append(i )
    return primes
def solution ( limit : int = 999_966_663_333 ) -> int:
    '''simple docstring'''
    primes_upper_bound = math.floor(math.sqrt(limit ) ) + 100
    primes = prime_sieve(primes_upper_bound )
    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]
    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]
        lower_bound = last_prime**2
        upper_bound = next_prime**2
        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime
        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime
        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime
        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue
            if current > limit:
                break
            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2
            # Increment the current number
            current += last_prime * next_prime
        # Setup for next pair
        last_prime = next_prime
        prime_index += 1
    return matches_sum
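# Context (hedged): this is the standard solution to Project Euler 234,
# "Semidivisible numbers". The problem statement provides a small fixed point:
# the semidivisible numbers up to 1000 sum to 34825, so solution(1000) == 34825
# is a quick regression check.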
if __name__ == "__main__":
print(solution())
| 369 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_squeezebert': [
'SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SqueezeBertConfig',
'SqueezeBertOnnxConfig',
],
'tokenization_squeezebert': ['SqueezeBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_squeezebert_fast'] = ['SqueezeBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_squeezebert'] = [
'SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'SqueezeBertForMaskedLM',
'SqueezeBertForMultipleChoice',
'SqueezeBertForQuestionAnswering',
'SqueezeBertForSequenceClassification',
'SqueezeBertForTokenClassification',
'SqueezeBertModel',
'SqueezeBertModule',
'SqueezeBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 1 | 0 |
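The `__init__` cells above follow the transformers lazy-import pattern; as a hedged, self-contained sketch of the guard shape only, using plain `ImportError` in place of the library's `OptionalDependencyNotAvailable` (the `XTokenizer` names are hypothetical placeholders):

_import_structure = {"tokenization_x": ["XTokenizer"]}
try:
    import tokenizers  # noqa: F401  # optional backend; may be absent
except ImportError:
    pass  # the fast tokenizer simply stays unregistered
else:
    _import_structure["tokenization_x_fast"] = ["XTokenizerFast"]
print(_import_structure)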
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
__UpperCAmelCase = logging.get_logger(__name__)
class _SCREAMING_SNAKE_CASE ( A__ ):
def __init__( self , *__A , **__A ) -> None:
warnings.warn(
"""The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use GLPNImageProcessor instead.""" , _SCREAMING_SNAKE_CASE , )
super().__init__(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
| 370 |
"""simple docstring"""
__UpperCAmelCase = 2_56
# Modulus to hash a string
__UpperCAmelCase = 1_00_00_03
def _snake_case ( lowercase__ : str , lowercase__ : str ) -> bool:
'''simple docstring'''
lowerCAmelCase_ :Tuple = len(lowercase__ )
lowerCAmelCase_ :List[str] = len(lowercase__ )
if p_len > t_len:
return False
lowerCAmelCase_ :List[str] = 0
lowerCAmelCase_ :Optional[int] = 0
lowerCAmelCase_ :Any = 1
# Calculating the hash of pattern and substring of text
for i in range(lowercase__ ):
lowerCAmelCase_ :int = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus
lowerCAmelCase_ :Any = (ord(text[i] ) + text_hash * alphabet_size) % modulus
if i == p_len - 1:
continue
lowerCAmelCase_ :Optional[Any] = (modulus_power * alphabet_size) % modulus
for i in range(0 , t_len - p_len + 1 ):
if text_hash == p_hash and text[i : i + p_len] == pattern:
return True
if i == t_len - p_len:
continue
# Calculate the https://en.wikipedia.org/wiki/Rolling_hash
lowerCAmelCase_ :Any = (
(text_hash - ord(text[i] ) * modulus_power) * alphabet_size
+ ord(text[i + p_len] )
) % modulus
return False
def _snake_case ( ) -> None:
'''simple docstring'''
lowerCAmelCase_ :int = """abc1abc12"""
lowerCAmelCase_ :Dict = """alskfjaldsabc1abc1abc12k23adsfabcabc"""
lowerCAmelCase_ :int = """alskfjaldsk23adsfabcabc"""
assert rabin_karp(lowercase__ , lowercase__ ) and not rabin_karp(lowercase__ , lowercase__ )
# Test 2)
lowerCAmelCase_ :Dict = """ABABX"""
lowerCAmelCase_ :int = """ABABZABABYABABX"""
assert rabin_karp(lowercase__ , lowercase__ )
# Test 3)
lowerCAmelCase_ :Union[str, Any] = """AAAB"""
lowerCAmelCase_ :List[str] = """ABAAAAAB"""
assert rabin_karp(lowercase__ , lowercase__ )
# Test 4)
lowerCAmelCase_ :Dict = """abcdabcy"""
lowerCAmelCase_ :Union[str, Any] = """abcxabcdabxabcdabcdabcy"""
assert rabin_karp(lowercase__ , lowercase__ )
# Test 5)
lowerCAmelCase_ :Optional[int] = """Lü"""
lowerCAmelCase_ :Optional[int] = """Lüsai"""
assert rabin_karp(lowercase__ , lowercase__ )
lowerCAmelCase_ :Optional[int] = """Lue"""
assert not rabin_karp(lowercase__ , lowercase__ )
print("""Success.""" )
if __name__ == "__main__":
test_rabin_karp()
| 1 | 0 |
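A minimal standalone demo of the rolling-hash update used in `rabin_karp` above; the constants mirror the snippet (`alphabet_size = 256`, `modulus = 1000003`), and `pow(alphabet_size, p_len - 1, modulus)` is equivalent to the `modulus_power` the snippet builds incrementally in its first loop.

alphabet_size, modulus = 256, 1000003
text, p_len = "abcd", 3
window_hash = 0
for ch in text[:p_len]:  # hash of "abc"
    window_hash = (ord(ch) + window_hash * alphabet_size) % modulus
modulus_power = pow(alphabet_size, p_len - 1, modulus)
# Slide the window from "abc" to "bcd" in O(1):
window_hash = (
    (window_hash - ord(text[0]) * modulus_power) * alphabet_size + ord(text[p_len])
) % modulus
fresh_hash = 0
for ch in text[1 : 1 + p_len]:  # hash of "bcd", computed from scratch
    fresh_hash = (ord(ch) + fresh_hash * alphabet_size) % modulus
assert window_hash == fresh_hash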
"""simple docstring"""
from __future__ import annotations
from collections import Counter
from random import random
class _SCREAMING_SNAKE_CASE :
def __init__( self ) -> List[Any]:
lowerCAmelCase_ :Optional[Any] = {}
def __lowerCAmelCase ( self , __A ) -> None:
lowerCAmelCase_ :int = {}
def __lowerCAmelCase ( self , __A , __A , __A ) -> None:
if nodea not in self.connections:
self.add_node(__A )
if nodea not in self.connections:
self.add_node(__A )
lowerCAmelCase_ :Dict = probability
def __lowerCAmelCase ( self ) -> list[str]:
return list(self.connections )
def __lowerCAmelCase ( self , __A ) -> str:
lowerCAmelCase_ :Union[str, Any] = 0
lowerCAmelCase_ :int = random()
for dest in self.connections[node]:
current_probability += self.connections[node][dest]
if current_probability > random_value:
return dest
return ""
def _snake_case ( lowercase__ : str , lowercase__ : list[tuple[str, str, float]] , lowercase__ : int ) -> Any:
'''simple docstring'''
lowerCAmelCase_ :str = MarkovChainGraphUndirectedUnweighted()
for nodea, nodea, probability in transitions:
graph.add_transition_probability(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
lowerCAmelCase_ :Dict = Counter(graph.get_nodes() )
lowerCAmelCase_ :Union[str, Any] = start
for _ in range(lowerCamelCase__ ):
lowerCAmelCase_ :Tuple = graph.transition(lowerCamelCase__ )
visited[node] += 1
return visited
if __name__ == "__main__":
import doctest
doctest.testmod()
| 371 |
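The `transition` method in the Markov-chain cell above picks the next node by walking cumulative edge probabilities against a single random draw; a minimal sketch of that selection step in isolation (the dictionary below is an assumed toy distribution):

import random

def weighted_choice(probabilities: dict) -> str:
    # Cumulative-probability walk, as in transition() above.
    random_value = random.random()
    current_probability = 0.0
    for outcome, p in probabilities.items():
        current_probability += p
        if current_probability > random_value:
            return outcome
    return ""  # reached only if the probabilities sum to less than 1

print(weighted_choice({"a": 0.9, "b": 0.1}))  # "a" about 90% of the time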
"""simple docstring"""
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working, simple example of using Accelerate
# with LocalSGD, a method that synchronizes model
# parameters every K batches. It is different from, but complementary
# to, gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
__UpperCAmelCase = 16
__UpperCAmelCase = 32
def _snake_case ( lowercase__ : Accelerator , lowercase__ : int = 1_6 ) -> str:
'''simple docstring'''
lowerCAmelCase_ :List[str] = AutoTokenizer.from_pretrained("""bert-base-cased""" )
lowerCAmelCase_ :Optional[Any] = load_dataset("""glue""" , """mrpc""" )
def tokenize_function(lowercase__ : int ):
# max_length=None => use the model max length (it's actually the default)
lowerCAmelCase_ :Optional[int] = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowercase__ , max_length=lowercase__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
lowerCAmelCase_ :Optional[Any] = datasets.map(
lowercase__ , batched=lowercase__ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
    # We also rename the 'label' column to 'labels', which is the column name the models of the
    # transformers library expect
lowerCAmelCase_ :str = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(lowercase__ : Dict ):
# On TPU it's best to pad everything to the same length or training will be very slow.
lowerCAmelCase_ :int = 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
lowerCAmelCase_ :List[Any] = 1_6
elif accelerator.mixed_precision != "no":
lowerCAmelCase_ :List[str] = 8
else:
lowerCAmelCase_ :Optional[int] = None
return tokenizer.pad(
lowercase__ , padding="""longest""" , max_length=lowercase__ , pad_to_multiple_of=lowercase__ , return_tensors="""pt""" , )
# Instantiate dataloaders.
lowerCAmelCase_ :Optional[Any] = DataLoader(
tokenized_datasets["""train"""] , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=lowercase__ )
lowerCAmelCase_ :List[Any] = DataLoader(
tokenized_datasets["""validation"""] , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=lowercase__ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
__UpperCAmelCase = mocked_dataloaders # noqa: F811
def _snake_case ( lowercase__ : List[Any] , lowercase__ : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , lowercase__ ) == "1":
lowerCAmelCase_ :Optional[Any] = 2
# New Code #
lowerCAmelCase_ :List[str] = int(args.gradient_accumulation_steps )
lowerCAmelCase_ :int = int(args.local_sgd_steps )
# Initialize accelerator
lowerCAmelCase_ :str = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=lowercase__ )
if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
raise NotImplementedError("""LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)""" )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowerCAmelCase_ :int = config["""lr"""]
lowerCAmelCase_ :Union[str, Any] = int(config["""num_epochs"""] )
lowerCAmelCase_ :int = int(config["""seed"""] )
lowerCAmelCase_ :Union[str, Any] = int(config["""batch_size"""] )
lowerCAmelCase_ :Union[str, Any] = evaluate.load("""glue""" , """mrpc""" )
set_seed(lowercase__ )
lowerCAmelCase_ , lowerCAmelCase_ :Optional[int] = get_dataloaders(lowercase__ , lowercase__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowerCAmelCase_ :Optional[int] = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=lowercase__ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
lowerCAmelCase_ :Union[str, Any] = model.to(accelerator.device )
# Instantiate optimizer
lowerCAmelCase_ :Optional[Any] = AdamW(params=model.parameters() , lr=lowercase__ )
# Instantiate scheduler
lowerCAmelCase_ :Union[str, Any] = get_linear_schedule_with_warmup(
optimizer=lowercase__ , num_warmup_steps=1_0_0 , num_training_steps=(len(lowercase__ ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :Union[str, Any] = accelerator.prepare(
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
# Now we train the model
for epoch in range(lowercase__ ):
model.train()
with LocalSGD(
accelerator=lowercase__ , model=lowercase__ , local_sgd_steps=lowercase__ , enabled=local_sgd_steps is not None ) as local_sgd:
for step, batch in enumerate(lowercase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
                # We also do not currently support TPUs, nor do we advise using them, as bugs were found on the XLA side when running our tests.
with accelerator.accumulate(lowercase__ ):
lowerCAmelCase_ :str = model(**lowercase__ )
lowerCAmelCase_ :Optional[int] = output.loss
accelerator.backward(lowercase__ )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
# LocalSGD-specific line
local_sgd.step()
model.eval()
for step, batch in enumerate(lowercase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowerCAmelCase_ :Optional[int] = model(**lowercase__ )
lowerCAmelCase_ :Optional[int] = outputs.logits.argmax(dim=-1 )
lowerCAmelCase_ , lowerCAmelCase_ :Union[str, Any] = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
metric.add_batch(
predictions=lowercase__ , references=lowercase__ , )
lowerCAmelCase_ :Any = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"""epoch {epoch}:""" , lowercase__ )
def _snake_case ( ) -> Tuple:
'''simple docstring'''
lowerCAmelCase_ :str = argparse.ArgumentParser(description="""Simple example of training script.""" )
parser.add_argument(
"""--mixed_precision""" , type=lowercase__ , default=lowercase__ , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" , )
# New Code #
parser.add_argument(
"""--gradient_accumulation_steps""" , type=lowercase__ , default=1 , help="""The number of minibatches to be ran before gradients are accumulated.""" , )
parser.add_argument(
"""--local_sgd_steps""" , type=lowercase__ , default=8 , help="""Number of local SGD steps or None to disable local SGD""" )
parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
lowerCAmelCase_ :Optional[Any] = parser.parse_args()
lowerCAmelCase_ :Tuple = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 4_2, """batch_size""": 1_6}
training_function(lowercase__ , lowercase__ )
if __name__ == "__main__":
main()
| 1 | 0 |
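The `collate_fn` above derives its padding settings from the execution mode; pulled out as a hedged helper, where the TPU length of 128 and the fp8/fp16 multiples come straight from the snippet:

from typing import Optional, Tuple

def padding_config(on_tpu: bool, mixed_precision: str) -> Tuple[Optional[int], Optional[int]]:
    # Mirrors the collate_fn above: returns (max_length, pad_to_multiple_of).
    max_length = 128 if on_tpu else None  # a fixed length keeps TPUs fast
    if mixed_precision == "fp8":
        pad_to_multiple_of = 16
    elif mixed_precision != "no":
        pad_to_multiple_of = 8  # fp16 / bf16 want multiples of 8
    else:
        pad_to_multiple_of = None  # full precision: pad to the longest sample
    return max_length, pad_to_multiple_of

assert padding_config(False, "fp16") == (None, 8)
assert padding_config(True, "no") == (128, None)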
"""simple docstring"""
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
UpperCAmelCase_ :Any = RoCBertTokenizer
UpperCAmelCase_ :Optional[int] = None
UpperCAmelCase_ :Optional[int] = False
UpperCAmelCase_ :Optional[Any] = True
UpperCAmelCase_ :int = filter_non_english
def __lowerCAmelCase ( self ) -> Dict:
super().setUp()
lowerCAmelCase_ :List[Any] = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "你", "好", "是", "谁", "a", "b", "c", "d"]
lowerCAmelCase_ :int = {}
lowerCAmelCase_ :Union[str, Any] = {}
for i, value in enumerate(_snake_case ):
lowerCAmelCase_ :Optional[int] = i
lowerCAmelCase_ :Any = i
lowerCAmelCase_ :int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
lowerCAmelCase_ :int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""word_shape_file"""] )
lowerCAmelCase_ :Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""word_pronunciation_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
with open(self.word_shape_file , """w""" , encoding="""utf-8""" ) as word_shape_writer:
json.dump(_snake_case , _snake_case , ensure_ascii=_snake_case )
with open(self.word_pronunciation_file , """w""" , encoding="""utf-8""" ) as word_pronunciation_writer:
json.dump(_snake_case , _snake_case , ensure_ascii=_snake_case )
def __lowerCAmelCase ( self ) -> List[str]:
lowerCAmelCase_ :List[str] = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
lowerCAmelCase_ :Optional[Any] = tokenizer.tokenize("""你好[SEP]你是谁""" )
self.assertListEqual(_snake_case , ["""你""", """好""", """[SEP]""", """你""", """是""", """谁"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_snake_case ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(_snake_case ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(_snake_case ) , [5, 6, 2, 5, 7, 8] )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
lowerCAmelCase_ :Optional[Any] = RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize("""ah\u535A\u63A8zz""" ) , ["""ah""", """\u535A""", """\u63A8""", """zz"""] )
def __lowerCAmelCase ( self ) -> Dict:
lowerCAmelCase_ :List[str] = RoCBertBasicTokenizer(do_lower_case=_snake_case )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""hello""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
lowerCAmelCase_ :Optional[int] = RoCBertBasicTokenizer(do_lower_case=_snake_case , strip_accents=_snake_case )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hällo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""h\u00E9llo"""] )
def __lowerCAmelCase ( self ) -> str:
lowerCAmelCase_ :List[str] = RoCBertBasicTokenizer(do_lower_case=_snake_case , strip_accents=_snake_case )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def __lowerCAmelCase ( self ) -> List[str]:
lowerCAmelCase_ :Optional[Any] = RoCBertBasicTokenizer(do_lower_case=_snake_case )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def __lowerCAmelCase ( self ) -> int:
lowerCAmelCase_ :str = RoCBertBasicTokenizer(do_lower_case=_snake_case )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def __lowerCAmelCase ( self ) -> Any:
lowerCAmelCase_ :Dict = RoCBertBasicTokenizer(do_lower_case=_snake_case , strip_accents=_snake_case )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HäLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def __lowerCAmelCase ( self ) -> int:
lowerCAmelCase_ :str = RoCBertBasicTokenizer(do_lower_case=_snake_case , strip_accents=_snake_case )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HaLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def __lowerCAmelCase ( self ) -> str:
lowerCAmelCase_ :str = RoCBertBasicTokenizer(do_lower_case=_snake_case , never_split=["""[UNK]"""] )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? [UNK]""" ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?""", """[UNK]"""] )
def __lowerCAmelCase ( self ) -> int:
lowerCAmelCase_ :str = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
lowerCAmelCase_ :List[str] = {}
for i, token in enumerate(_snake_case ):
lowerCAmelCase_ :int = i
lowerCAmelCase_ :Optional[int] = RoCBertWordpieceTokenizer(vocab=_snake_case , unk_token="""[UNK]""" )
self.assertListEqual(tokenizer.tokenize("""""" ) , [] )
self.assertListEqual(tokenizer.tokenize("""unwanted running""" ) , ["""un""", """##want""", """##ed""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.tokenize("""unwantedX running""" ) , ["""[UNK]""", """runn""", """##ing"""] )
def __lowerCAmelCase ( self ) -> List[Any]:
self.assertTrue(_is_whitespace(""" """ ) )
self.assertTrue(_is_whitespace("""\t""" ) )
self.assertTrue(_is_whitespace("""\r""" ) )
self.assertTrue(_is_whitespace("""\n""" ) )
self.assertTrue(_is_whitespace("""\u00A0""" ) )
self.assertFalse(_is_whitespace("""A""" ) )
self.assertFalse(_is_whitespace("""-""" ) )
def __lowerCAmelCase ( self ) -> int:
self.assertTrue(_is_control("""\u0005""" ) )
self.assertFalse(_is_control("""A""" ) )
self.assertFalse(_is_control(""" """ ) )
self.assertFalse(_is_control("""\t""" ) )
self.assertFalse(_is_control("""\r""" ) )
def __lowerCAmelCase ( self ) -> Any:
self.assertTrue(_is_punctuation("""-""" ) )
self.assertTrue(_is_punctuation("""$""" ) )
self.assertTrue(_is_punctuation("""`""" ) )
self.assertTrue(_is_punctuation(""".""" ) )
self.assertFalse(_is_punctuation("""A""" ) )
self.assertFalse(_is_punctuation(""" """ ) )
def __lowerCAmelCase ( self ) -> str:
lowerCAmelCase_ :Union[str, Any] = self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(_snake_case ) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]] )
if self.test_rust_tokenizer:
lowerCAmelCase_ :Union[str, Any] = self.get_rust_tokenizer()
self.assertListEqual(
[rust_tokenizer.tokenize(_snake_case ) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]] )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
lowerCAmelCase_ :int = self.rust_tokenizer_class.from_pretrained(_snake_case , **_snake_case )
lowerCAmelCase_ :Optional[int] = f"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence."""
lowerCAmelCase_ :Optional[Any] = tokenizer_r.encode_plus(
_snake_case , return_attention_mask=_snake_case , return_token_type_ids=_snake_case , return_offsets_mapping=_snake_case , add_special_tokens=_snake_case , )
lowerCAmelCase_ :List[Any] = tokenizer_r.do_lower_case if hasattr(_snake_case , """do_lower_case""" ) else False
lowerCAmelCase_ :List[Any] = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), "A"),
((1, 2), ","),
((3, 5), "na"),
((5, 6), "##ï"),
((6, 8), "##ve"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "Allen"),
((21, 23), "##NL"),
((23, 24), "##P"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), "a"),
((1, 2), ","),
((3, 8), "naive"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "allen"),
((21, 23), "##nl"),
((23, 24), "##p"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["""input_ids"""] ) )
self.assertEqual([e[0] for e in expected_results] , tokens["""offset_mapping"""] )
def __lowerCAmelCase ( self ) -> int:
lowerCAmelCase_ :str = ["的", "人", "有"]
lowerCAmelCase_ :str = "".join(_snake_case )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
lowerCAmelCase_ :Optional[Any] = True
lowerCAmelCase_ :List[str] = self.tokenizer_class.from_pretrained(_snake_case , **_snake_case )
lowerCAmelCase_ :Tuple = self.rust_tokenizer_class.from_pretrained(_snake_case , **_snake_case )
lowerCAmelCase_ :Optional[int] = tokenizer_p.encode(_snake_case , add_special_tokens=_snake_case )
lowerCAmelCase_ :Any = tokenizer_r.encode(_snake_case , add_special_tokens=_snake_case )
lowerCAmelCase_ :Union[str, Any] = tokenizer_r.convert_ids_to_tokens(_snake_case )
lowerCAmelCase_ :str = tokenizer_p.convert_ids_to_tokens(_snake_case )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(_snake_case , _snake_case )
self.assertListEqual(_snake_case , _snake_case )
lowerCAmelCase_ :Tuple = False
lowerCAmelCase_ :Optional[int] = self.rust_tokenizer_class.from_pretrained(_snake_case , **_snake_case )
lowerCAmelCase_ :Optional[int] = self.tokenizer_class.from_pretrained(_snake_case , **_snake_case )
lowerCAmelCase_ :List[Any] = tokenizer_r.encode(_snake_case , add_special_tokens=_snake_case )
lowerCAmelCase_ :str = tokenizer_p.encode(_snake_case , add_special_tokens=_snake_case )
lowerCAmelCase_ :str = tokenizer_r.convert_ids_to_tokens(_snake_case )
lowerCAmelCase_ :List[str] = tokenizer_p.convert_ids_to_tokens(_snake_case )
# it is expected that only the first Chinese character is not preceded by "##".
lowerCAmelCase_ :Union[str, Any] = [
f"""##{token}""" if idx != 0 else token for idx, token in enumerate(_snake_case )
]
self.assertListEqual(_snake_case , _snake_case )
self.assertListEqual(_snake_case , _snake_case )
@slow
def __lowerCAmelCase ( self ) -> int:
lowerCAmelCase_ :Optional[int] = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
lowerCAmelCase_ :List[Any] = tokenizer.encode("""你好""" , add_special_tokens=_snake_case )
lowerCAmelCase_ :str = tokenizer.encode("""你是谁""" , add_special_tokens=_snake_case )
lowerCAmelCase_ :Tuple = tokenizer.build_inputs_with_special_tokens(_snake_case )
lowerCAmelCase_ :Optional[int] = tokenizer.build_inputs_with_special_tokens(_snake_case , _snake_case )
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
def __lowerCAmelCase ( self ) -> List[str]:
lowerCAmelCase_ :Tuple = self.get_tokenizers(do_lower_case=_snake_case )
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
lowerCAmelCase_ :str = "你好,你是谁"
lowerCAmelCase_ :int = tokenizer.tokenize(_snake_case )
lowerCAmelCase_ :Optional[int] = tokenizer.convert_tokens_to_ids(_snake_case )
lowerCAmelCase_ :List[Any] = tokenizer.convert_tokens_to_shape_ids(_snake_case )
lowerCAmelCase_ :Tuple = tokenizer.convert_tokens_to_pronunciation_ids(_snake_case )
lowerCAmelCase_ :List[str] = tokenizer.prepare_for_model(
_snake_case , _snake_case , _snake_case , add_special_tokens=_snake_case )
lowerCAmelCase_ :List[str] = tokenizer.encode_plus(_snake_case , add_special_tokens=_snake_case )
self.assertEqual(_snake_case , _snake_case )
| 350 |
"""simple docstring"""
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
__UpperCAmelCase = 16
__UpperCAmelCase = 32
def _snake_case ( lowercase__ : Accelerator , lowercase__ : int = 1_6 , lowercase__ : str = "bert-base-cased" ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase_ :List[str] = AutoTokenizer.from_pretrained(lowercase__ )
lowerCAmelCase_ :Optional[Any] = load_dataset("""glue""" , """mrpc""" )
def tokenize_function(lowercase__ : List[str] ):
# max_length=None => use the model max length (it's actually the default)
lowerCAmelCase_ :str = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowercase__ , max_length=lowercase__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
lowerCAmelCase_ :str = datasets.map(
lowercase__ , batched=lowercase__ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , load_from_cache_file=lowercase__ )
    # We also rename the 'label' column to 'labels', which is the column name the models of the
    # transformers library expect
lowerCAmelCase_ :List[str] = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(lowercase__ : Union[str, Any] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(lowercase__ , padding="""max_length""" , max_length=1_2_8 , return_tensors="""pt""" )
return tokenizer.pad(lowercase__ , padding="""longest""" , return_tensors="""pt""" )
# Instantiate dataloaders.
lowerCAmelCase_ :Optional[int] = DataLoader(
tokenized_datasets["""train"""] , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=lowercase__ )
lowerCAmelCase_ :Any = DataLoader(
tokenized_datasets["""validation"""] , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=lowercase__ )
return train_dataloader, eval_dataloader
def _snake_case ( lowercase__ : Optional[Any] , lowercase__ : Union[str, Any] , lowercase__ : Tuple , lowercase__ : int ) -> List[str]:
'''simple docstring'''
model.eval()
lowerCAmelCase_ :Dict = 0
for step, batch in enumerate(lowercase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowerCAmelCase_ :Optional[int] = model(**lowercase__ )
lowerCAmelCase_ :Optional[int] = outputs.logits.argmax(dim=-1 )
        # It is slightly faster to call this once than multiple times
lowerCAmelCase_ , lowerCAmelCase_ :List[Any] = accelerator.gather(
(predictions, batch["""labels"""]) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(lowercase__ ) - 1:
lowerCAmelCase_ :Optional[Any] = predictions[: len(eval_dataloader.dataset ) - samples_seen]
lowerCAmelCase_ :Any = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=lowercase__ , references=lowercase__ , )
lowerCAmelCase_ :Tuple = metric.compute()
return eval_metric["accuracy"]
def _snake_case ( lowercase__ : str , lowercase__ : List[str] ) -> Any:
'''simple docstring'''
lowerCAmelCase_ :Optional[int] = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowerCAmelCase_ :int = config["""lr"""]
lowerCAmelCase_ :Union[str, Any] = int(config["""num_epochs"""] )
lowerCAmelCase_ :Optional[int] = int(config["""seed"""] )
lowerCAmelCase_ :Union[str, Any] = int(config["""batch_size"""] )
lowerCAmelCase_ :Optional[Any] = args.model_name_or_path
set_seed(lowercase__ )
lowerCAmelCase_ , lowerCAmelCase_ :Dict = get_dataloaders(lowercase__ , lowercase__ , lowercase__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowerCAmelCase_ :str = AutoModelForSequenceClassification.from_pretrained(lowercase__ , return_dict=lowercase__ )
# Instantiate optimizer
lowerCAmelCase_ :List[str] = (
AdamW
if accelerator.state.deepspeed_plugin is None
or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
lowerCAmelCase_ :str = optimizer_cls(params=model.parameters() , lr=lowercase__ )
if accelerator.state.deepspeed_plugin is not None:
lowerCAmelCase_ :Union[str, Any] = accelerator.state.deepspeed_plugin.deepspeed_config[
"""gradient_accumulation_steps"""
]
else:
lowerCAmelCase_ :Any = 1
lowerCAmelCase_ :str = (len(lowercase__ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
lowerCAmelCase_ :List[str] = get_linear_schedule_with_warmup(
optimizer=lowercase__ , num_warmup_steps=0 , num_training_steps=lowercase__ , )
else:
lowerCAmelCase_ :int = DummyScheduler(lowercase__ , total_num_steps=lowercase__ , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :List[Any] = accelerator.prepare(
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
# We need to keep track of how many total steps we have iterated over
lowerCAmelCase_ :List[str] = 0
# We also need to keep track of the stating epoch so files are named properly
lowerCAmelCase_ :List[Any] = 0
lowerCAmelCase_ :str = evaluate.load("""glue""" , """mrpc""" )
lowerCAmelCase_ :Optional[Any] = num_epochs
if args.partial_train_epoch is not None:
lowerCAmelCase_ :Dict = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
lowerCAmelCase_ :Optional[Any] = args.resume_from_checkpoint.split("""epoch_""" )[1]
lowerCAmelCase_ :int = """"""
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
lowerCAmelCase_ :Union[str, Any] = int(lowercase__ ) + 1
lowerCAmelCase_ :Optional[int] = evaluation_loop(lowercase__ , lowercase__ , lowercase__ , lowercase__ )
accelerator.print("""resumed checkpoint performance:""" , lowercase__ )
accelerator.print("""resumed checkpoint's scheduler's lr:""" , lr_scheduler.get_lr()[0] )
accelerator.print("""resumed optimizers's lr:""" , optimizer.param_groups[0]["""lr"""] )
with open(os.path.join(args.output_dir , f"""state_{starting_epoch-1}.json""" ) , """r""" ) as f:
lowerCAmelCase_ :List[str] = json.load(lowercase__ )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
lowerCAmelCase_ :List[Any] = {}
for epoch in range(lowercase__ , lowercase__ ):
model.train()
for step, batch in enumerate(lowercase__ ):
lowerCAmelCase_ :Optional[int] = model(**lowercase__ )
lowerCAmelCase_ :Dict = outputs.loss
lowerCAmelCase_ :int = loss / gradient_accumulation_steps
accelerator.backward(lowercase__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
lowerCAmelCase_ :List[str] = f"""epoch_{epoch}"""
lowerCAmelCase_ :Any = os.path.join(args.output_dir , lowercase__ )
accelerator.save_state(lowercase__ )
lowerCAmelCase_ :List[Any] = evaluation_loop(lowercase__ , lowercase__ , lowercase__ , lowercase__ )
lowerCAmelCase_ :Union[str, Any] = accuracy
lowerCAmelCase_ :Any = lr_scheduler.get_lr()[0]
lowerCAmelCase_ :str = optimizer.param_groups[0]["""lr"""]
lowerCAmelCase_ :List[Any] = epoch
lowerCAmelCase_ :Tuple = overall_step
accelerator.print(f"""epoch {epoch}:""" , lowercase__ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , f"""state_{epoch}.json""" ) , """w""" ) as f:
json.dump(lowercase__ , lowercase__ )
def _snake_case ( ) -> int:
'''simple docstring'''
    lowerCAmelCase_ :List[Any] = argparse.ArgumentParser(description="""Simple example of a training script with checkpointing.""" )
parser.add_argument(
"""--model_name_or_path""" , type=lowercase__ , default="""bert-base-cased""" , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=lowercase__ , )
parser.add_argument(
"""--output_dir""" , type=lowercase__ , default=""".""" , help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""" , )
parser.add_argument(
"""--resume_from_checkpoint""" , type=lowercase__ , default=lowercase__ , help="""If the training should continue from a checkpoint folder.""" , )
parser.add_argument(
"""--partial_train_epoch""" , type=lowercase__ , default=lowercase__ , help="""If passed, the training will stop after this number of epochs.""" , )
parser.add_argument(
"""--num_epochs""" , type=lowercase__ , default=2 , help="""Number of train epochs.""" , )
lowerCAmelCase_ :Optional[int] = parser.parse_args()
lowerCAmelCase_ :List[Any] = {"""lr""": 2E-5, """num_epochs""": args.num_epochs, """seed""": 4_2, """batch_size""": 1_6}
training_function(lowercase__ , lowercase__ )
if __name__ == "__main__":
main()
| 1 | 0 |
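The resume logic above recovers the epoch number from a checkpoint folder name with a character-by-character digit scan; a regex-based equivalent, offered as a hedged alternative rather than the script's own method:

import re

def starting_epoch(checkpoint_path: str) -> int:
    # Resume one epoch after the number embedded in e.g. "out/epoch_7".
    match = re.search(r"epoch_(\d+)", checkpoint_path)
    if match is None:
        raise ValueError(f"no epoch number found in {checkpoint_path!r}")
    return int(match.group(1)) + 1

assert starting_epoch("out/epoch_7") == 8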
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
__UpperCAmelCase = get_logger(__name__)
class _SCREAMING_SNAKE_CASE :
UpperCAmelCase_ :Optional[int] = "dummy_data"
UpperCAmelCase_ :Union[str, Any] = "datasets"
UpperCAmelCase_ :str = False
def __init__( self , __A , __A , __A , __A = None , __A = False , __A = True , __A = None , ) -> Union[str, Any]:
lowerCAmelCase_ :Dict = 0
lowerCAmelCase_ :List[str] = dataset_name
lowerCAmelCase_ :Optional[Any] = cache_dir
lowerCAmelCase_ :Tuple = use_local_dummy_data
lowerCAmelCase_ :List[str] = config
# download_callbacks take a single url as input
lowerCAmelCase_ :List[Callable] = download_callbacks or []
# if False, it doesn't load existing files and it returns the paths of the dummy files relative
# to the dummy_data zip file root
lowerCAmelCase_ :Union[str, Any] = load_existing_dummy_data
# TODO(PVP, QL) might need to make this more general
lowerCAmelCase_ :Tuple = str(__A )
# to be downloaded
lowerCAmelCase_ :Any = None
lowerCAmelCase_ :Union[str, Any] = None
@property
def __lowerCAmelCase ( self ) -> Union[str, Any]:
if self._dummy_file is None:
lowerCAmelCase_ :int = self.download_dummy_data()
return self._dummy_file
@property
def __lowerCAmelCase ( self ) -> Dict:
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join("""dummy""" , self.config.name , self.version_name )
# structure is dummy / version_name
return os.path.join("""dummy""" , self.version_name )
@property
def __lowerCAmelCase ( self ) -> Dict:
return os.path.join(self.dummy_data_folder , """dummy_data.zip""" )
def __lowerCAmelCase ( self ) -> str:
lowerCAmelCase_ :Tuple = (
self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
)
lowerCAmelCase_ :Dict = cached_path(
__A , cache_dir=self.cache_dir , extract_compressed_file=__A , force_extract=__A )
return os.path.join(__A , self.dummy_file_name )
@property
def __lowerCAmelCase ( self ) -> Tuple:
return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )
@property
def __lowerCAmelCase ( self ) -> int:
if self._bucket_url is None:
lowerCAmelCase_ :Optional[int] = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , """/""" ) )
return self._bucket_url
@property
def __lowerCAmelCase ( self ) -> Optional[int]:
        # return the full path if it's a directory
if os.path.isdir(self.dummy_file ):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep , """/""" ).split("""/""" )[:-1] )
def __lowerCAmelCase ( self , __A , *__A ) -> str:
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
lowerCAmelCase_ :Any = self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
lowerCAmelCase_ :Tuple = self.dummy_file_name
# special case when data_url is a dict
if isinstance(__A , __A ):
return self.create_dummy_data_dict(__A , __A )
elif isinstance(__A , (list, tuple) ):
return self.create_dummy_data_list(__A , __A )
else:
return self.create_dummy_data_single(__A , __A )
def __lowerCAmelCase ( self , __A , *__A ) -> str:
return self.download_and_extract(__A )
def __lowerCAmelCase ( self , __A , __A ) -> int:
return self.download_and_extract(__A )
def __lowerCAmelCase ( self , __A , *__A , **__A ) -> Union[str, Any]:
return path
def __lowerCAmelCase ( self ) -> Optional[Any]:
return {}
def __lowerCAmelCase ( self , __A , __A ) -> Any:
lowerCAmelCase_ :Optional[Any] = {}
for key, single_urls in data_url.items():
for download_callback in self.download_callbacks:
if isinstance(__A , __A ):
for single_url in single_urls:
download_callback(__A )
else:
lowerCAmelCase_ :List[str] = single_urls
download_callback(__A )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
if isinstance(__A , __A ):
lowerCAmelCase_ :Optional[int] = [os.path.join(__A , urllib.parse.quote_plus(Path(__A ).name ) ) for x in single_urls]
else:
lowerCAmelCase_ :Any = single_urls
lowerCAmelCase_ :Union[str, Any] = os.path.join(__A , urllib.parse.quote_plus(Path(__A ).name ) )
lowerCAmelCase_ :Optional[int] = value
# make sure that values are unique
if all(isinstance(__A , __A ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
dummy_data_dict.values() ):
# append key to value to make its name unique
lowerCAmelCase_ :Dict = {key: value + key for key, value in dummy_data_dict.items()}
return dummy_data_dict
def __lowerCAmelCase ( self , __A , __A ) -> Optional[Any]:
lowerCAmelCase_ :str = []
# trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
lowerCAmelCase_ :int = all(bool(re.findall("""[0-9]{3,}-of-[0-9]{3,}""" , __A ) ) for url in data_url )
lowerCAmelCase_ :Optional[Any] = all(
url.startswith("""https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed""" ) for url in data_url )
if data_url and (is_tf_records or is_pubmed_records):
lowerCAmelCase_ :Optional[Any] = [data_url[0]] * len(__A )
for single_url in data_url:
for download_callback in self.download_callbacks:
download_callback(__A )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
lowerCAmelCase_ :List[str] = os.path.join(__A , urllib.parse.quote_plus(single_url.split("""/""" )[-1] ) )
dummy_data_list.append(__A )
return dummy_data_list
def __lowerCAmelCase ( self , __A , __A ) -> List[Any]:
for download_callback in self.download_callbacks:
download_callback(__A )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
lowerCAmelCase_ :Tuple = os.path.join(__A , urllib.parse.quote_plus(data_url.split("""/""" )[-1] ) )
if os.path.exists(__A ) or not self.load_existing_dummy_data:
return value
else:
# Backward compatibility, maybe deprecate at one point.
# For many datasets with single url calls to dl_manager.download_and_extract,
# the dummy_data.zip file is actually the zipped downloaded file
# while now we expected the dummy_data.zip file to be a directory containing
# the downloaded file.
return path_to_dummy_data
def __lowerCAmelCase ( self ) -> int:
pass
def __lowerCAmelCase ( self ) -> List[Any]:
pass
def __lowerCAmelCase ( self , __A ) -> List[Any]:
def _iter_archive_members(__A ):
# this preserves the order of the members inside the ZIP archive
lowerCAmelCase_ :Tuple = Path(self.dummy_file ).parent
lowerCAmelCase_ :List[str] = path.relative_to(__A )
with ZipFile(self.local_path_to_dummy_data ) as zip_file:
lowerCAmelCase_ :Union[str, Any] = zip_file.namelist()
for member in members:
if member.startswith(relative_path.as_posix() ):
yield dummy_parent_path.joinpath(__A )
lowerCAmelCase_ :Union[str, Any] = Path(__A )
lowerCAmelCase_ :Optional[int] = _iter_archive_members(__A ) if self.use_local_dummy_data else path.rglob("""*""" )
for file_path in file_paths:
if file_path.is_file() and not file_path.name.startswith((""".""", """__""") ):
yield file_path.relative_to(__A ).as_posix(), file_path.open("""rb""" )
def __lowerCAmelCase ( self , __A ) -> Tuple:
if not isinstance(__A , __A ):
lowerCAmelCase_ :Tuple = [paths]
for path in paths:
if os.path.isfile(__A ):
if os.path.basename(__A ).startswith((""".""", """__""") ):
return
yield path
else:
for dirpath, dirnames, filenames in os.walk(__A ):
if os.path.basename(__A ).startswith((""".""", """__""") ):
continue
dirnames.sort()
for filename in sorted(__A ):
if filename.startswith((""".""", """__""") ):
continue
yield os.path.join(__A , __A )
| 351 |
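The dummy-data manager above keys each download by the last path component of its URL, percent-encoded so query strings stay filesystem-safe; a minimal demo of that naming step (the URL below is an assumed example, and the output shown is for POSIX paths):

import os
import urllib.parse
from pathlib import Path

def dummy_file_name(root: str, url: str) -> str:
    # Same naming rule as the create_dummy_data_* methods above.
    return os.path.join(root, urllib.parse.quote_plus(Path(url).name))

print(dummy_file_name("dummy", "https://host/data/train.csv?rev=2"))
# dummy/train.csv%3Frev%3D2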
"""simple docstring"""
import baseaa
import io
import json
import os
from copy import deepcopy
from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler
class _SCREAMING_SNAKE_CASE :
def __init__( self , __A ) -> Union[str, Any]:
if isinstance(__A , __A ):
            # Don't modify the user's data in case they want to reuse it (e.g. in tests), because once we
            # have modified it, it will not be accepted here again, since `auto` values would have been overridden
lowerCAmelCase_ :Tuple = deepcopy(__A )
elif os.path.exists(__A ):
with io.open(__A , """r""" , encoding="""utf-8""" ) as f:
lowerCAmelCase_ :str = json.load(__A )
else:
try:
lowerCAmelCase_ :Dict = baseaa.urlsafe_baadecode(__A ).decode("""utf-8""" )
lowerCAmelCase_ :int = json.loads(__A )
except (UnicodeDecodeError, AttributeError, ValueError):
raise ValueError(
f"""Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}""" )
lowerCAmelCase_ :Optional[Any] = config
self.set_stage_and_offload()
def __lowerCAmelCase ( self ) -> Tuple:
        # zero stage - this is done as early as possible, before the model is created, to allow
        # the ``is_deepspeed_zero3_enabled`` query and access to the early deepspeed config object
        # during ``zero.Init()``, which needs to know the dtype and some other hparams.
lowerCAmelCase_ :Tuple = self.get_value("""zero_optimization.stage""" , -1 )
# offload
lowerCAmelCase_ :Dict = False
if self.is_zeroa() or self.is_zeroa():
lowerCAmelCase_ :Optional[int] = set(["""cpu""", """nvme"""] )
lowerCAmelCase_ :Union[str, Any] = set(
[
self.get_value("""zero_optimization.offload_optimizer.device""" ),
self.get_value("""zero_optimization.offload_param.device""" ),
] )
if len(offload_devices & offload_devices_valid ) > 0:
lowerCAmelCase_ :Optional[int] = True
def __lowerCAmelCase ( self , __A ) -> Optional[Any]:
lowerCAmelCase_ :str = self.config
# find the config node of interest if it exists
lowerCAmelCase_ :Tuple = ds_key_long.split(""".""" )
lowerCAmelCase_ :List[str] = nodes.pop()
for node in nodes:
lowerCAmelCase_ :Tuple = config.get(__A )
if config is None:
return None, ds_key
return config, ds_key
def __lowerCAmelCase ( self , __A , __A=None ) -> Optional[Any]:
lowerCAmelCase_ , lowerCAmelCase_ :Optional[Any] = self.find_config_node(__A )
if config is None:
return default
return config.get(__A , __A )
def __lowerCAmelCase ( self , __A , __A=False ) -> Optional[Any]:
lowerCAmelCase_ :Tuple = self.config
# find the config node of interest if it exists
lowerCAmelCase_ :Union[str, Any] = ds_key_long.split(""".""" )
for node in nodes:
lowerCAmelCase_ :int = config
lowerCAmelCase_ :Any = config.get(__A )
if config is None:
if must_exist:
raise ValueError(f"""Can't find {ds_key_long} entry in the config: {self.config}""" )
else:
return
# if found remove it
if parent_config is not None:
parent_config.pop(__A )
def __lowerCAmelCase ( self , __A ) -> Union[str, Any]:
lowerCAmelCase_ :Optional[int] = self.get_value(__A )
return False if value is None else bool(__A )
def __lowerCAmelCase ( self , __A ) -> Optional[int]:
lowerCAmelCase_ :List[str] = self.get_value(__A )
return False if value is None else not bool(__A )
def __lowerCAmelCase ( self ) -> str:
return self._stage == 2
def __lowerCAmelCase ( self ) -> Union[str, Any]:
return self._stage == 3
def __lowerCAmelCase ( self ) -> Union[str, Any]:
return self._offload
class _SCREAMING_SNAKE_CASE :
def __init__( self , __A ) -> Optional[int]:
lowerCAmelCase_ :Dict = engine
def __lowerCAmelCase ( self , __A , **__A ) -> str:
# runs backpropagation and handles mixed precision
self.engine.backward(__A , **__A )
# Deepspeed's `engine.step` performs the following operations:
# - gradient accumulation check
# - gradient clipping
# - optimizer step
# - zero grad
# - checking overflow
# - lr_scheduler step (only if engine.lr_scheduler is not None)
self.engine.step()
# and this plugin overrides the above calls with no-ops when Accelerate runs under
# Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple
# training loop that works transparently under many training regimes.
class _SCREAMING_SNAKE_CASE ( A__ ):
def __init__( self , __A ) -> List[str]:
super().__init__(__A , device_placement=__A , scaler=__A )
lowerCAmelCase_ :List[str] = hasattr(self.optimizer , """overflow""" )
def __lowerCAmelCase ( self , __A=None ) -> Optional[Any]:
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
def __lowerCAmelCase ( self ) -> List[Any]:
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
@property
def __lowerCAmelCase ( self ) -> int:
if self.__has_overflow__:
return self.optimizer.overflow
return False
class _SCREAMING_SNAKE_CASE ( A__ ):
def __init__( self , __A , __A ) -> Optional[int]:
super().__init__(__A , __A )
def __lowerCAmelCase ( self ) -> Any:
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
class _SCREAMING_SNAKE_CASE :
def __init__( self , __A , __A=0.0_0_1 , __A=0 , **__A ) -> List[Any]:
lowerCAmelCase_ :str = params
lowerCAmelCase_ :Any = lr
lowerCAmelCase_ :List[Any] = weight_decay
lowerCAmelCase_ :Any = kwargs
class _SCREAMING_SNAKE_CASE :
def __init__( self , __A , __A=None , __A=0 , **__A ) -> List[str]:
lowerCAmelCase_ :Optional[int] = optimizer
lowerCAmelCase_ :int = total_num_steps
lowerCAmelCase_ :List[Any] = warmup_num_steps
lowerCAmelCase_ :int = kwargs
| 1 | 0 |
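The config wrapper above resolves keys like "zero_optimization.stage" by walking nested dictionaries one dot-separated segment at a time; a standalone sketch of that lookup (the sample config is an assumed fragment, not a full DeepSpeed file):

def get_value(config: dict, ds_key_long: str, default=None):
    # Dotted-path lookup, as in the wrapper's find_config_node/get_value pair.
    *parents, leaf = ds_key_long.split(".")
    node = config
    for key in parents:
        node = node.get(key)
        if node is None:
            return default
    return node.get(leaf, default)

cfg = {"zero_optimization": {"stage": 3, "offload_param": {"device": "cpu"}}}
assert get_value(cfg, "zero_optimization.stage", -1) == 3
assert get_value(cfg, "zero_optimization.offload_param.device") == "cpu"
assert get_value(cfg, "zero_optimization.offload_optimizer.device") is None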
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinvaConfig, SwinvaForImageClassification
def _snake_case ( lowercase__ : Any ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ :Tuple = SwinvaConfig()
lowerCAmelCase_ :str = swinva_name.split("""_""" )
lowerCAmelCase_ :Optional[int] = name_split[1]
if "to" in name_split[3]:
lowerCAmelCase_ :Any = int(name_split[3][-3:] )
else:
lowerCAmelCase_ :Optional[int] = int(name_split[3] )
if "to" in name_split[2]:
lowerCAmelCase_ :List[str] = int(name_split[2][-2:] )
else:
lowerCAmelCase_ :Optional[Any] = int(name_split[2][6:] )
if model_size == "tiny":
lowerCAmelCase_ :Union[str, Any] = 9_6
lowerCAmelCase_ :str = (2, 2, 6, 2)
lowerCAmelCase_ :Dict = (3, 6, 1_2, 2_4)
elif model_size == "small":
lowerCAmelCase_ :int = 9_6
lowerCAmelCase_ :List[Any] = (2, 2, 1_8, 2)
lowerCAmelCase_ :Union[str, Any] = (3, 6, 1_2, 2_4)
elif model_size == "base":
lowerCAmelCase_ :Dict = 1_2_8
lowerCAmelCase_ :Union[str, Any] = (2, 2, 1_8, 2)
lowerCAmelCase_ :str = (4, 8, 1_6, 3_2)
else:
lowerCAmelCase_ :Dict = 1_9_2
lowerCAmelCase_ :List[str] = (2, 2, 1_8, 2)
lowerCAmelCase_ :Dict = (6, 1_2, 2_4, 4_8)
if "to" in swinva_name:
lowerCAmelCase_ :List[Any] = (1_2, 1_2, 1_2, 6)
if ("22k" in swinva_name) and ("to" not in swinva_name):
lowerCAmelCase_ :List[Any] = 2_1_8_4_1
lowerCAmelCase_ :Union[str, Any] = """huggingface/label-files"""
lowerCAmelCase_ :Optional[int] = """imagenet-22k-id2label.json"""
lowerCAmelCase_ :Union[str, Any] = json.load(open(hf_hub_download(UpperCAmelCase_ , UpperCAmelCase_ , repo_type="""dataset""" ) , """r""" ) )
lowerCAmelCase_ :Optional[Any] = {int(UpperCAmelCase_ ): v for k, v in idalabel.items()}
lowerCAmelCase_ :Dict = idalabel
lowerCAmelCase_ :Any = {v: k for k, v in idalabel.items()}
else:
lowerCAmelCase_ :Optional[Any] = 1_0_0_0
lowerCAmelCase_ :List[Any] = """huggingface/label-files"""
lowerCAmelCase_ :str = """imagenet-1k-id2label.json"""
lowerCAmelCase_ :Optional[int] = json.load(open(hf_hub_download(UpperCAmelCase_ , UpperCAmelCase_ , repo_type="""dataset""" ) , """r""" ) )
lowerCAmelCase_ :str = {int(UpperCAmelCase_ ): v for k, v in idalabel.items()}
lowerCAmelCase_ :Optional[int] = idalabel
lowerCAmelCase_ :List[Any] = {v: k for k, v in idalabel.items()}
lowerCAmelCase_ :Any = img_size
lowerCAmelCase_ :Any = num_classes
lowerCAmelCase_ :List[str] = embed_dim
lowerCAmelCase_ :Optional[Any] = depths
lowerCAmelCase_ :List[str] = num_heads
lowerCAmelCase_ :Optional[int] = window_size
return config
def _snake_case ( lowercase__ : List[Any] ) -> List[str]:
'''simple docstring'''
if "patch_embed.proj" in name:
lowerCAmelCase_ :Tuple = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" )
if "patch_embed.norm" in name:
lowerCAmelCase_ :Dict = name.replace("""patch_embed.norm""" , """embeddings.norm""" )
if "layers" in name:
lowerCAmelCase_ :List[str] = """encoder.""" + name
if "attn.proj" in name:
lowerCAmelCase_ :Dict = name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name:
lowerCAmelCase_ :List[Any] = name.replace("""attn""" , """attention.self""" )
if "norm1" in name:
lowerCAmelCase_ :int = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
lowerCAmelCase_ :Union[str, Any] = name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
lowerCAmelCase_ :int = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
lowerCAmelCase_ :Union[str, Any] = name.replace("""mlp.fc2""" , """output.dense""" )
if "q_bias" in name:
lowerCAmelCase_ :Optional[Any] = name.replace("""q_bias""" , """query.bias""" )
if "k_bias" in name:
lowerCAmelCase_ :Any = name.replace("""k_bias""" , """key.bias""" )
if "v_bias" in name:
lowerCAmelCase_ :List[str] = name.replace("""v_bias""" , """value.bias""" )
if "cpb_mlp" in name:
lowerCAmelCase_ :Tuple = name.replace("""cpb_mlp""" , """continuous_position_bias_mlp""" )
if name == "norm.weight":
lowerCAmelCase_ :Dict = """layernorm.weight"""
if name == "norm.bias":
lowerCAmelCase_ :List[Any] = """layernorm.bias"""
if "head" in name:
lowerCAmelCase_ :Union[str, Any] = name.replace("""head""" , """classifier""" )
else:
lowerCAmelCase_ :Any = """swinv2.""" + name
return name
def _snake_case ( lowercase__ : int , lowercase__ : Any ) -> Tuple:
'''simple docstring'''
for key in orig_state_dict.copy().keys():
lowerCAmelCase_ :Any = orig_state_dict.pop(UpperCAmelCase_ )
if "mask" in key:
continue
elif "qkv" in key:
lowerCAmelCase_ :Tuple = key.split(""".""" )
lowerCAmelCase_ :int = int(key_split[1] )
lowerCAmelCase_ :int = int(key_split[3] )
lowerCAmelCase_ :Tuple = model.swinva.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
lowerCAmelCase_ :int = val[:dim, :]
lowerCAmelCase_ :str = val[dim : dim * 2, :]
lowerCAmelCase_ :Union[str, Any] = val[-dim:, :]
else:
lowerCAmelCase_ :Union[str, Any] = val[:dim]
lowerCAmelCase_ :Optional[int] = val[
dim : dim * 2
]
lowerCAmelCase_ :Dict = val[-dim:]
else:
lowerCAmelCase_ :Union[str, Any] = val
return orig_state_dict
def _snake_case ( lowercase__ : str , lowercase__ : Optional[int] ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ :List[str] = timm.create_model(UpperCAmelCase_ , pretrained=UpperCAmelCase_ )
timm_model.eval()
lowerCAmelCase_ :Tuple = get_swinva_config(UpperCAmelCase_ )
lowerCAmelCase_ :List[Any] = SwinvaForImageClassification(UpperCAmelCase_ )
model.eval()
lowerCAmelCase_ :str = convert_state_dict(timm_model.state_dict() , UpperCAmelCase_ )
model.load_state_dict(UpperCAmelCase_ )
lowerCAmelCase_ :Optional[int] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
lowerCAmelCase_ :Dict = AutoImageProcessor.from_pretrained("""microsoft/{}""".format(swinva_name.replace("""_""" , """-""" ) ) )
lowerCAmelCase_ :Tuple = Image.open(requests.get(UpperCAmelCase_ , stream=UpperCAmelCase_ ).raw )
lowerCAmelCase_ :int = image_processor(images=UpperCAmelCase_ , return_tensors="""pt""" )
lowerCAmelCase_ :List[Any] = timm_model(inputs["""pixel_values"""] )
lowerCAmelCase_ :Any = model(**UpperCAmelCase_ ).logits
assert torch.allclose(UpperCAmelCase_ , UpperCAmelCase_ , atol=1E-3 )
print(f"""Saving model {swinva_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(UpperCAmelCase_ )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(UpperCAmelCase_ )
model.push_to_hub(
repo_path_or_name=Path(UpperCAmelCase_ , UpperCAmelCase_ ) , organization="""nandwalritik""" , commit_message="""Add model""" , )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--swinv2_name',
default='swinv2_tiny_patch4_window8_256',
type=str,
help='Name of the Swinv2 timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
__UpperCAmelCase = parser.parse_args()
convert_swinva_checkpoint(args.swinva_name, args.pytorch_dump_folder_path)
| 352 |
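The conversion above unpacks timm's fused `qkv` projection by slicing its first dimension into thirds; a minimal illustration of that slicing (the tensor below is a made-up stand-in, not a real checkpoint weight):

import torch

def split_qkv(val: torch.Tensor, dim: int):
    # Same slices as convert_state_dict() above.
    query = val[:dim, :]
    key = val[dim : dim * 2, :]
    value = val[-dim:, :]
    return query, key, value

fused = torch.arange(18.0).reshape(6, 3)  # dim = 2, so shape is (3 * dim, 3)
q, k, v = split_qkv(fused, 2)
assert q.shape == k.shape == v.shape == (2, 3)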
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class ImageSegmentationTool ( PipelineTool ):
    description = (
        "This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image. "
        "It takes two arguments named `image` which should be the original image, and `label` which should be a text "
        "describing the elements which should be identified in the segmentation mask. The tool returns the mask."
    )
    default_checkpoint = "CIDAS/clipseg-rd64-refined"
    name = "image_segmenter"
    model_class = CLIPSegForImageSegmentation
    inputs = ["image", "text"]
    outputs = ["image"]
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ["""vision"""] )
        super().__init__(*args , **kwargs )
    def encode( self , image , label ):
        return self.pre_processor(text=[label] , images=[image] , padding=True , return_tensors="""pt""" )
    def forward( self , inputs ):
        with torch.no_grad():
            logits = self.model(**inputs ).logits
        return logits
    def decode( self , outputs ):
        array = outputs.cpu().detach().numpy()
        # binarize the logits: non-positive scores become background, positive ones the mask
        array[array <= 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8 ) )
| 1 | 0 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@slow
    def test_small_integration_test( self ):
        model = AutoModelForSeq2SeqLM.from_pretrained("""google/mt5-small""" , return_dict=True ).to(torch_device )
        tokenizer = AutoTokenizer.from_pretrained("""google/mt5-small""" )
        input_ids = tokenizer("""Hello there""" , return_tensors="""pt""" ).input_ids
        labels = tokenizer("""Hi I am""" , return_tensors="""pt""" ).input_ids
        loss = model(input_ids.to(torch_device ) , labels=labels.to(torch_device ) ).loss
        # reproduce the reference mtf score: negative loss scaled by the label length
        mtf_score = -(labels.shape[-1] * loss.item())
        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
| 353 |
"""simple docstring"""
def knapsack( weights : list , values : list , number_of_items : int , max_weight : int , index : int ) -> int:
    '''Return the best total value reachable from ``index`` with ``max_weight`` capacity left.'''
    if index == number_of_items:
        return 0
    ans1 = 0
    ans2 = 0
    # option 1: skip the current item
    ans1 = knapsack(weights , values , number_of_items , max_weight , index + 1 )
    if weights[index] <= max_weight:
        # option 2: take the current item and recurse on the remaining capacity
        ans2 = values[index] + knapsack(
            weights , values , number_of_items , max_weight - weights[index] , index + 1 )
    return max(ans1 , ans2 )
if __name__ == "__main__":
import doctest
doctest.testmod()
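    # Illustrative call with classic textbook values (assumed, not part of the
    # original module): items weighing 10/20/30 worth 60/100/120 under capacity
    # 50 -> taking the last two items gives the best value, 220.
    print(knapsack([10, 20, 30], [60, 100, 120], 3, 50, 0))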
| 1 | 0 |
"""simple docstring"""
def abbr( a : str , b : str ) -> bool:
    '''Return True if ``b`` can be formed from ``a`` by capitalizing some lowercase
    letters and deleting the remaining lowercase letters.'''
    n = len(a )
    m = len(b )
    dp = [[False for _ in range(m + 1 )] for _ in range(n + 1 )]
    dp[0][0] = True
    for i in range(n ):
        for j in range(m + 1 ):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
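    # Illustrative check (example strings assumed, not from the original module):
    # "daBcd" matches "ABC" by capitalizing 'a' and 'c' and deleting both 'd's.
    print(abbr("daBcd", "ABC"))  # True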
| 354 |
"""simple docstring"""
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def tqdm( main_process_only : bool = True , *args , **kwargs ):
    '''Wrapper around ``tqdm.auto.tqdm`` that can restrict the bar to the main process.'''
    if not is_tqdm_available():
        raise ImportError("""Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.""" )
    disable = False
    if main_process_only:
        # show the bar only on local process 0; disable it everywhere else
        disable = PartialState().local_process_index != 0
    return _tqdm(*args , **kwargs , disable=disable )
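# Minimal usage sketch (the dataloader name is illustrative, not defined here):
# for batch in tqdm(True, dataloader, desc="train"):
#     ...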
| 1 | 0 |
"""simple docstring"""
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
__UpperCAmelCase = '%20'.join(argv[1:]) if len(argv) > 1 else quote(str(input('Search: ')))
print('Googling.....')
__UpperCAmelCase = F"""https://www.google.com/search?q={query}&num=100"""
__UpperCAmelCase = requests.get(
url,
headers={'User-Agent': str(UserAgent().random)},
)
try:
__UpperCAmelCase = (
BeautifulSoup(res.text, 'html.parser')
.find('div', attrs={'class': 'yuRUbf'})
.find('a')
.get('href')
)
except AttributeError:
__UpperCAmelCase = parse_qs(
BeautifulSoup(res.text, 'html.parser')
.find('div', attrs={'class': 'kCrYT'})
.find('a')
.get('href')
)['url'][0]
webbrowser.open(link)
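# Example run (the query is illustrative): `python google_search.py python tutorials`
# opens the first Google result for the query in the default browser.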
| 355 |
"""simple docstring"""
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
__UpperCAmelCase = get_tests_dir('fixtures/dummy-config.json')
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __lowerCAmelCase ( self ) -> Dict:
lowerCAmelCase_ :int = 0
def __lowerCAmelCase ( self ) -> List[str]:
self.assertIsNotNone(transformers.models.auto.__spec__ )
self.assertIsNotNone(importlib.util.find_spec("""transformers.models.auto""" ) )
def __lowerCAmelCase ( self ) -> Tuple:
lowerCAmelCase_ :Tuple = AutoConfig.from_pretrained("""bert-base-uncased""" )
self.assertIsInstance(__A , __A )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
lowerCAmelCase_ :int = AutoConfig.from_pretrained(__A )
self.assertIsInstance(__A , __A )
def __lowerCAmelCase ( self ) -> Any:
lowerCAmelCase_ :Any = AutoConfig.from_pretrained(__A )
self.assertIsInstance(__A , __A )
def __lowerCAmelCase ( self ) -> Dict:
lowerCAmelCase_ :int = AutoConfig.for_model("""roberta""" )
self.assertIsInstance(__A , __A )
def __lowerCAmelCase ( self ) -> Tuple:
with tempfile.TemporaryDirectory() as tmp_dir:
# This model name contains bert and roberta, but roberta ends up being picked.
lowerCAmelCase_ :int = os.path.join(__A , """fake-roberta""" )
os.makedirs(__A , exist_ok=__A )
with open(os.path.join(__A , """config.json""" ) , """w""" ) as f:
f.write(json.dumps({} ) )
lowerCAmelCase_ :Any = AutoConfig.from_pretrained(__A )
self.assertEqual(type(__A ) , __A )
def __lowerCAmelCase ( self ) -> Optional[int]:
        try:
            AutoConfig.register("""custom""" , CustomConfig )
            # Wrong model type will raise an error
            with self.assertRaises(ValueError ):
                AutoConfig.register("""model""" , CustomConfig )
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError ):
                AutoConfig.register("""bert""" , BertConfig )
            # Now that the config is registered, it can be used as any other config with the auto-API
            config = CustomConfig()
            with tempfile.TemporaryDirectory() as tmp_dir:
                config.save_pretrained(tmp_dir )
                new_config = AutoConfig.from_pretrained(tmp_dir )
                self.assertIsInstance(new_config , CustomConfig )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
def __lowerCAmelCase ( self ) -> Tuple:
with self.assertRaisesRegex(
__A , """bert-base is not a local folder and is not a valid model identifier""" ):
lowerCAmelCase_ :List[str] = AutoConfig.from_pretrained("""bert-base""" )
def __lowerCAmelCase ( self ) -> Any:
with self.assertRaisesRegex(
__A , r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
lowerCAmelCase_ :Dict = AutoConfig.from_pretrained(__A , revision="""aaaaaa""" )
def __lowerCAmelCase ( self ) -> int:
with self.assertRaisesRegex(
__A , """hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.""" , ):
lowerCAmelCase_ :Union[str, Any] = AutoConfig.from_pretrained("""hf-internal-testing/no-config-test-repo""" )
def __lowerCAmelCase ( self ) -> Tuple:
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError ):
            config = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError ):
            config = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" , trust_remote_code=False )
        config = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" , trust_remote_code=True )
        self.assertEqual(config.__class__.__name__ , """NewModelConfig""" )
        # Test config can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir )
            reloaded_config = AutoConfig.from_pretrained(tmp_dir , trust_remote_code=True )
            self.assertEqual(reloaded_config.__class__.__name__ , """NewModelConfig""" )
def __lowerCAmelCase ( self ) -> int:
        class NewModelConfigLocal ( BertConfig ):
            model_type = "new-model"
        try:
            AutoConfig.register("""new-model""" , NewModelConfigLocal )
            # If remote code is not set, the default is to use local
            config = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" )
            self.assertEqual(config.__class__.__name__ , """NewModelConfigLocal""" )
            # If remote code is disabled, we load the local one.
            config = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" , trust_remote_code=False )
            self.assertEqual(config.__class__.__name__ , """NewModelConfigLocal""" )
            # If remote is enabled, we load from the Hub
            config = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" , trust_remote_code=True )
            self.assertEqual(config.__class__.__name__ , """NewModelConfig""" )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
| 1 | 0 |
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
__UpperCAmelCase = pytest.mark.integration
@pytest.mark.parametrize("""path""" , ["""paws""", """csv"""] )
def test_inspect_dataset( path , tmp_path ):
    '''simple docstring'''
    inspect_dataset(path , tmp_path )
    script_name = path + """.py"""
    assert script_name in os.listdir(tmp_path )
    assert "__pycache__" not in os.listdir(tmp_path )
@pytest.mark.filterwarnings("""ignore:inspect_metric is deprecated:FutureWarning""" )
@pytest.mark.filterwarnings("""ignore:metric_module_factory is deprecated:FutureWarning""" )
@pytest.mark.parametrize("""path""" , ["""accuracy"""] )
def test_inspect_metric( path , tmp_path ):
    '''simple docstring'''
    inspect_metric(path , tmp_path )
    script_name = path + """.py"""
    assert script_name in os.listdir(tmp_path )
    assert "__pycache__" not in os.listdir(tmp_path )
@pytest.mark.parametrize(
"""path, config_name, expected_splits""" , [
("""squad""", """plain_text""", ["""train""", """validation"""]),
("""dalle-mini/wit""", """dalle-mini--wit""", ["""train"""]),
("""paws""", """labeled_final""", ["""train""", """test""", """validation"""]),
] , )
def test_get_dataset_config_info( path , config_name , expected_splits ):
    '''simple docstring'''
    info = get_dataset_config_info(path , config_name=config_name )
    assert info.config_name == config_name
    assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
"""path, config_name, expected_exception""" , [
("""paws""", None, ValueError),
] , )
def test_get_dataset_config_info_error( path , config_name , expected_exception ):
    '''simple docstring'''
    with pytest.raises(expected_exception ):
        get_dataset_config_info(path , config_name=config_name )
@pytest.mark.parametrize(
"""path, expected""" , [
("""squad""", """plain_text"""),
("""acronym_identification""", """default"""),
("""lhoestq/squad""", """plain_text"""),
("""lhoestq/test""", """default"""),
("""lhoestq/demo1""", """lhoestq--demo1"""),
("""dalle-mini/wit""", """dalle-mini--wit"""),
] , )
def test_get_dataset_config_names( path , expected ):
    '''simple docstring'''
    config_names = get_dataset_config_names(path )
    assert expected in config_names
@pytest.mark.parametrize(
"""path, expected_configs, expected_splits_in_first_config""" , [
("""squad""", ["""plain_text"""], ["""train""", """validation"""]),
("""dalle-mini/wit""", ["""dalle-mini--wit"""], ["""train"""]),
("""paws""", ["""labeled_final""", """labeled_swap""", """unlabeled_final"""], ["""train""", """test""", """validation"""]),
] , )
def test_get_dataset_infos( path , expected_configs , expected_splits_in_first_config ):
    '''simple docstring'''
    infos = get_dataset_infos(path )
    assert list(infos.keys() ) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
"""path, expected_config, expected_splits""" , [
("""squad""", """plain_text""", ["""train""", """validation"""]),
("""dalle-mini/wit""", """dalle-mini--wit""", ["""train"""]),
("""paws""", """labeled_final""", ["""train""", """test""", """validation"""]),
] , )
def test_get_dataset_split_names( path , expected_config , expected_splits ):
    '''simple docstring'''
    infos = get_dataset_infos(path )
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
"""path, config_name, expected_exception""" , [
("""paws""", None, ValueError),
] , )
def test_get_dataset_split_names_error( path , config_name , expected_exception ):
    '''simple docstring'''
    with pytest.raises(expected_exception ):
        get_dataset_split_names(path , config_name=config_name )
| 356 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _SCREAMING_SNAKE_CASE ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = GPTSanJapaneseTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {"do_clean_text": False, "add_prefix_space": False}
def __lowerCAmelCase ( self ) -> Tuple:
super().setUp()
# fmt: off
lowerCAmelCase_ :Dict = ["""こん""", """こんに""", """にちは""", """ばんは""", """世界,㔺界""", """、""", """。""", """<BR>""", """<SP>""", """<TAB>""", """<URL>""", """<EMAIL>""", """<TEL>""", """<DATE>""", """<PRICE>""", """<BLOCK>""", """<KIGOU>""", """<U2000U2BFF>""", """<|emoji1|>""", """<unk>""", """<|bagoftoken|>""", """<|endoftext|>"""]
# fmt: on
lowerCAmelCase_ :List[str] = {"""emoji""": {"""\ud83d\ude00""": """<|emoji1|>"""}, """emoji_inv""": {"""<|emoji1|>""": """\ud83d\ude00"""}} # 😀
lowerCAmelCase_ :int = {"""unk_token""": """<unk>"""}
lowerCAmelCase_ :Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
lowerCAmelCase_ :int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""emoji_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
with open(self.emoji_file , """w""" ) as emoji_writer:
emoji_writer.write(json.dumps(__A ) )
def __lowerCAmelCase ( self , **__A ) -> int:
kwargs.update(self.special_tokens_map )
return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **__A )
def __lowerCAmelCase ( self , __A ) -> Dict:
lowerCAmelCase_ :List[Any] = """こんにちは、世界。 \nこんばんは、㔺界。😀"""
lowerCAmelCase_ :Optional[int] = """こんにちは、世界。 \nこんばんは、世界。😀"""
return input_text, output_text
def __lowerCAmelCase ( self , __A ) -> str:
lowerCAmelCase_ , lowerCAmelCase_ :Tuple = self.get_input_output_texts(__A )
lowerCAmelCase_ :List[str] = tokenizer.encode(__A , add_special_tokens=__A )
lowerCAmelCase_ :str = tokenizer.decode(__A , clean_up_tokenization_spaces=__A )
return text, ids
def __lowerCAmelCase ( self ) -> str:
pass # TODO add if relevant
def __lowerCAmelCase ( self ) -> Dict:
pass # TODO add if relevant
def __lowerCAmelCase ( self ) -> int:
pass # TODO add if relevant
def __lowerCAmelCase ( self ) -> Dict:
lowerCAmelCase_ :Union[str, Any] = self.get_tokenizer()
# Testing tokenization
lowerCAmelCase_ :Optional[int] = """こんにちは、世界。 こんばんは、㔺界。"""
lowerCAmelCase_ :Any = ["""こん""", """にちは""", """、""", """世界""", """。""", """<SP>""", """こん""", """ばんは""", """、""", """㔺界""", """。"""]
lowerCAmelCase_ :Tuple = tokenizer.tokenize(__A )
self.assertListEqual(__A , __A )
# Testing conversion to ids without special tokens
lowerCAmelCase_ :List[Any] = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
lowerCAmelCase_ :List[str] = tokenizer.convert_tokens_to_ids(__A )
self.assertListEqual(__A , __A )
# Testing conversion to ids with special tokens
lowerCAmelCase_ :Any = tokens + [tokenizer.unk_token]
lowerCAmelCase_ :Union[str, Any] = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
lowerCAmelCase_ :Union[str, Any] = tokenizer.convert_tokens_to_ids(__A )
self.assertListEqual(__A , __A )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
lowerCAmelCase_ :int = self.get_tokenizer()
# Testing tokenization
lowerCAmelCase_ :Optional[int] = """こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"""
lowerCAmelCase_ :str = """こんにちは、、、、世界。こんばんは、、、、世界。"""
lowerCAmelCase_ :str = tokenizer.encode(__A )
lowerCAmelCase_ :Dict = tokenizer.decode(__A )
self.assertEqual(__A , __A )
@slow
def __lowerCAmelCase ( self ) -> int:
lowerCAmelCase_ :Tuple = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
# Testing tokenization
lowerCAmelCase_ :Optional[Any] = """こんにちは、世界。"""
lowerCAmelCase_ :Any = """こんばんは、㔺界。😀"""
lowerCAmelCase_ :Optional[Any] = """こんにちは、世界。こんばんは、世界。😀"""
lowerCAmelCase_ :List[Any] = tokenizer.encode(prefix_text + input_text )
lowerCAmelCase_ :List[str] = tokenizer.encode("""""" , prefix_text=prefix_text + input_text )
lowerCAmelCase_ :int = tokenizer.encode(__A , prefix_text=__A )
lowerCAmelCase_ :int = tokenizer.decode(__A )
lowerCAmelCase_ :Dict = tokenizer.decode(__A )
lowerCAmelCase_ :Tuple = tokenizer.decode(__A )
self.assertEqual(__A , __A )
self.assertEqual(__A , __A )
self.assertEqual(__A , __A )
@slow
def __lowerCAmelCase ( self ) -> Any:
lowerCAmelCase_ :int = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
# Testing tokenization
lowerCAmelCase_ :List[Any] = """こんにちは、世界。"""
lowerCAmelCase_ :Optional[int] = """こんばんは、㔺界。😀"""
lowerCAmelCase_ :List[str] = len(tokenizer.encode(__A ) ) - 2
lowerCAmelCase_ :Dict = len(tokenizer.encode(__A ) ) - 2
lowerCAmelCase_ :int = [1] + [0] * (len_prefix + len_text + 1)
lowerCAmelCase_ :List[Any] = [1] * (len_prefix + len_text + 1) + [0]
lowerCAmelCase_ :Dict = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
lowerCAmelCase_ :List[Any] = tokenizer(prefix_text + input_text ).token_type_ids
lowerCAmelCase_ :List[str] = tokenizer("""""" , prefix_text=prefix_text + input_text ).token_type_ids
lowerCAmelCase_ :List[Any] = tokenizer(__A , prefix_text=__A ).token_type_ids
self.assertListEqual(__A , __A )
self.assertListEqual(__A , __A )
self.assertListEqual(__A , __A )
@slow
def __lowerCAmelCase ( self ) -> int:
lowerCAmelCase_ :Dict = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
lowerCAmelCase_ :int = tokenizer.encode("""あンいワ""" )
lowerCAmelCase_ :Optional[Any] = tokenizer.encode("""""" , prefix_text="""あンいワ""" )
lowerCAmelCase_ :int = tokenizer.encode("""いワ""" , prefix_text="""あン""" )
self.assertEqual(tokenizer.decode(__A ) , tokenizer.decode(__A ) )
self.assertEqual(tokenizer.decode(__A ) , tokenizer.decode(__A ) )
self.assertNotEqual(__A , __A )
self.assertNotEqual(__A , __A )
self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token
self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token
@slow
def __lowerCAmelCase ( self ) -> Optional[Any]:
lowerCAmelCase_ :Union[str, Any] = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
lowerCAmelCase_ :int = [["""武田信玄""", """は、"""], ["""織田信長""", """の配下の、"""]]
lowerCAmelCase_ :Dict = tokenizer(__A , padding=__A )
lowerCAmelCase_ :Any = tokenizer.batch_encode_plus(__A , padding=__A )
# fmt: off
lowerCAmelCase_ :int = [[3_5993, 8640, 2_5948, 3_5998, 3_0647, 3_5675, 3_5999, 3_5999], [3_5993, 1_0382, 9868, 3_5998, 3_0646, 9459, 3_0646, 3_5675]]
lowerCAmelCase_ :List[str] = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
lowerCAmelCase_ :int = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
# fmt: on
self.assertListEqual(x_token.input_ids , __A )
self.assertListEqual(x_token.token_type_ids , __A )
self.assertListEqual(x_token.attention_mask , __A )
self.assertListEqual(x_token_a.input_ids , __A )
self.assertListEqual(x_token_a.token_type_ids , __A )
self.assertListEqual(x_token_a.attention_mask , __A )
def __lowerCAmelCase ( self ) -> Tuple:
# Intentionally convert some words to accommodate character fluctuations unique to Japanese
pass
def __lowerCAmelCase ( self ) -> str:
# tokenizer has no padding token
pass
| 1 | 0 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class _SCREAMING_SNAKE_CASE ( metaclass=a_ ):
UpperCAmelCase_ :List[Any] = ["sentencepiece"]
def __init__( self , *__A , **__A ) -> List[str]:
requires_backends(self , ["""sentencepiece"""] )
class _SCREAMING_SNAKE_CASE ( metaclass=a_ ):
UpperCAmelCase_ :Any = ["sentencepiece"]
def __init__( self , *__A , **__A ) -> int:
requires_backends(self , ["""sentencepiece"""] )
class _SCREAMING_SNAKE_CASE ( metaclass=a_ ):
UpperCAmelCase_ :str = ["sentencepiece"]
def __init__( self , *__A , **__A ) -> Optional[int]:
requires_backends(self , ["""sentencepiece"""] )
class _SCREAMING_SNAKE_CASE ( metaclass=a_ ):
UpperCAmelCase_ :int = ["sentencepiece"]
def __init__( self , *__A , **__A ) -> Tuple:
requires_backends(self , ["""sentencepiece"""] )
class _SCREAMING_SNAKE_CASE ( metaclass=a_ ):
UpperCAmelCase_ :Tuple = ["sentencepiece"]
def __init__( self , *__A , **__A ) -> List[str]:
requires_backends(self , ["""sentencepiece"""] )
class _SCREAMING_SNAKE_CASE ( metaclass=a_ ):
UpperCAmelCase_ :List[Any] = ["sentencepiece"]
def __init__( self , *__A , **__A ) -> List[Any]:
requires_backends(self , ["""sentencepiece"""] )
class _SCREAMING_SNAKE_CASE ( metaclass=a_ ):
UpperCAmelCase_ :Any = ["sentencepiece"]
def __init__( self , *__A , **__A ) -> List[str]:
requires_backends(self , ["""sentencepiece"""] )
class _SCREAMING_SNAKE_CASE ( metaclass=a_ ):
UpperCAmelCase_ :Optional[int] = ["sentencepiece"]
def __init__( self , *__A , **__A ) -> List[Any]:
requires_backends(self , ["""sentencepiece"""] )
class _SCREAMING_SNAKE_CASE ( metaclass=a_ ):
UpperCAmelCase_ :List[Any] = ["sentencepiece"]
def __init__( self , *__A , **__A ) -> Dict:
requires_backends(self , ["""sentencepiece"""] )
class _SCREAMING_SNAKE_CASE ( metaclass=a_ ):
UpperCAmelCase_ :Tuple = ["sentencepiece"]
def __init__( self , *__A , **__A ) -> int:
requires_backends(self , ["""sentencepiece"""] )
class _SCREAMING_SNAKE_CASE ( metaclass=a_ ):
UpperCAmelCase_ :Union[str, Any] = ["sentencepiece"]
def __init__( self , *__A , **__A ) -> Dict:
requires_backends(self , ["""sentencepiece"""] )
class _SCREAMING_SNAKE_CASE ( metaclass=a_ ):
UpperCAmelCase_ :str = ["sentencepiece"]
def __init__( self , *__A , **__A ) -> Any:
requires_backends(self , ["""sentencepiece"""] )
class _SCREAMING_SNAKE_CASE ( metaclass=a_ ):
UpperCAmelCase_ :Optional[int] = ["sentencepiece"]
def __init__( self , *__A , **__A ) -> Tuple:
requires_backends(self , ["""sentencepiece"""] )
class _SCREAMING_SNAKE_CASE ( metaclass=a_ ):
UpperCAmelCase_ :Dict = ["sentencepiece"]
def __init__( self , *__A , **__A ) -> Optional[int]:
requires_backends(self , ["""sentencepiece"""] )
class _SCREAMING_SNAKE_CASE ( metaclass=a_ ):
UpperCAmelCase_ :Tuple = ["sentencepiece"]
def __init__( self , *__A , **__A ) -> Union[str, Any]:
requires_backends(self , ["""sentencepiece"""] )
class _SCREAMING_SNAKE_CASE ( metaclass=a_ ):
UpperCAmelCase_ :Optional[Any] = ["sentencepiece"]
def __init__( self , *__A , **__A ) -> Dict:
requires_backends(self , ["""sentencepiece"""] )
class _SCREAMING_SNAKE_CASE ( metaclass=a_ ):
UpperCAmelCase_ :Union[str, Any] = ["sentencepiece"]
def __init__( self , *__A , **__A ) -> int:
requires_backends(self , ["""sentencepiece"""] )
class _SCREAMING_SNAKE_CASE ( metaclass=a_ ):
UpperCAmelCase_ :Any = ["sentencepiece"]
def __init__( self , *__A , **__A ) -> str:
requires_backends(self , ["""sentencepiece"""] )
class _SCREAMING_SNAKE_CASE ( metaclass=a_ ):
UpperCAmelCase_ :Any = ["sentencepiece"]
def __init__( self , *__A , **__A ) -> Any:
requires_backends(self , ["""sentencepiece"""] )
class _SCREAMING_SNAKE_CASE ( metaclass=a_ ):
UpperCAmelCase_ :Tuple = ["sentencepiece"]
def __init__( self , *__A , **__A ) -> Any:
requires_backends(self , ["""sentencepiece"""] )
class _SCREAMING_SNAKE_CASE ( metaclass=a_ ):
UpperCAmelCase_ :int = ["sentencepiece"]
def __init__( self , *__A , **__A ) -> List[str]:
requires_backends(self , ["""sentencepiece"""] )
class _SCREAMING_SNAKE_CASE ( metaclass=a_ ):
UpperCAmelCase_ :str = ["sentencepiece"]
def __init__( self , *__A , **__A ) -> Tuple:
requires_backends(self , ["""sentencepiece"""] )
class _SCREAMING_SNAKE_CASE ( metaclass=a_ ):
UpperCAmelCase_ :str = ["sentencepiece"]
def __init__( self , *__A , **__A ) -> List[str]:
requires_backends(self , ["""sentencepiece"""] )
class _SCREAMING_SNAKE_CASE ( metaclass=a_ ):
UpperCAmelCase_ :Dict = ["sentencepiece"]
def __init__( self , *__A , **__A ) -> Optional[Any]:
requires_backends(self , ["""sentencepiece"""] )
class _SCREAMING_SNAKE_CASE ( metaclass=a_ ):
UpperCAmelCase_ :Union[str, Any] = ["sentencepiece"]
def __init__( self , *__A , **__A ) -> List[Any]:
requires_backends(self , ["""sentencepiece"""] )
class _SCREAMING_SNAKE_CASE ( metaclass=a_ ):
UpperCAmelCase_ :Any = ["sentencepiece"]
def __init__( self , *__A , **__A ) -> Optional[int]:
requires_backends(self , ["""sentencepiece"""] )
class _SCREAMING_SNAKE_CASE ( metaclass=a_ ):
UpperCAmelCase_ :int = ["sentencepiece"]
def __init__( self , *__A , **__A ) -> int:
requires_backends(self , ["""sentencepiece"""] )
class _SCREAMING_SNAKE_CASE ( metaclass=a_ ):
UpperCAmelCase_ :int = ["sentencepiece"]
def __init__( self , *__A , **__A ) -> List[Any]:
requires_backends(self , ["""sentencepiece"""] )
class _SCREAMING_SNAKE_CASE ( metaclass=a_ ):
UpperCAmelCase_ :Any = ["sentencepiece"]
def __init__( self , *__A , **__A ) -> Any:
requires_backends(self , ["""sentencepiece"""] )
class _SCREAMING_SNAKE_CASE ( metaclass=a_ ):
UpperCAmelCase_ :Tuple = ["sentencepiece"]
def __init__( self , *__A , **__A ) -> Optional[int]:
requires_backends(self , ["""sentencepiece"""] )
class _SCREAMING_SNAKE_CASE ( metaclass=a_ ):
UpperCAmelCase_ :str = ["sentencepiece"]
def __init__( self , *__A , **__A ) -> Any:
requires_backends(self , ["""sentencepiece"""] )
| 357 |
"""simple docstring"""
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
__UpperCAmelCase = pd.read_csv(
'https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/'
'position_salaries.csv'
)
__UpperCAmelCase = dataset.iloc[:, 1:2].values
__UpperCAmelCase = dataset.iloc[:, 2].values
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = train_test_split(X, y, test_size=0.2, random_state=0)
__UpperCAmelCase = PolynomialFeatures(degree=4)
__UpperCAmelCase = poly_reg.fit_transform(X)
__UpperCAmelCase = LinearRegression()
pol_reg.fit(X_poly, y)
def viz_polynomial( ) -> None:
    '''Plot the polynomial-regression fit against the raw data points.'''
plt.scatter(lowercase__ , lowercase__ , color="""red""" )
plt.plot(lowercase__ , pol_reg.predict(poly_reg.fit_transform(lowercase__ ) ) , color="""blue""" )
plt.title("""Truth or Bluff (Linear Regression)""" )
plt.xlabel("""Position level""" )
plt.ylabel("""Salary""" )
plt.show()
if __name__ == "__main__":
    viz_polynomial()
# Predicting a new result with Polymonial Regression
pol_reg.predict(poly_reg.fit_transform([[5.5]]))
# output should be 132148.43750003
| 1 | 0 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import (
BaseOutput,
OptionalDependencyNotAvailable,
is_flax_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_onnx_available,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
@dataclass
class StableDiffusionPipelineOutput ( BaseOutput ):
    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
    from .pipeline_cycle_diffusion import CycleDiffusionPipeline
    from .pipeline_stable_diffusion import StableDiffusionPipeline
    from .pipeline_stable_diffusion_attend_and_excite import StableDiffusionAttendAndExcitePipeline
    from .pipeline_stable_diffusion_img2img import StableDiffusionImg2ImgPipeline
    from .pipeline_stable_diffusion_inpaint import StableDiffusionInpaintPipeline
    from .pipeline_stable_diffusion_inpaint_legacy import StableDiffusionInpaintPipelineLegacy
    from .pipeline_stable_diffusion_instruct_pix2pix import StableDiffusionInstructPix2PixPipeline
    from .pipeline_stable_diffusion_latent_upscale import StableDiffusionLatentUpscalePipeline
    from .pipeline_stable_diffusion_ldm3d import StableDiffusionLDM3DPipeline
    from .pipeline_stable_diffusion_model_editing import StableDiffusionModelEditingPipeline
    from .pipeline_stable_diffusion_panorama import StableDiffusionPanoramaPipeline
    from .pipeline_stable_diffusion_paradigms import StableDiffusionParadigmsPipeline
    from .pipeline_stable_diffusion_sag import StableDiffusionSAGPipeline
    from .pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
    from .pipeline_stable_unclip import StableUnCLIPPipeline
    from .pipeline_stable_unclip_img2img import StableUnCLIPImg2ImgPipeline
    from .safety_checker import StableDiffusionSafetyChecker
    from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import StableDiffusionImageVariationPipeline
else:
from .pipeline_stable_diffusion_image_variation import StableDiffusionImageVariationPipeline
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.26.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import (
        StableDiffusionDepth2ImgPipeline,
        StableDiffusionDiffEditPipeline,
        StableDiffusionPix2PixZeroPipeline,
    )
else:
    from .pipeline_stable_diffusion_depth2img import StableDiffusionDepth2ImgPipeline
    from .pipeline_stable_diffusion_diffedit import StableDiffusionDiffEditPipeline
    from .pipeline_stable_diffusion_pix2pix_zero import StableDiffusionPix2PixZeroPipeline
try:
if not (
is_torch_available()
and is_transformers_available()
and is_k_diffusion_available()
and is_k_diffusion_version('>=', '0.0.12')
):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipeline_stable_diffusion_k_diffusion import StableDiffusionKDiffusionPipeline
try:
if not (is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_onnx_objects import * # noqa F403
else:
    from .pipeline_onnx_stable_diffusion import OnnxStableDiffusionPipeline, StableDiffusionOnnxPipeline
    from .pipeline_onnx_stable_diffusion_img2img import OnnxStableDiffusionImg2ImgPipeline
    from .pipeline_onnx_stable_diffusion_inpaint import OnnxStableDiffusionInpaintPipeline
    from .pipeline_onnx_stable_diffusion_inpaint_legacy import OnnxStableDiffusionInpaintPipelineLegacy
    from .pipeline_onnx_stable_diffusion_upscale import OnnxStableDiffusionUpscalePipeline
if is_transformers_available() and is_flax_available():
import flax
@flax.struct.dataclass
    class FlaxStableDiffusionPipelineOutput ( BaseOutput ):
        images: np.ndarray
        nsfw_content_detected: List[bool]
    from ...schedulers.scheduling_pndm_flax import PNDMSchedulerState
    from .pipeline_flax_stable_diffusion import FlaxStableDiffusionPipeline
    from .pipeline_flax_stable_diffusion_img2img import FlaxStableDiffusionImg2ImgPipeline
    from .pipeline_flax_stable_diffusion_inpaint import FlaxStableDiffusionInpaintPipeline
    from .safety_checker_flax import FlaxStableDiffusionSafetyChecker
| 358 |
"""simple docstring"""
from __future__ import annotations
__UpperCAmelCase = 1.6021e-19 # units = C
def electrical_conductivity( conductivity : float , electron_conc : float , mobility : float , ) -> tuple[str, float]:
    '''Given any two of conductivity, electron concentration and mobility, compute the third.'''
if (conductivity, electron_conc, mobility).count(0 ) != 1:
raise ValueError("""You cannot supply more or less than 2 values""" )
elif conductivity < 0:
raise ValueError("""Conductivity cannot be negative""" )
elif electron_conc < 0:
raise ValueError("""Electron concentration cannot be negative""" )
elif mobility < 0:
raise ValueError("""mobility cannot be negative""" )
elif conductivity == 0:
return (
"conductivity",
mobility * electron_conc * ELECTRON_CHARGE,
)
elif electron_conc == 0:
return (
"electron_conc",
conductivity / (mobility * ELECTRON_CHARGE),
)
else:
return (
"mobility",
conductivity / (electron_conc * ELECTRON_CHARGE),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
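    # Illustrative call with round numbers (assumed, not from the original module):
    # passing 0 for conductivity asks the function to solve for it.
    print(electrical_conductivity(conductivity=0, electron_conc=1e20, mobility=0.01))
    # -> ('conductivity', ~0.16)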
| 1 | 0 |
import operator as op
__UpperCAmelCase = "scaler.pt"
__UpperCAmelCase = "pytorch_model"
__UpperCAmelCase = "random_states"
__UpperCAmelCase = "optimizer"
__UpperCAmelCase = "scheduler"
__UpperCAmelCase = "pytorch_model.bin"
__UpperCAmelCase = "pytorch_model.bin.index.json"
__UpperCAmelCase = "model.safetensors"
__UpperCAmelCase = "model.safetensors.index.json"
__UpperCAmelCase = "1.10.2"
__UpperCAmelCase = "py38"
__UpperCAmelCase = "4.17.0"
__UpperCAmelCase = ["ml.p3.16xlarge", "ml.p3dn.24xlarge", "ml.p4dn.24xlarge"]
__UpperCAmelCase = ["FULL_SHARD", "SHARD_GRAD_OP", "NO_SHARD", "HYBRID_SHARD", "HYBRID_SHARD_ZERO2"]
__UpperCAmelCase = ["TRANSFORMER_BASED_WRAP", "SIZE_BASED_WRAP", "NO_WRAP"]
__UpperCAmelCase = ["BACKWARD_PRE", "BACKWARD_POST", "NO_PREFETCH"]
__UpperCAmelCase = ["FULL_STATE_DICT", "LOCAL_STATE_DICT", "SHARDED_STATE_DICT"]
__UpperCAmelCase = "2.0.1"
__UpperCAmelCase = ["pdsh", "standard", "openmpi", "mvapich"]
__UpperCAmelCase = ["default", "reduce-overhead", "max-autotune"]
__UpperCAmelCase = {">": op.gt, ">=": op.ge, "==": op.eq, "!=": op.ne, "<=": op.le, "<": op.lt}
# These are the args for `torch.distributed.launch` for pytorch < 1.9
__UpperCAmelCase = [
"nnodes",
"nproc_per_node",
"rdzv_backend",
"rdzv_endpoint",
"rdzv_id",
"rdzv_conf",
"standalone",
"max_restarts",
"monitor_interval",
"start_method",
"role",
"module",
"m",
"no_python",
"run_path",
"log_dir",
"r",
"redirects",
"t",
"tee",
"node_rank",
"master_addr",
"master_port",
]
__UpperCAmelCase = ["DEEPSPEED", "MULTI_GPU", "FSDP", "MEGATRON_LM"]
__UpperCAmelCase = ["DEEPSPEED", "MULTI_XPU", "FSDP"]
| 359 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
__UpperCAmelCase = logging.get_logger(__name__)
class CLIPFeatureExtractor ( CLIPImageProcessor ):
    def __init__( self , *args , **kwargs ) -> None:
        warnings.warn(
            """The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
            """ use CLIPImageProcessor instead.""" , FutureWarning , )
        super().__init__(*args , **kwargs )
| 1 | 0 |
"""simple docstring"""
import argparse
import copy
def generate_neighbours( path ):
    '''Parse the distance file into a dict: node -> [[neighbour, distance], ...].'''
    dict_of_neighbours = {}
    with open(path ) as f:
        for line in f:
            if line.split()[0] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[1], line.split()[2]] )
                dict_of_neighbours[line.split()[0]] = _list
            else:
                dict_of_neighbours[line.split()[0]].append(
                    [line.split()[1], line.split()[2]] )
            if line.split()[1] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[0], line.split()[2]] )
                dict_of_neighbours[line.split()[1]] = _list
            else:
                dict_of_neighbours[line.split()[1]].append(
                    [line.split()[0], line.split()[2]] )
    return dict_of_neighbours
def generate_first_solution( path , dict_of_neighbours ):
    '''Build a greedy nearest-neighbour starting tour and its total distance.'''
    with open(path ) as f:
        start_node = f.read(1 )
    end_node = start_node
    first_solution = []
    visiting = start_node
    distance_of_first_solution = 0
    while visiting not in first_solution:
        minim = 10000
        for k in dict_of_neighbours[visiting]:
            if int(k[1] ) < int(minim ) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]
        first_solution.append(visiting )
        distance_of_first_solution = distance_of_first_solution + int(minim )
        visiting = best_node
    first_solution.append(end_node )
    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1
    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1] )
        - 10000
    )
    return first_solution, distance_of_first_solution
def find_neighborhood( solution , dict_of_neighbours ):
    '''Enumerate all two-swap neighbours of ``solution``; each entry carries its tour length last.'''
    neighborhood_of_solution = []
    for n in solution[1:-1]:
        idx1 = solution.index(n )
        for kn in solution[1:-1]:
            idx2 = solution.index(kn )
            if n == kn:
                continue
            _tmp = copy.deepcopy(solution )
            _tmp[idx1] = kn
            _tmp[idx2] = n
            distance = 0
            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k ) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1] )
            _tmp.append(distance )
            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp )
    index_of_last_item_in_the_list = len(neighborhood_of_solution[0] ) - 1
    neighborhood_of_solution.sort(key=lambda x : x[index_of_last_item_in_the_list] )
    return neighborhood_of_solution
def tabu_search( first_solution , distance_of_first_solution , dict_of_neighbours , iters , size ):
    '''Refine the greedy tour with tabu search for ``iters`` iterations and a tabu list of ``size``.'''
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution
    while count <= iters:
        neighborhood = find_neighborhood(solution , dict_of_neighbours )
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution ) - 1
        found = False
        while not found:
            i = 0
            while i < len(best_solution ):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1
            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node] )
                found = True
                solution = best_solution[:-1]
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]
        if len(tabu_list ) >= size:
            tabu_list.pop(0 )
        count = count + 1
    return best_solution_ever, best_cost
def main( args=None ):
    '''Wire the CLI arguments into the tabu-search pipeline and print the result.'''
    dict_of_neighbours = generate_neighbours(args.File )
    first_solution , distance_of_first_solution = generate_first_solution(
        args.File , dict_of_neighbours )
    best_sol , best_cost = tabu_search(
        first_solution , distance_of_first_solution , dict_of_neighbours , args.Iterations , args.Size , )
    print(f"""Best solution: {best_sol}, with total distance: {best_cost}.""" )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser(description='Tabu Search')
parser.add_argument(
'-f',
'--File',
type=str,
help='Path to the file containing the data',
required=True,
)
parser.add_argument(
'-i',
'--Iterations',
type=int,
help='How many iterations the algorithm should perform',
required=True,
)
parser.add_argument(
'-s', '--Size', type=int, help='Size of the tabu list', required=True
)
# Pass the arguments to main method
main(parser.parse_args())
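    # Example invocation (the data file name is a placeholder, not shipped here):
    #   python tabu_search.py -f tabudata.txt -i 100 -s 5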
| 360 |
"""simple docstring"""
from itertools import zip_longest
import requests
from bs4 import BeautifulSoup
from pandas import DataFrame
def _snake_case ( lowercase__ : str = "laptop" ) -> DataFrame:
'''simple docstring'''
lowerCAmelCase_ :Dict = f"""https://www.amazon.in/laptop/s?k={product}"""
lowerCAmelCase_ :List[str] = {
"""User-Agent""": """Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36
(KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36""",
"""Accept-Language""": """en-US, en;q=0.5""",
}
lowerCAmelCase_ :List[Any] = BeautifulSoup(requests.get(lowercase__ , headers=lowercase__ ).text )
# Initialize a Pandas dataframe with the column titles
lowerCAmelCase_ :Union[str, Any] = DataFrame(
columns=[
"""Product Title""",
"""Product Link""",
"""Current Price of the product""",
"""Product Rating""",
"""MRP of the product""",
"""Discount""",
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
"""div""" , attrs={"""class""": """s-result-item""", """data-component-type""": """s-search-result"""} , ) , soup.find_all("""div""" , attrs={"""class""": """a-row a-size-base a-color-base"""} ) , ):
try:
            product_title = item.h2.text
            product_link = """https://www.amazon.in/""" + item.h2.a["""href"""]
lowerCAmelCase_ :int = item.find("""span""" , attrs={"""class""": """a-offscreen"""} ).text
try:
lowerCAmelCase_ :Dict = item.find("""span""" , attrs={"""class""": """a-icon-alt"""} ).text
except AttributeError:
lowerCAmelCase_ :int = """Not available"""
try:
lowerCAmelCase_ :str = (
"""₹"""
+ item.find(
"""span""" , attrs={"""class""": """a-price a-text-price"""} ).text.split("""₹""" )[1]
)
except AttributeError:
lowerCAmelCase_ :Optional[Any] = """"""
try:
lowerCAmelCase_ :str = float(
(
(
float(product_mrp.strip("""₹""" ).replace(""",""" , """""" ) )
- float(product_price.strip("""₹""" ).replace(""",""" , """""" ) )
)
/ float(product_mrp.strip("""₹""" ).replace(""",""" , """""" ) )
)
* 1_0_0 )
except ValueError:
lowerCAmelCase_ :Union[str, Any] = float("""nan""" )
except AttributeError:
pass
        data_frame.loc[len(data_frame.index )] = [
product_title,
product_link,
product_price,
product_rating,
product_mrp,
discount,
]
lowerCAmelCase_ :List[Any] = """ """
lowerCAmelCase_ :Tuple = """ """
data_frame.index += 1
return data_frame
if __name__ == "__main__":
__UpperCAmelCase = 'headphones'
get_amazon_product_data(product).to_csv(F"""Amazon Product Data for {product}.csv""")
| 1 | 0 |
"""simple docstring"""
import os
import numpy
import onnx
def _is_equal_tensor_proto( a , b ) -> bool:
    '''Compare two tensor protos while ignoring their names.'''
    name_a = a.name
    name_b = b.name
    a.name = ""
    b.name = ""
    res = a == b
    a.name = name_a
    b.name = name_b
    return res
def _node_replace_input_with( node_proto , name , new_name ):
    '''Rewrite every input of ``node_proto`` named ``name`` to ``new_name``, recursing into subgraphs.'''
    for i, input_name in enumerate(node_proto.input ):
        if input_name == name:
            node_proto.input.insert(i , new_name )
            node_proto.input.pop(i + 1 )
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g , name , new_name )
        _graph_replace_input_with(node_proto.attribute[1].g , name , new_name )
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g , name , new_name )
def _graph_replace_input_with( graph_proto , name , new_name ):
    '''Apply the node-level input rewrite to every node of the graph.'''
    for n in graph_proto.node:
        _node_replace_input_with(n , name , new_name )
def _remove_dup_initializers_from_model( model , model_without_ext , ind_to_replace ):
    '''Drop duplicate initializers and repoint their consumers at the surviving copy.'''
    inits_with_data = list(model.graph.initializer )
    inits = list(model_without_ext.graph.initializer )
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i
        name_i = inits[i].name
        name_ref = inits[ref_i].name
        model_without_ext.graph.initializer.remove(inits[i] )
        # repoint every node that consumed the duplicate at the kept initializer
        _graph_replace_input_with(model_without_ext.graph , name_i , name_ref )
def remove_dup_initializers( onnx_file_path ):
    '''Deduplicate identical initializers in an ONNX file and save the optimized copy.'''
    model_file_folder = os.path.dirname(onnx_file_path )
    model_file_name = os.path.basename(onnx_file_path )
    model = onnx.load(os.path.join(model_file_folder , model_file_name ) )
    inits = list(model.graph.initializer )
    dup_set = set()
    dup_map = {}
    ind_to_replace = []
    total_reduced_size = 0
    for i in range(len(inits ) ):
        if i in dup_set:
            continue
        for j in range(i + 1 , len(inits ) ):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i] , inits[j] ):
                dup_set.add(i )
                dup_set.add(j )
                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims )
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8
                else:
                    print("""unexpected data type: """ , dtype )
                total_reduced_size += mem_size
                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j )
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i) )
    print("""total reduced size: """ , total_reduced_size / 1024 / 1024 / 1024 , """GB""" )
    ind_to_replace = sorted(ind_to_replace )
    _remove_dup_initializers_from_model(model , model , ind_to_replace )
    new_model_name = """optimized_""" + model_file_name
    new_model_path = os.path.join(model_file_folder , new_model_name )
    onnx.save(model , new_model_path )
    return new_model_path
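# Minimal usage sketch (the file name is a placeholder, not from this script):
# optimized_path = remove_dup_initializers("model.onnx")
# print("Optimized model written to", optimized_path)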
| 361 |
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    def setUp( self ):
        self.checkpoint = """laion/clap-htsat-unfused"""
        self.tmpdirname = tempfile.mkdtemp()
    def get_tokenizer( self , **kwargs ):
        return RobertaTokenizer.from_pretrained(self.checkpoint , **kwargs )
    def get_feature_extractor( self , **kwargs ):
        return ClapFeatureExtractor.from_pretrained(self.checkpoint , **kwargs )
    def tearDown( self ):
        shutil.rmtree(self.tmpdirname )
def __lowerCAmelCase ( self ) -> List[str]:
lowerCAmelCase_ :Optional[Any] = self.get_tokenizer()
lowerCAmelCase_ :Optional[Any] = self.get_feature_extractor()
lowerCAmelCase_ :Optional[int] = ClapProcessor(tokenizer=__A , feature_extractor=__A )
processor.save_pretrained(self.tmpdirname )
lowerCAmelCase_ :Optional[Any] = ClapProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , __A )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , __A )
def __lowerCAmelCase ( self ) -> Tuple:
lowerCAmelCase_ :Dict = ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() )
processor.save_pretrained(self.tmpdirname )
lowerCAmelCase_ :str = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
lowerCAmelCase_ :Dict = self.get_feature_extractor(do_normalize=__A , padding_value=1.0 )
lowerCAmelCase_ :Union[str, Any] = ClapProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=__A , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __A )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.feature_extractor , __A )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
lowerCAmelCase_ :Dict = self.get_feature_extractor()
lowerCAmelCase_ :str = self.get_tokenizer()
lowerCAmelCase_ :List[Any] = ClapProcessor(tokenizer=__A , feature_extractor=__A )
lowerCAmelCase_ :Optional[Any] = floats_list((3, 1000) )
lowerCAmelCase_ :Optional[Any] = feature_extractor(__A , return_tensors="""np""" )
lowerCAmelCase_ :str = processor(audios=__A , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __lowerCAmelCase ( self ) -> List[str]:
lowerCAmelCase_ :List[Any] = self.get_feature_extractor()
lowerCAmelCase_ :Any = self.get_tokenizer()
lowerCAmelCase_ :Optional[int] = ClapProcessor(tokenizer=__A , feature_extractor=__A )
lowerCAmelCase_ :List[Any] = """This is a test string"""
lowerCAmelCase_ :Dict = processor(text=__A )
lowerCAmelCase_ :List[str] = tokenizer(__A )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __lowerCAmelCase ( self ) -> int:
lowerCAmelCase_ :int = self.get_feature_extractor()
lowerCAmelCase_ :Tuple = self.get_tokenizer()
lowerCAmelCase_ :Optional[Any] = ClapProcessor(tokenizer=__A , feature_extractor=__A )
lowerCAmelCase_ :int = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCAmelCase_ :Tuple = processor.batch_decode(__A )
lowerCAmelCase_ :Optional[Any] = tokenizer.batch_decode(__A )
self.assertListEqual(__A , __A )
def __lowerCAmelCase ( self ) -> List[Any]:
lowerCAmelCase_ :Optional[Any] = self.get_feature_extractor()
lowerCAmelCase_ :Any = self.get_tokenizer()
lowerCAmelCase_ :Optional[int] = ClapProcessor(tokenizer=__A , feature_extractor=__A )
self.assertListEqual(
processor.model_input_names[2:] , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , )
| 1 | 0 |
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class DetaImageProcessingTester ( unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , num_channels=3 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , do_rescale=True , rescale_factor=1 / 255 , do_pad=True , ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values( self , image_inputs , batched=False ):
        if not batched:
            image = image_inputs[0]
            if isinstance(image , Image.Image ):
                w , h = image.size
            else:
                h , w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["""shortest_edge"""] * h / w )
                expected_width = self.size["""shortest_edge"""]
            elif w > h:
                expected_height = self.size["""shortest_edge"""]
                expected_width = int(self.size["""shortest_edge"""] * w / h )
            else:
                expected_height = self.size["""shortest_edge"""]
                expected_width = self.size["""shortest_edge"""]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height , expected_width = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            expected_height = max(expected_values , key=lambda item : item[0] )[0]
            expected_width = max(expected_values , key=lambda item : item[1] )[1]
        return expected_height, expected_width
@require_torch
@require_vision
class _SCREAMING_SNAKE_CASE ( ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = DetaImageProcessor if is_vision_available() else None
    def setUp( self ):
        self.image_processor_tester = DetaImageProcessingTester(self )
@property
    def image_processor_dict( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def __lowerCAmelCase ( self ) -> str:
lowerCAmelCase_ :Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCAmelCase__ , """image_mean""" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , """image_std""" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , """do_normalize""" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , """do_resize""" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , """do_rescale""" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , """do_pad""" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , """size""" ) )
def __lowerCAmelCase ( self ) -> List[str]:
lowerCAmelCase_ :Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 18, """longest_edge""": 1333} )
self.assertEqual(image_processor.do_pad , lowerCAmelCase__ )
def __lowerCAmelCase ( self ) -> str:
pass
def __lowerCAmelCase ( self ) -> Any:
# Initialize image_processing
lowerCAmelCase_ :Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCAmelCase_ :Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , Image.Image )
# Test not batched input
lowerCAmelCase_ :str = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
        lowerCAmelCase_ , lowerCAmelCase_ :Dict = self.image_processor_tester.get_expected_values(lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
        lowerCAmelCase_ , lowerCAmelCase_ :Any = self.image_processor_tester.get_expected_values(lowerCAmelCase__ , batched=True )
lowerCAmelCase_ :Tuple = image_processing(lowerCAmelCase__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __lowerCAmelCase ( self ) -> Optional[Any]:
# Initialize image_processing
lowerCAmelCase_ :Dict = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCAmelCase_ :Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , numpify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , np.ndarray )
# Test not batched input
lowerCAmelCase_ :Dict = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
        lowerCAmelCase_ , lowerCAmelCase_ :List[str] = self.image_processor_tester.get_expected_values(lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCAmelCase_ :Tuple = image_processing(lowerCAmelCase__ , return_tensors="""pt""" ).pixel_values
        lowerCAmelCase_ , lowerCAmelCase_ :Optional[Any] = self.image_processor_tester.get_expected_values(lowerCAmelCase__ , batched=True )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __lowerCAmelCase ( self ) -> int:
# Initialize image_processing
lowerCAmelCase_ :Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCAmelCase_ :List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , torchify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , torch.Tensor )
# Test not batched input
lowerCAmelCase_ :Any = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
        lowerCAmelCase_ , lowerCAmelCase_ :Any = self.image_processor_tester.get_expected_values(lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCAmelCase_ :str = image_processing(lowerCAmelCase__ , return_tensors="""pt""" ).pixel_values
        lowerCAmelCase_ , lowerCAmelCase_ :Any = self.image_processor_tester.get_expected_values(lowerCAmelCase__ , batched=True )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def __lowerCAmelCase ( self ) -> str:
# prepare image and target
lowerCAmelCase_ :Any = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f:
lowerCAmelCase_ :int = json.loads(f.read() )
lowerCAmelCase_ :List[str] = {"image_id": 3_9769, "annotations": target}
# encode them
lowerCAmelCase_ :Dict = DetaImageProcessor()
lowerCAmelCase_ :Optional[Any] = image_processing(images=lowerCAmelCase__ , annotations=lowerCAmelCase__ , return_tensors="""pt""" )
# verify pixel values
lowerCAmelCase_ :Dict = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape , lowerCAmelCase__ )
lowerCAmelCase_ :Any = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , lowerCAmelCase__ , atol=1E-4 ) )
# verify area
lowerCAmelCase_ :Tuple = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , lowerCAmelCase__ ) )
# verify boxes
lowerCAmelCase_ :Optional[int] = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , lowerCAmelCase__ )
lowerCAmelCase_ :List[Any] = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , lowerCAmelCase__ , atol=1E-3 ) )
# verify image_id
lowerCAmelCase_ :Union[str, Any] = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , lowerCAmelCase__ ) )
# verify is_crowd
lowerCAmelCase_ :Any = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , lowerCAmelCase__ ) )
# verify class_labels
lowerCAmelCase_ :str = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , lowerCAmelCase__ ) )
# verify orig_size
lowerCAmelCase_ :int = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , lowerCAmelCase__ ) )
# verify size
lowerCAmelCase_ :List[str] = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , lowerCAmelCase__ ) )
@slow
def __lowerCAmelCase ( self ) -> Any:
# prepare image, target and masks_path
lowerCAmelCase_ :Tuple = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f:
lowerCAmelCase_ :Dict = json.loads(f.read() )
lowerCAmelCase_ :Optional[int] = {"file_name": "000000039769.png", "image_id": 3_9769, "segments_info": target}
lowerCAmelCase_ :Optional[int] = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" )
# encode them
lowerCAmelCase_ :List[Any] = DetaImageProcessor(format="""coco_panoptic""" )
lowerCAmelCase_ :Optional[Any] = image_processing(images=lowerCAmelCase__ , annotations=lowerCAmelCase__ , masks_path=lowerCAmelCase__ , return_tensors="""pt""" )
# verify pixel values
lowerCAmelCase_ :Optional[int] = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape , lowerCAmelCase__ )
lowerCAmelCase_ :Any = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , lowerCAmelCase__ , atol=1E-4 ) )
# verify area
lowerCAmelCase_ :str = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , lowerCAmelCase__ ) )
# verify boxes
lowerCAmelCase_ :Tuple = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , lowerCAmelCase__ )
lowerCAmelCase_ :List[str] = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , lowerCAmelCase__ , atol=1E-3 ) )
# verify image_id
lowerCAmelCase_ :Optional[int] = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , lowerCAmelCase__ ) )
# verify is_crowd
lowerCAmelCase_ :Tuple = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , lowerCAmelCase__ ) )
# verify class_labels
lowerCAmelCase_ :Tuple = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , lowerCAmelCase__ ) )
# verify masks
lowerCAmelCase_ :Union[str, Any] = 82_2873
self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , lowerCAmelCase__ )
# verify orig_size
lowerCAmelCase_ :Union[str, Any] = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , lowerCAmelCase__ ) )
# verify size
lowerCAmelCase_ :Optional[Any] = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , lowerCAmelCase__ ) )
| 362 |
"""simple docstring"""
import os
from math import log10
def _snake_case ( lowercase__ : str = "base_exp.txt" ) -> int:
'''simple docstring'''
lowerCAmelCase_ :float = 0
lowerCAmelCase_ :Union[str, Any] = 0
for i, line in enumerate(open(os.path.join(os.path.dirname(lowercase__ ) , lowercase__ ) ) ):
lowerCAmelCase_ , lowerCAmelCase_ :Union[str, Any] = list(map(lowercase__ , line.split(""",""" ) ) )
if x * logaa(lowercase__ ) > largest:
lowerCAmelCase_ :Any = x * logaa(lowercase__ )
lowerCAmelCase_ :List[Any] = i + 1
return result
if __name__ == "__main__":
print(solution())
| 1 | 0 |
"""simple docstring"""
def sum_digits(num: int) -> int:
    '''simple docstring'''
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum
def solution(max_n: int = 100) -> int:
    '''simple docstring'''
    # Numerator of the max_n-th convergent of the continued fraction of e,
    # [2; 1, 2, 1, 1, 4, 1, 1, 6, ...]: every third coefficient is even
    # (2, 4, 6, ...), the rest are 1. Returns the digit sum of that numerator.
    pre_numerator = 1
    cur_numerator = 2
    for i in range(2, max_n + 1):
        temp = pre_numerator
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp
    return sum_digits(cur_numerator)
if __name__ == "__main__":
print(F"""{solution() = }""")
| 363 |
"""simple docstring"""
import itertools
import math
def is_prime(number: int) -> bool:
    '''simple docstring'''
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, and all multiples of 3 are not prime
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def prime_generator():
    '''simple docstring'''
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1
def solution(nth: int = 10001) -> int:
    '''simple docstring'''
    # islice skips the first nth - 1 primes, so next() yields the nth prime.
    return next(itertools.islice(prime_generator(), nth - 1, nth))
if __name__ == "__main__":
print(F"""{solution() = }""")
| 1 | 0 |
"""simple docstring"""
from __future__ import annotations
import math
def prime_sieve(num: int) -> list[int]:
    '''simple docstring'''
    if num <= 0:
        message = f"""{num}: Invalid input, please enter a positive integer."""
        raise ValueError(message)
    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num))
    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start)
            # Mark multiples of start as composite
            for i in range(start * start, num + 1, start):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1
    for j in range(end + 1, num + 1):
        if sieve[j] is True:
            prime.append(j)
    return prime
if __name__ == "__main__":
print(prime_sieve(int(input('Enter a positive integer: ').strip())))
| 364 |
"""simple docstring"""
def solution(length: int = 50) -> int:
    '''simple docstring'''
    # ways_number[n] counts the ways to fill a row of length n with blocks of
    # length >= 3 separated by at least one empty cell (empty row included).
    ways_number = [1] * (length + 1)
for row_length in range(3 , length + 1 ):
for block_length in range(3 , row_length + 1 ):
for block_start in range(row_length - block_length ):
ways_number[row_length] += ways_number[
row_length - block_start - block_length - 1
]
ways_number[row_length] += 1
return ways_number[length]
if __name__ == "__main__":
print(F"""{solution() = }""")
| 1 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
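# Standard transformers lazy-import scaffolding: submodules are imported only
# on first attribute access, via the _LazyModule registered at the bottom.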
_import_structure = {
'configuration_falcon': ['FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FalconConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_falcon'] = [
'FALCON_PRETRAINED_MODEL_ARCHIVE_LIST',
'FalconForCausalLM',
'FalconModel',
'FalconPreTrainedModel',
'FalconForSequenceClassification',
'FalconForTokenClassification',
'FalconForQuestionAnswering',
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 365 |
"""simple docstring"""
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
StableDiffusionControlNetImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
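# Deterministic CUDA/cuDNN kernels so pipeline outputs are reproducible across runs.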
class _SCREAMING_SNAKE_CASE ( PipelineLatentTesterMixin , PipelineKarrasSchedulerTesterMixin , PipelineTesterMixin , unittest.TestCase ):
UpperCAmelCase_ :int = StableDiffusionControlNetImgaImgPipeline
UpperCAmelCase_ :str = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
UpperCAmelCase_ :Tuple = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
UpperCAmelCase_ :List[str] = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"} )
UpperCAmelCase_ :Optional[Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS
def __lowerCAmelCase ( self ) -> List[str]:
torch.manual_seed(0 )
lowerCAmelCase_ :Tuple = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
torch.manual_seed(0 )
lowerCAmelCase_ :List[Any] = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
torch.manual_seed(0 )
lowerCAmelCase_ :Optional[Any] = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=__A , set_alpha_to_one=__A , )
torch.manual_seed(0 )
lowerCAmelCase_ :List[str] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
lowerCAmelCase_ :Union[str, Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
lowerCAmelCase_ :List[Any] = CLIPTextModel(__A )
lowerCAmelCase_ :int = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
lowerCAmelCase_ :Union[str, Any] = {
"""unet""": unet,
"""controlnet""": controlnet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def __lowerCAmelCase ( self , __A , __A=0 ) -> List[str]:
if str(__A ).startswith("""mps""" ):
lowerCAmelCase_ :Tuple = torch.manual_seed(__A )
else:
lowerCAmelCase_ :Optional[int] = torch.Generator(device=__A ).manual_seed(__A )
lowerCAmelCase_ :List[Any] = 2
lowerCAmelCase_ :int = randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=__A , device=torch.device(__A ) , )
lowerCAmelCase_ :Optional[int] = floats_tensor(control_image.shape , rng=random.Random(__A ) ).to(__A )
lowerCAmelCase_ :Tuple = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowerCAmelCase_ :List[Any] = Image.fromarray(np.uinta(__A ) ).convert("""RGB""" ).resize((64, 64) )
lowerCAmelCase_ :Union[str, Any] = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
"""image""": image,
"""control_image""": control_image,
}
return inputs
def __lowerCAmelCase ( self ) -> int:
return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 )
def __lowerCAmelCase ( self ) -> List[str]:
self._test_inference_batch_single_identical(expected_max_diff=2E-3 )
class _SCREAMING_SNAKE_CASE ( PipelineTesterMixin , PipelineKarrasSchedulerTesterMixin , unittest.TestCase ):
UpperCAmelCase_ :List[str] = StableDiffusionControlNetImgaImgPipeline
UpperCAmelCase_ :int = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
UpperCAmelCase_ :str = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
UpperCAmelCase_ :int = frozenset([] ) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
def __lowerCAmelCase ( self ) -> Optional[int]:
torch.manual_seed(0 )
lowerCAmelCase_ :Dict = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
torch.manual_seed(0 )
        def init_weights(m ):
            if isinstance(m , torch.nn.Conv2d ):
                torch.nn.init.normal_(m.weight )
                m.bias.data.fill_(1.0 )
lowerCAmelCase_ :List[Any] = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
controlneta.controlnet_down_blocks.apply(__A )
torch.manual_seed(0 )
lowerCAmelCase_ :Optional[Any] = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
controlneta.controlnet_down_blocks.apply(__A )
torch.manual_seed(0 )
lowerCAmelCase_ :Optional[Any] = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=__A , set_alpha_to_one=__A , )
torch.manual_seed(0 )
lowerCAmelCase_ :Optional[int] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
lowerCAmelCase_ :Optional[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
lowerCAmelCase_ :str = CLIPTextModel(__A )
lowerCAmelCase_ :str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
lowerCAmelCase_ :Optional[Any] = MultiControlNetModel([controlneta, controlneta] )
lowerCAmelCase_ :List[Any] = {
"""unet""": unet,
"""controlnet""": controlnet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def __lowerCAmelCase ( self , __A , __A=0 ) -> str:
if str(__A ).startswith("""mps""" ):
lowerCAmelCase_ :Optional[Any] = torch.manual_seed(__A )
else:
lowerCAmelCase_ :List[Any] = torch.Generator(device=__A ).manual_seed(__A )
lowerCAmelCase_ :Optional[Any] = 2
lowerCAmelCase_ :Optional[int] = [
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=__A , device=torch.device(__A ) , ),
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=__A , device=torch.device(__A ) , ),
]
lowerCAmelCase_ :int = floats_tensor(control_image[0].shape , rng=random.Random(__A ) ).to(__A )
lowerCAmelCase_ :Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowerCAmelCase_ :List[Any] = Image.fromarray(np.uinta(__A ) ).convert("""RGB""" ).resize((64, 64) )
lowerCAmelCase_ :List[str] = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
"""image""": image,
"""control_image""": control_image,
}
return inputs
def __lowerCAmelCase ( self ) -> Optional[Any]:
lowerCAmelCase_ :List[str] = self.get_dummy_components()
lowerCAmelCase_ :Tuple = self.pipeline_class(**__A )
pipe.to(__A )
lowerCAmelCase_ :Union[str, Any] = 1_0.0
lowerCAmelCase_ :Union[str, Any] = 4
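        # Run the same inputs four times with different control guidance windows;
        # the resulting images are compared below and must all differ.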
lowerCAmelCase_ :Tuple = self.get_dummy_inputs(__A )
lowerCAmelCase_ :List[str] = steps
lowerCAmelCase_ :int = scale
        output_1 = pipe(**__A )[0]
lowerCAmelCase_ :Any = self.get_dummy_inputs(__A )
lowerCAmelCase_ :str = steps
lowerCAmelCase_ :str = scale
        output_2 = pipe(**__A , control_guidance_start=0.1 , control_guidance_end=0.2 )[0]
lowerCAmelCase_ :Optional[Any] = self.get_dummy_inputs(__A )
lowerCAmelCase_ :Union[str, Any] = steps
lowerCAmelCase_ :Union[str, Any] = scale
        output_3 = pipe(**__A , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7] )[0]
lowerCAmelCase_ :List[str] = self.get_dummy_inputs(__A )
lowerCAmelCase_ :Optional[int] = steps
lowerCAmelCase_ :Tuple = scale
        output_4 = pipe(**__A , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8] )[0]
# make sure that all outputs are different
        assert np.sum(np.abs(output_1 - output_2 ) ) > 1E-3
        assert np.sum(np.abs(output_1 - output_3 ) ) > 1E-3
        assert np.sum(np.abs(output_1 - output_4 ) ) > 1E-3
def __lowerCAmelCase ( self ) -> Dict:
return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def __lowerCAmelCase ( self ) -> Tuple:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 )
def __lowerCAmelCase ( self ) -> Optional[int]:
self._test_inference_batch_single_identical(expected_max_diff=2E-3 )
def __lowerCAmelCase ( self ) -> List[str]:
lowerCAmelCase_ :str = self.get_dummy_components()
lowerCAmelCase_ :Tuple = self.pipeline_class(**__A )
pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
with tempfile.TemporaryDirectory() as tmpdir:
try:
# save_pretrained is not implemented for Multi-ControlNet
pipe.save_pretrained(__A )
except NotImplementedError:
pass
@slow
@require_torch_gpu
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __lowerCAmelCase ( self ) -> int:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self ) -> str:
lowerCAmelCase_ :Any = ControlNetModel.from_pretrained("""lllyasviel/sd-controlnet-canny""" )
lowerCAmelCase_ :int = StableDiffusionControlNetImgaImgPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , safety_checker=__A , controlnet=__A )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=__A )
lowerCAmelCase_ :List[str] = torch.Generator(device="""cpu""" ).manual_seed(0 )
lowerCAmelCase_ :List[Any] = """evil space-punk bird"""
lowerCAmelCase_ :List[Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png""" ).resize((512, 512) )
lowerCAmelCase_ :int = load_image(
"""https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png""" ).resize((512, 512) )
lowerCAmelCase_ :Union[str, Any] = pipe(
__A , __A , control_image=__A , generator=__A , output_type="""np""" , num_inference_steps=50 , strength=0.6 , )
lowerCAmelCase_ :Tuple = output.images[0]
assert image.shape == (512, 512, 3)
lowerCAmelCase_ :Tuple = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy""" )
assert np.abs(expected_image - image ).max() < 9E-2
| 1 | 0 |
"""simple docstring"""
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
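# Convert an original X-CLIP research checkpoint to the Hugging Face format:
# build the config from the model name, rename and split the state dict,
# sanity-check the logits on a sample video, then optionally save or push.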
def _snake_case ( lowercase__ : Tuple , lowercase__ : str ) -> Tuple:
'''simple docstring'''
lowerCAmelCase_ :int = XCLIPTextConfig()
# derive patch size from model name
lowerCAmelCase_ :Tuple = model_name.find("""patch""" )
lowerCAmelCase_ :Any = int(model_name[start_idx + len("""patch""" ) : start_idx + len("""patch""" ) + 2] )
lowerCAmelCase_ :Any = XCLIPVisionConfig(patch_size=SCREAMING_SNAKE_CASE__ , num_frames=SCREAMING_SNAKE_CASE__ )
if "large" in model_name:
lowerCAmelCase_ :Optional[Any] = 7_6_8
lowerCAmelCase_ :int = 3_0_7_2
lowerCAmelCase_ :str = 1_2
lowerCAmelCase_ :Any = 1_0_2_4
lowerCAmelCase_ :List[Any] = 4_0_9_6
lowerCAmelCase_ :List[Any] = 1_6
lowerCAmelCase_ :str = 2_4
lowerCAmelCase_ :List[str] = 7_6_8
lowerCAmelCase_ :Optional[Any] = 3_0_7_2
if model_name == "xclip-large-patch14-16-frames":
lowerCAmelCase_ :str = 3_3_6
lowerCAmelCase_ :int = XCLIPConfig.from_text_vision_configs(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if "large" in model_name:
lowerCAmelCase_ :List[str] = 7_6_8
return config
def _snake_case ( lowercase__ : str ) -> List[str]:
'''simple docstring'''
if name == "token_embedding.weight":
lowerCAmelCase_ :Union[str, Any] = name.replace("""token_embedding.weight""" , """text_model.embeddings.token_embedding.weight""" )
if name == "positional_embedding":
lowerCAmelCase_ :List[Any] = name.replace("""positional_embedding""" , """text_model.embeddings.position_embedding.weight""" )
if "ln_1" in name:
lowerCAmelCase_ :str = name.replace("""ln_1""" , """layer_norm1""" )
if "ln_2" in name:
lowerCAmelCase_ :Optional[Any] = name.replace("""ln_2""" , """layer_norm2""" )
if "c_fc" in name:
lowerCAmelCase_ :List[Any] = name.replace("""c_fc""" , """fc1""" )
if "c_proj" in name:
lowerCAmelCase_ :int = name.replace("""c_proj""" , """fc2""" )
if name.startswith("""transformer.resblocks""" ):
lowerCAmelCase_ :List[Any] = name.replace("""transformer.resblocks""" , """text_model.encoder.layers""" )
if "attn.out_proj" in name and "message" not in name:
lowerCAmelCase_ :List[Any] = name.replace("""attn.out_proj""" , """self_attn.out_proj""" )
if "ln_final" in name:
lowerCAmelCase_ :Tuple = name.replace("""ln_final""" , """text_model.final_layer_norm""" )
# visual encoder
if name == "visual.class_embedding":
lowerCAmelCase_ :str = name.replace("""visual.class_embedding""" , """vision_model.embeddings.class_embedding""" )
if name == "visual.positional_embedding":
lowerCAmelCase_ :int = name.replace("""visual.positional_embedding""" , """vision_model.embeddings.position_embedding.weight""" )
if name.startswith("""visual.transformer.resblocks""" ):
lowerCAmelCase_ :Union[str, Any] = name.replace("""visual.transformer.resblocks""" , """vision_model.encoder.layers""" )
if "visual.conv1" in name:
lowerCAmelCase_ :List[str] = name.replace("""visual.conv1""" , """vision_model.embeddings.patch_embedding""" )
if "visual.ln_pre" in name:
lowerCAmelCase_ :Tuple = name.replace("""visual.ln_pre""" , """vision_model.pre_layernorm""" )
if "visual.ln_post" in name:
lowerCAmelCase_ :Dict = name.replace("""visual.ln_post""" , """vision_model.post_layernorm""" )
if "visual.proj" in name:
lowerCAmelCase_ :List[str] = name.replace("""visual.proj""" , """visual_projection.weight""" )
if "text_projection" in name:
lowerCAmelCase_ :Tuple = name.replace("""text_projection""" , """text_projection.weight""" )
# things on top
if "prompts_visual_proj" in name:
lowerCAmelCase_ :Any = name.replace("""prompts_visual_proj""" , """prompts_visual_projection""" )
if "prompts_visual_ln" in name:
lowerCAmelCase_ :Any = name.replace("""prompts_visual_ln""" , """prompts_visual_layernorm""" )
# mit
if name == "mit.positional_embedding":
lowerCAmelCase_ :int = name.replace("""positional""" , """position""" )
if name.startswith("""mit.resblocks""" ):
lowerCAmelCase_ :Union[str, Any] = name.replace("""mit.resblocks""" , """mit.encoder.layers""" )
# prompts generator
if name.startswith("""prompts_generator.norm""" ):
lowerCAmelCase_ :List[Any] = name.replace("""prompts_generator.norm""" , """prompts_generator.layernorm""" )
return name
def _snake_case ( lowercase__ : Optional[int] , lowercase__ : str ) -> Optional[int]:
'''simple docstring'''
for key in orig_state_dict.copy().keys():
lowerCAmelCase_ :Dict = orig_state_dict.pop(SCREAMING_SNAKE_CASE__ )
if "attn.in_proj" in key:
lowerCAmelCase_ :Optional[int] = key.split(""".""" )
if key.startswith("""visual""" ):
lowerCAmelCase_ :str = key_split[3]
lowerCAmelCase_ :Union[str, Any] = config.vision_config.hidden_size
if "message_attn" in key:
if "weight" in key:
lowerCAmelCase_ :Dict = val[
:dim, :
]
lowerCAmelCase_ :Tuple = val[
dim : dim * 2, :
]
lowerCAmelCase_ :Any = val[
-dim:, :
]
else:
lowerCAmelCase_ :Optional[Any] = val[
:dim
]
lowerCAmelCase_ :int = val[
dim : dim * 2
]
lowerCAmelCase_ :List[Any] = val[
-dim:
]
else:
if "weight" in key:
lowerCAmelCase_ :Any = val[
:dim, :
]
lowerCAmelCase_ :Dict = val[
dim : dim * 2, :
]
lowerCAmelCase_ :str = val[
-dim:, :
]
else:
lowerCAmelCase_ :Dict = val[:dim]
lowerCAmelCase_ :Tuple = val[
dim : dim * 2
]
lowerCAmelCase_ :Optional[int] = val[-dim:]
elif key.startswith("""mit""" ):
lowerCAmelCase_ :Union[str, Any] = key_split[2]
lowerCAmelCase_ :List[str] = config.vision_config.mit_hidden_size
if "weight" in key:
lowerCAmelCase_ :Tuple = val[:dim, :]
lowerCAmelCase_ :List[Any] = val[dim : dim * 2, :]
lowerCAmelCase_ :Tuple = val[-dim:, :]
else:
lowerCAmelCase_ :Optional[int] = val[:dim]
lowerCAmelCase_ :int = val[dim : dim * 2]
lowerCAmelCase_ :int = val[-dim:]
else:
lowerCAmelCase_ :int = key_split[2]
lowerCAmelCase_ :Any = config.text_config.hidden_size
if "weight" in key:
lowerCAmelCase_ :Any = val[:dim, :]
lowerCAmelCase_ :Any = val[
dim : dim * 2, :
]
lowerCAmelCase_ :Optional[int] = val[-dim:, :]
else:
lowerCAmelCase_ :Union[str, Any] = val[:dim]
lowerCAmelCase_ :str = val[
dim : dim * 2
]
lowerCAmelCase_ :Tuple = val[-dim:]
else:
lowerCAmelCase_ :List[Any] = rename_key(SCREAMING_SNAKE_CASE__ )
if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
lowerCAmelCase_ :Any = val.T
lowerCAmelCase_ :int = val
return orig_state_dict
def _snake_case ( lowercase__ : List[Any] ) -> List[Any]:
'''simple docstring'''
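    # Fetch a short "eating spaghetti" clip (a saved numpy array of frames)
    # whose length matches the requested number of frames.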
if num_frames == 8:
lowerCAmelCase_ :Optional[Any] = '''eating_spaghetti_8_frames.npy'''
elif num_frames == 1_6:
lowerCAmelCase_ :List[Any] = '''eating_spaghetti.npy'''
elif num_frames == 3_2:
lowerCAmelCase_ :Optional[Any] = '''eating_spaghetti_32_frames.npy'''
lowerCAmelCase_ :Tuple = hf_hub_download(
repo_id="""hf-internal-testing/spaghetti-video""" , filename=SCREAMING_SNAKE_CASE__ , repo_type="""dataset""" , )
lowerCAmelCase_ :List[Any] = np.load(SCREAMING_SNAKE_CASE__ )
return list(SCREAMING_SNAKE_CASE__ )
def _snake_case ( lowercase__ : Any , lowercase__ : str=None , lowercase__ : Any=False ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase_ :List[Any] = {
# fully supervised kinetics-400 checkpoints
'''xclip-base-patch32''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth''',
'''xclip-base-patch32-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth'''
),
'''xclip-base-patch16''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth''',
'''xclip-base-patch16-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth'''
),
'''xclip-large-patch14''': '''https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb''',
'''xclip-large-patch14-16-frames''': '''https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f''',
# fully supervised kinetics-600 checkpoints
'''xclip-base-patch16-kinetics-600''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth'''
),
'''xclip-base-patch16-kinetics-600-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth'''
),
'''xclip-large-patch14-kinetics-600''': '''https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be''',
# few shot
'''xclip-base-patch16-hmdb-2-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth'''
),
'''xclip-base-patch16-hmdb-4-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth'''
),
'''xclip-base-patch16-hmdb-8-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth'''
),
'''xclip-base-patch16-hmdb-16-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth'''
),
'''xclip-base-patch16-ucf-2-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth'''
),
'''xclip-base-patch16-ucf-4-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth'''
),
'''xclip-base-patch16-ucf-8-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth'''
),
'''xclip-base-patch16-ucf-16-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth'''
),
# zero shot
'''xclip-base-patch16-zero-shot''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth''',
}
lowerCAmelCase_ :Tuple = model_to_url[model_name]
lowerCAmelCase_ :List[str] = 8
if "16-frames" in model_name:
lowerCAmelCase_ :Tuple = 1_6
elif "shot" in model_name:
lowerCAmelCase_ :Optional[int] = 3_2
lowerCAmelCase_ :Optional[int] = get_xclip_config(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowerCAmelCase_ :Union[str, Any] = XCLIPModel(SCREAMING_SNAKE_CASE__ )
model.eval()
if "drive" in checkpoint_url:
lowerCAmelCase_ :List[Any] = '''pytorch_model.bin'''
gdown.cached_download(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , quiet=SCREAMING_SNAKE_CASE__ )
lowerCAmelCase_ :str = torch.load(SCREAMING_SNAKE_CASE__ , map_location="""cpu""" )['''model''']
else:
lowerCAmelCase_ :List[str] = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE__ )['''model''']
lowerCAmelCase_ :Dict = convert_state_dict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowerCAmelCase_ :Union[str, Any] = XCLIPModel(SCREAMING_SNAKE_CASE__ )
    missing_keys , unexpected_keys = model.load_state_dict(SCREAMING_SNAKE_CASE__ , strict=SCREAMING_SNAKE_CASE__ )
assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
model.eval()
lowerCAmelCase_ :Optional[Any] = 3_3_6 if model_name == '''xclip-large-patch14-16-frames''' else 2_2_4
lowerCAmelCase_ :Union[str, Any] = VideoMAEImageProcessor(size=SCREAMING_SNAKE_CASE__ )
lowerCAmelCase_ :Optional[Any] = CLIPTokenizer.from_pretrained("""openai/clip-vit-base-patch32""" )
lowerCAmelCase_ :Union[str, Any] = CLIPTokenizerFast.from_pretrained("""openai/clip-vit-base-patch32""" )
lowerCAmelCase_ :str = XCLIPProcessor(image_processor=SCREAMING_SNAKE_CASE__ , tokenizer=SCREAMING_SNAKE_CASE__ )
lowerCAmelCase_ :Tuple = prepare_video(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase_ :List[Any] = processor(
text=["""playing sports""", """eating spaghetti""", """go shopping"""] , videos=SCREAMING_SNAKE_CASE__ , return_tensors="""pt""" , padding=SCREAMING_SNAKE_CASE__ )
print("""Shape of pixel values:""" , inputs.pixel_values.shape )
with torch.no_grad():
lowerCAmelCase_ :Optional[Any] = model(**SCREAMING_SNAKE_CASE__ )
# Verify outputs
lowerCAmelCase_ :Tuple = outputs.logits_per_video
lowerCAmelCase_ :str = logits_per_video.softmax(dim=1 )
print("""Probs:""" , SCREAMING_SNAKE_CASE__ )
# kinetics-400
if model_name == "xclip-base-patch32":
lowerCAmelCase_ :Optional[int] = torch.tensor([[0.0019, 0.9951, 0.0030]] )
elif model_name == "xclip-base-patch32-16-frames":
lowerCAmelCase_ :Optional[Any] = torch.tensor([[7.0999E-04, 9.9883E-01, 4.5580E-04]] )
elif model_name == "xclip-base-patch16":
lowerCAmelCase_ :List[Any] = torch.tensor([[0.0083, 0.9681, 0.0236]] )
elif model_name == "xclip-base-patch16-16-frames":
lowerCAmelCase_ :Optional[int] = torch.tensor([[7.6937E-04, 9.9728E-01, 1.9473E-03]] )
elif model_name == "xclip-large-patch14":
lowerCAmelCase_ :Dict = torch.tensor([[0.0062, 0.9864, 0.0075]] )
elif model_name == "xclip-large-patch14-16-frames":
lowerCAmelCase_ :Tuple = torch.tensor([[3.3877E-04, 9.9937E-01, 2.8888E-04]] )
# kinetics-600
elif model_name == "xclip-base-patch16-kinetics-600":
lowerCAmelCase_ :Dict = torch.tensor([[0.0555, 0.8914, 0.0531]] )
elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
lowerCAmelCase_ :Dict = torch.tensor([[3.8554E-04, 9.9929E-01, 3.2754E-04]] )
elif model_name == "xclip-large-patch14-kinetics-600":
lowerCAmelCase_ :str = torch.tensor([[0.0036, 0.9920, 0.0045]] )
# few shot
elif model_name == "xclip-base-patch16-hmdb-2-shot":
lowerCAmelCase_ :Tuple = torch.tensor([[7.1890E-06, 9.9994E-01, 5.6559E-05]] )
elif model_name == "xclip-base-patch16-hmdb-4-shot":
lowerCAmelCase_ :Any = torch.tensor([[1.0320E-05, 9.9993E-01, 6.2435E-05]] )
elif model_name == "xclip-base-patch16-hmdb-8-shot":
lowerCAmelCase_ :Union[str, Any] = torch.tensor([[4.1377E-06, 9.9990E-01, 9.8386E-05]] )
elif model_name == "xclip-base-patch16-hmdb-16-shot":
lowerCAmelCase_ :List[str] = torch.tensor([[4.1347E-05, 9.9962E-01, 3.3411E-04]] )
elif model_name == "xclip-base-patch16-ucf-2-shot":
lowerCAmelCase_ :List[str] = torch.tensor([[8.5857E-05, 9.9928E-01, 6.3291E-04]] )
elif model_name == "xclip-base-patch16-ucf-4-shot":
lowerCAmelCase_ :int = torch.tensor([[8.5857E-05, 9.9928E-01, 6.3291E-04]] )
elif model_name == "xclip-base-patch16-ucf-8-shot":
lowerCAmelCase_ :Optional[Any] = torch.tensor([[0.0027, 0.9904, 0.0070]] )
elif model_name == "xclip-base-patch16-ucf-16-shot":
lowerCAmelCase_ :Optional[Any] = torch.tensor([[9.8219E-04, 9.9593E-01, 3.0863E-03]] )
# zero shot
elif model_name == "xclip-base-patch16-zero-shot":
lowerCAmelCase_ :int = torch.tensor([[3.5082E-04, 9.9785E-01, 1.7966E-03]] )
else:
raise ValueError(f"""Model name {model_name} not supported""" )
assert torch.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , atol=1E-3 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(SCREAMING_SNAKE_CASE__ )
if push_to_hub:
print("""Pushing model, processor and slow tokenizer files to the hub...""" )
model.push_to_hub(SCREAMING_SNAKE_CASE__ , organization="""nielsr""" )
processor.push_to_hub(SCREAMING_SNAKE_CASE__ , organization="""nielsr""" )
slow_tokenizer.push_to_hub(SCREAMING_SNAKE_CASE__ , organization="""nielsr""" )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='xclip-base-patch32',
type=str,
help='Name of the model.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
__UpperCAmelCase = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 366 |
"""simple docstring"""
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class _SCREAMING_SNAKE_CASE ( ModelMixin , ConfigMixin , ModuleUtilsMixin ):
UpperCAmelCase_ :List[str] = [r"h\.\d+\.attn\.bias", r"h\.\d+\.attn\.masked_bias"]
@register_to_config
def __init__( self , __A , __A , __A = None , __A = 5_0257 , __A = 1024 , __A = 768 , __A = 12 , __A = 12 , __A = None , __A = "gelu_new" , __A = 0.1 , __A = 0.1 , __A = 0.1 , __A = 1E-5 , __A = 0.0_2 , __A = True , __A = True , __A = False , __A = False , ) -> Optional[Any]:
super().__init__()
lowerCAmelCase_ :List[str] = prefix_length
if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
raise ValueError(
f"""`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and"""
f""" `n_embd`: {n_embd} are not equal.""" )
lowerCAmelCase_ :Union[str, Any] = prefix_inner_dim
lowerCAmelCase_ :str = prefix_hidden_dim
lowerCAmelCase_ :str = (
nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim )
if self.prefix_hidden_dim is not None
else nn.Identity()
)
lowerCAmelCase_ :List[Any] = (
nn.Linear(self.prefix_hidden_dim , __A ) if self.prefix_hidden_dim is not None else nn.Identity()
)
lowerCAmelCase_ :Any = GPTaConfig(
vocab_size=__A , n_positions=__A , n_embd=__A , n_layer=__A , n_head=__A , n_inner=__A , activation_function=__A , resid_pdrop=__A , embd_pdrop=__A , attn_pdrop=__A , layer_norm_epsilon=__A , initializer_range=__A , scale_attn_weights=__A , use_cache=__A , scale_attn_by_inverse_layer_idx=__A , reorder_and_upcast_attn=__A , )
lowerCAmelCase_ :Any = GPTaLMHeadModel(__A )
def __lowerCAmelCase ( self , __A , __A , __A = None , __A = None , ) -> List[str]:
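        # Embed the caption tokens, project the feature prefix through the
        # encode/decode projections, prepend it to the token embeddings, and
        # run GPT-2 over the concatenated sequence.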
lowerCAmelCase_ :str = self.transformer.transformer.wte(__A )
lowerCAmelCase_ :Any = self.encode_prefix(__A )
lowerCAmelCase_ :Optional[Any] = self.decode_prefix(__A )
lowerCAmelCase_ :Optional[int] = torch.cat((prefix_embeds, embedding_text) , dim=1 )
if labels is not None:
lowerCAmelCase_ :int = self.get_dummy_token(input_ids.shape[0] , input_ids.device )
lowerCAmelCase_ :Optional[Any] = torch.cat((dummy_token, input_ids) , dim=1 )
lowerCAmelCase_ :Tuple = self.transformer(inputs_embeds=__A , labels=__A , attention_mask=__A )
if self.prefix_hidden_dim is not None:
return out, hidden
else:
return out
def __lowerCAmelCase ( self , __A , __A ) -> torch.Tensor:
return torch.zeros(__A , self.prefix_length , dtype=torch.intaa , device=__A )
def __lowerCAmelCase ( self , __A ) -> Optional[int]:
return self.encode_prefix(__A )
@torch.no_grad()
def __lowerCAmelCase ( self , __A , __A , __A ) -> Optional[int]:
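        # Decode each feature independently, keeping the top beam per feature.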
lowerCAmelCase_ :Tuple = torch.split(__A , 1 , dim=0 )
lowerCAmelCase_ :Optional[int] = []
lowerCAmelCase_ :List[str] = []
for feature in features:
lowerCAmelCase_ :Tuple = self.decode_prefix(feature.to(__A ) ) # back to the clip feature
# Only support beam search for now
lowerCAmelCase_ , lowerCAmelCase_ :Optional[Any] = self.generate_beam(
input_embeds=__A , device=__A , eos_token_id=__A )
generated_tokens.append(output_tokens[0] )
generated_seq_lengths.append(seq_lengths[0] )
lowerCAmelCase_ :Tuple = torch.stack(__A )
lowerCAmelCase_ :int = torch.stack(__A )
return generated_tokens, generated_seq_lengths
@torch.no_grad()
def __lowerCAmelCase ( self , __A=None , __A=None , __A=None , __A = 5 , __A = 67 , __A = 1.0 , __A = None , ) -> Union[str, Any]:
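        # Beam search over the GPT-2 decoder: keep the highest-scoring partial
        # sequences (ranked by average log-probability) until every beam emits
        # the EOS token or the maximum entry length is reached.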
lowerCAmelCase_ :Optional[int] = eos_token_id
lowerCAmelCase_ :Optional[int] = None
lowerCAmelCase_ :Any = None
lowerCAmelCase_ :int = torch.ones(__A , device=__A , dtype=torch.int )
lowerCAmelCase_ :Optional[int] = torch.zeros(__A , device=__A , dtype=torch.bool )
if input_embeds is not None:
lowerCAmelCase_ :List[str] = input_embeds
else:
lowerCAmelCase_ :Union[str, Any] = self.transformer.transformer.wte(__A )
for i in range(__A ):
lowerCAmelCase_ :Optional[int] = self.transformer(inputs_embeds=__A )
lowerCAmelCase_ :str = outputs.logits
lowerCAmelCase_ :str = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
lowerCAmelCase_ :Dict = logits.softmax(-1 ).log()
if scores is None:
lowerCAmelCase_ , lowerCAmelCase_ :Any = logits.topk(__A , -1 )
lowerCAmelCase_ :Union[str, Any] = generated.expand(__A , *generated.shape[1:] )
lowerCAmelCase_ , lowerCAmelCase_ :List[str] = next_tokens.permute(1 , 0 ), scores.squeeze(0 )
if tokens is None:
lowerCAmelCase_ :List[str] = next_tokens
else:
lowerCAmelCase_ :List[Any] = tokens.expand(__A , *tokens.shape[1:] )
lowerCAmelCase_ :Any = torch.cat((tokens, next_tokens) , dim=1 )
else:
lowerCAmelCase_ :List[Any] = -float(np.inf )
lowerCAmelCase_ :int = 0
lowerCAmelCase_ :Optional[int] = scores[:, None] + logits
seq_lengths[~is_stopped] += 1
lowerCAmelCase_ :List[Any] = scores_sum / seq_lengths[:, None]
lowerCAmelCase_ , lowerCAmelCase_ :Tuple = scores_sum_average.view(-1 ).topk(__A , -1 )
lowerCAmelCase_ :Optional[Any] = next_tokens // scores_sum.shape[1]
lowerCAmelCase_ :Dict = seq_lengths[next_tokens_source]
lowerCAmelCase_ :Tuple = next_tokens % scores_sum.shape[1]
lowerCAmelCase_ :Optional[Any] = next_tokens.unsqueeze(1 )
lowerCAmelCase_ :str = tokens[next_tokens_source]
lowerCAmelCase_ :List[Any] = torch.cat((tokens, next_tokens) , dim=1 )
lowerCAmelCase_ :Dict = generated[next_tokens_source]
lowerCAmelCase_ :Dict = scores_sum_average * seq_lengths
lowerCAmelCase_ :Tuple = is_stopped[next_tokens_source]
lowerCAmelCase_ :str = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 )
lowerCAmelCase_ :List[Any] = torch.cat((generated, next_token_embed) , dim=1 )
lowerCAmelCase_ :Optional[int] = is_stopped + next_tokens.eq(__A ).squeeze()
if is_stopped.all():
break
lowerCAmelCase_ :str = scores / seq_lengths
lowerCAmelCase_ :Optional[int] = scores.argsort(descending=__A )
# tokens tensors are already padded to max_seq_length
lowerCAmelCase_ :Optional[Any] = [tokens[i] for i in order]
lowerCAmelCase_ :Dict = torch.stack(__A , dim=0 )
lowerCAmelCase_ :Tuple = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype )
return output_texts, seq_lengths
| 1 | 0 |
"""simple docstring"""
from __future__ import annotations
class BoyerMooreSearch:
    def __init__( self , text: str , pattern: str ) -> None:
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text ), len(pattern )
    def match_in_pattern( self , char: str ) -> int:
        # Rightmost index of char in the pattern, or -1 if absent.
        for i in range(self.patLen - 1 , -1 , -1 ):
            if char == self.pattern[i]:
                return i
        return -1
    def mismatch_in_text( self , current_pos: int ) -> int:
        # Rightmost position where the pattern placed at current_pos
        # disagrees with the text, or -1 for a full match.
        for i in range(self.patLen - 1 , -1 , -1 ):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1
    def bad_character_heuristic( self ) -> list[int]:
        positions = []
        for i in range(self.textLen - self.patLen + 1 ):
            mismatch_index = self.mismatch_in_text(i )
            if mismatch_index == -1:
                positions.append(i )
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index] )
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions
text = "ABAABA"
pattern = "AB"
bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()
if len(positions) == 0:
print('No match found')
else:
print('Pattern found in following positions: ')
print(positions)
| 367 |
"""simple docstring"""
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'facebook/detr-resnet-50': 'https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json',
# See all DETR models at https://huggingface.co/models?filter=detr
}
class _SCREAMING_SNAKE_CASE ( PretrainedConfig ):
UpperCAmelCase_ :str = "detr"
UpperCAmelCase_ :str = ["past_key_values"]
UpperCAmelCase_ :Tuple = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
def __init__( self , __A=True , __A=None , __A=3 , __A=100 , __A=6 , __A=2048 , __A=8 , __A=6 , __A=2048 , __A=8 , __A=0.0 , __A=0.0 , __A=True , __A="relu" , __A=256 , __A=0.1 , __A=0.0 , __A=0.0 , __A=0.0_2 , __A=1.0 , __A=False , __A="sine" , __A="resnet50" , __A=True , __A=False , __A=1 , __A=5 , __A=2 , __A=1 , __A=1 , __A=5 , __A=2 , __A=0.1 , **__A , ) -> List[Any]:
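        # Defaults mirror the original DETR release: ResNet-50 backbone
        # (optionally via timm), six encoder/decoder layers, 100 object
        # queries, and sine position embeddings.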
if backbone_config is not None and use_timm_backbone:
raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" )
if not use_timm_backbone:
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
lowerCAmelCase_ :int = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] )
elif isinstance(__A , __A ):
lowerCAmelCase_ :str = backbone_config.get("""model_type""" )
lowerCAmelCase_ :List[Any] = CONFIG_MAPPING[backbone_model_type]
lowerCAmelCase_ :Optional[Any] = config_class.from_dict(__A )
# set timm attributes to None
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :Optional[Any] = None, None, None
lowerCAmelCase_ :Tuple = use_timm_backbone
lowerCAmelCase_ :Optional[int] = backbone_config
lowerCAmelCase_ :Optional[int] = num_channels
lowerCAmelCase_ :int = num_queries
lowerCAmelCase_ :List[Any] = d_model
lowerCAmelCase_ :Optional[int] = encoder_ffn_dim
lowerCAmelCase_ :Tuple = encoder_layers
lowerCAmelCase_ :int = encoder_attention_heads
lowerCAmelCase_ :Optional[Any] = decoder_ffn_dim
lowerCAmelCase_ :List[str] = decoder_layers
lowerCAmelCase_ :Dict = decoder_attention_heads
lowerCAmelCase_ :Dict = dropout
lowerCAmelCase_ :Tuple = attention_dropout
lowerCAmelCase_ :Union[str, Any] = activation_dropout
lowerCAmelCase_ :Any = activation_function
lowerCAmelCase_ :List[str] = init_std
lowerCAmelCase_ :Optional[int] = init_xavier_std
lowerCAmelCase_ :int = encoder_layerdrop
lowerCAmelCase_ :Union[str, Any] = decoder_layerdrop
lowerCAmelCase_ :List[str] = encoder_layers
lowerCAmelCase_ :Union[str, Any] = auxiliary_loss
lowerCAmelCase_ :str = position_embedding_type
lowerCAmelCase_ :List[Any] = backbone
lowerCAmelCase_ :str = use_pretrained_backbone
lowerCAmelCase_ :str = dilation
# Hungarian matcher
lowerCAmelCase_ :List[Any] = class_cost
lowerCAmelCase_ :Union[str, Any] = bbox_cost
lowerCAmelCase_ :Tuple = giou_cost
# Loss coefficients
lowerCAmelCase_ :Optional[int] = mask_loss_coefficient
lowerCAmelCase_ :Union[str, Any] = dice_loss_coefficient
lowerCAmelCase_ :Tuple = bbox_loss_coefficient
lowerCAmelCase_ :Tuple = giou_loss_coefficient
lowerCAmelCase_ :Dict = eos_coefficient
super().__init__(is_encoder_decoder=__A , **__A )
@property
def __lowerCAmelCase ( self ) -> int:
return self.encoder_attention_heads
@property
def __lowerCAmelCase ( self ) -> int:
return self.d_model
@classmethod
def __lowerCAmelCase ( cls , __A , **__A ) -> Any:
return cls(backbone_config=__A , **__A )
def __lowerCAmelCase ( self ) -> Dict[str, any]:
lowerCAmelCase_ :List[str] = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
lowerCAmelCase_ :Dict = self.backbone_config.to_dict()
lowerCAmelCase_ :str = self.__class__.model_type
return output
class _SCREAMING_SNAKE_CASE ( OnnxConfig ):
UpperCAmelCase_ :List[Any] = version.parse("1.11" )
@property
def __lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
("""pixel_mask""", {0: """batch"""}),
] )
@property
def __lowerCAmelCase ( self ) -> float:
return 1E-5
@property
def __lowerCAmelCase ( self ) -> int:
return 12
| 1 | 0 |
"""simple docstring"""
from __future__ import annotations
def all_distinct(nums: list[int]) -> bool:
    '''simple docstring'''
    # True when no element of nums occurs more than once.
    return len(set(nums)) == len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 368 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {'configuration_deit': ['DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DeiTConfig', 'DeiTOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_deit'] = ['DeiTFeatureExtractor']
    _import_structure['image_processing_deit'] = ['DeiTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_deit'] = [
'DEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'DeiTForImageClassification',
'DeiTForImageClassificationWithTeacher',
'DeiTForMaskedImageModeling',
'DeiTModel',
'DeiTPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_deit'] = [
'TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDeiTForImageClassification',
'TFDeiTForImageClassificationWithTeacher',
'TFDeiTForMaskedImageModeling',
'TFDeiTModel',
'TFDeiTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 1 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
_import_structure = {'configuration_beit': ['BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BeitConfig', 'BeitOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_beit'] = ['BeitFeatureExtractor']
    _import_structure['image_processing_beit'] = ['BeitImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_beit'] = [
'BEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BeitForImageClassification',
'BeitForMaskedImageModeling',
'BeitForSemanticSegmentation',
'BeitModel',
'BeitPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_beit'] = [
'FlaxBeitForImageClassification',
'FlaxBeitForMaskedImageModeling',
'FlaxBeitModel',
'FlaxBeitPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 369 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__UpperCAmelCase = {
'configuration_squeezebert': [
'SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SqueezeBertConfig',
'SqueezeBertOnnxConfig',
],
'tokenization_squeezebert': ['SqueezeBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ['SqueezeBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'SqueezeBertForMaskedLM',
'SqueezeBertForMultipleChoice',
'SqueezeBertForQuestionAnswering',
'SqueezeBertForSequenceClassification',
'SqueezeBertForTokenClassification',
'SqueezeBertModel',
'SqueezeBertModule',
'SqueezeBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 1 | 0 |
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _SCREAMING_SNAKE_CASE ( _a ):
UpperCAmelCase_ :Union[str, Any] = ["""image_processor""", """tokenizer"""]
UpperCAmelCase_ :Union[str, Any] = """LayoutLMv3ImageProcessor"""
UpperCAmelCase_ :Any = ("""LayoutLMv3Tokenizer""", """LayoutLMv3TokenizerFast""")
def __init__( self , __A=None , __A=None , **__A ) -> List[Any]:
lowerCAmelCase_ :Optional[Any] = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , __lowerCAmelCase , )
lowerCAmelCase_ :List[str] = kwargs.pop("""feature_extractor""" )
lowerCAmelCase_ :str = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(__lowerCAmelCase , __lowerCAmelCase )
def __call__( self , __A , __A = None , __A = None , __A = None , __A = None , __A = True , __A = False , __A = None , __A = None , __A = 0 , __A = None , __A = None , __A = None , __A = False , __A = False , __A = False , __A = False , __A = True , __A = None , **__A , ) -> int:
# verify input
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
"""You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.""" )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
"""You cannot provide word labels if you initialized the image processor with apply_ocr set to True.""" )
# first, apply the image processor
lowerCAmelCase_ :List[str] = self.image_processor(images=__lowerCAmelCase , return_tensors=__lowerCAmelCase )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
lowerCAmelCase_ :int = [text] # add batch dimension (as the image processor always adds a batch dimension)
lowerCAmelCase_ :Tuple = features["""words"""]
lowerCAmelCase_ :str = self.tokenizer(
text=text if text is not None else features["""words"""] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features["""boxes"""] , word_labels=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , padding=__lowerCAmelCase , truncation=__lowerCAmelCase , max_length=__lowerCAmelCase , stride=__lowerCAmelCase , pad_to_multiple_of=__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase , return_attention_mask=__lowerCAmelCase , return_overflowing_tokens=__lowerCAmelCase , return_special_tokens_mask=__lowerCAmelCase , return_offsets_mapping=__lowerCAmelCase , return_length=__lowerCAmelCase , verbose=__lowerCAmelCase , return_tensors=__lowerCAmelCase , **__lowerCAmelCase , )
# add pixel values
lowerCAmelCase_ :int = features.pop("""pixel_values""" )
if return_overflowing_tokens is True:
lowerCAmelCase_ :int = self.get_overflowing_images(__lowerCAmelCase , encoded_inputs["""overflow_to_sample_mapping"""] )
lowerCAmelCase_ :List[Any] = images
return encoded_inputs
def __lowerCAmelCase ( self , __A , __A ) -> Optional[int]:
# in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
lowerCAmelCase_ :Any = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(__lowerCAmelCase ) != len(__lowerCAmelCase ):
raise ValueError(
"""Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"""
f""" {len(__lowerCAmelCase )} and {len(__lowerCAmelCase )}""" )
return images_with_overflow
def __lowerCAmelCase ( self , *__A , **__A ) -> int:
return self.tokenizer.batch_decode(*__lowerCAmelCase , **__lowerCAmelCase )
def __lowerCAmelCase ( self , *__A , **__A ) -> int:
return self.tokenizer.decode(*__lowerCAmelCase , **__lowerCAmelCase )
@property
def __lowerCAmelCase ( self ) -> Dict:
return ["input_ids", "bbox", "attention_mask", "pixel_values"]
@property
def __lowerCAmelCase ( self ) -> Optional[int]:
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , __lowerCAmelCase , )
return self.image_processor_class
@property
def __lowerCAmelCase ( self ) -> List[str]:
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , __lowerCAmelCase , )
return self.image_processor
| 370 |
"""simple docstring"""
__UpperCAmelCase = 2_56
# Modulus to hash a string
__UpperCAmelCase = 1_00_00_03
def _snake_case ( lowercase__ : str , lowercase__ : str ) -> bool:
'''simple docstring'''
lowerCAmelCase_ :Tuple = len(lowercase__ )
lowerCAmelCase_ :List[str] = len(lowercase__ )
if p_len > t_len:
return False
lowerCAmelCase_ :List[str] = 0
lowerCAmelCase_ :Optional[int] = 0
lowerCAmelCase_ :Any = 1
# Calculating the hash of pattern and substring of text
for i in range(lowercase__ ):
lowerCAmelCase_ :int = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus
lowerCAmelCase_ :Any = (ord(text[i] ) + text_hash * alphabet_size) % modulus
if i == p_len - 1:
continue
lowerCAmelCase_ :Optional[Any] = (modulus_power * alphabet_size) % modulus
for i in range(0 , t_len - p_len + 1 ):
if text_hash == p_hash and text[i : i + p_len] == pattern:
return True
if i == t_len - p_len:
continue
# Calculate the https://en.wikipedia.org/wiki/Rolling_hash
lowerCAmelCase_ :Any = (
(text_hash - ord(text[i] ) * modulus_power) * alphabet_size
+ ord(text[i + p_len] )
) % modulus
return False
def _snake_case ( ) -> None:
'''simple docstring'''
lowerCAmelCase_ :int = """abc1abc12"""
lowerCAmelCase_ :Dict = """alskfjaldsabc1abc1abc12k23adsfabcabc"""
lowerCAmelCase_ :int = """alskfjaldsk23adsfabcabc"""
assert rabin_karp(lowercase__ , lowercase__ ) and not rabin_karp(lowercase__ , lowercase__ )
# Test 2)
lowerCAmelCase_ :Dict = """ABABX"""
lowerCAmelCase_ :int = """ABABZABABYABABX"""
assert rabin_karp(lowercase__ , lowercase__ )
# Test 3)
lowerCAmelCase_ :Union[str, Any] = """AAAB"""
lowerCAmelCase_ :List[str] = """ABAAAAAB"""
assert rabin_karp(lowercase__ , lowercase__ )
# Test 4)
lowerCAmelCase_ :Dict = """abcdabcy"""
lowerCAmelCase_ :Union[str, Any] = """abcxabcdabxabcdabcdabcy"""
assert rabin_karp(lowercase__ , lowercase__ )
# Test 5)
lowerCAmelCase_ :Optional[int] = """Lü"""
lowerCAmelCase_ :Optional[int] = """Lüsai"""
assert rabin_karp(lowercase__ , lowercase__ )
lowerCAmelCase_ :Optional[int] = """Lue"""
assert not rabin_karp(lowercase__ , lowercase__ )
print("""Success.""" )
if __name__ == "__main__":
test_rabin_karp()
| 1 | 0 |
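The style-context half of the row above is a Rabin-Karp substring matcher whose identifiers were rewritten by the style transform, which obscures the rolling-hash bookkeeping. Below is a readable, runnable sketch of the same algorithm with the constants kept from the snippet; the variable names are reconstructed by inference, not taken from the original.

ALPHABET_SIZE = 256  # constants as in the snippet above
MODULUS = 1_000_003

def rabin_karp(pattern: str, text: str) -> bool:
    p_len, t_len = len(pattern), len(text)
    if p_len > t_len:
        return False
    p_hash = text_hash = 0
    modulus_power = 1  # ends up as ALPHABET_SIZE ** (p_len - 1) % MODULUS
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * ALPHABET_SIZE) % MODULUS
        text_hash = (ord(text[i]) + text_hash * ALPHABET_SIZE) % MODULUS
        if i != p_len - 1:
            modulus_power = (modulus_power * ALPHABET_SIZE) % MODULUS
    for i in range(t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True  # hash hit, confirmed by a direct comparison
        if i != t_len - p_len:
            # Roll the window: drop text[i], append text[i + p_len].
            text_hash = (
                (text_hash - ord(text[i]) * modulus_power) * ALPHABET_SIZE
                + ord(text[i + p_len])
            ) % MODULUS
    return False

assert rabin_karp("AAAB", "ABAAAAAB")
assert not rabin_karp("abc1abc12", "alskfjaldsk23adsfabcabc")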
"""simple docstring"""
def _snake_case ( lowercase__ : str , lowercase__ : Union[str, Any] ) -> Any:
'''simple docstring'''
return 1 if input_a == input_a else 0
def _snake_case ( ) -> Any:
'''simple docstring'''
assert xnor_gate(0 , 0 ) == 1
assert xnor_gate(0 , 1 ) == 0
assert xnor_gate(1 , 0 ) == 0
assert xnor_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(xnor_gate(0, 0))
print(xnor_gate(0, 1))
print(xnor_gate(1, 0))
print(xnor_gate(1, 1))
| 371 |
"""simple docstring"""
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
__UpperCAmelCase = 16
__UpperCAmelCase = 32
def _snake_case ( lowercase__ : Accelerator , lowercase__ : int = 1_6 ) -> str:
'''simple docstring'''
lowerCAmelCase_ :List[str] = AutoTokenizer.from_pretrained("""bert-base-cased""" )
lowerCAmelCase_ :Optional[Any] = load_dataset("""glue""" , """mrpc""" )
def tokenize_function(lowercase__ : int ):
# max_length=None => use the model max length (it's actually the default)
lowerCAmelCase_ :Optional[int] = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowercase__ , max_length=lowercase__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
lowerCAmelCase_ :Optional[Any] = datasets.map(
lowercase__ , batched=lowercase__ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowerCAmelCase_ :str = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(lowercase__ : Dict ):
# On TPU it's best to pad everything to the same length or training will be very slow.
lowerCAmelCase_ :int = 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
lowerCAmelCase_ :List[Any] = 1_6
elif accelerator.mixed_precision != "no":
lowerCAmelCase_ :List[str] = 8
else:
lowerCAmelCase_ :Optional[int] = None
return tokenizer.pad(
lowercase__ , padding="""longest""" , max_length=lowercase__ , pad_to_multiple_of=lowercase__ , return_tensors="""pt""" , )
# Instantiate dataloaders.
lowerCAmelCase_ :Optional[Any] = DataLoader(
tokenized_datasets["""train"""] , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=lowercase__ )
lowerCAmelCase_ :List[Any] = DataLoader(
tokenized_datasets["""validation"""] , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=lowercase__ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
__UpperCAmelCase = mocked_dataloaders # noqa: F811
def _snake_case ( lowercase__ : List[Any] , lowercase__ : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , lowercase__ ) == "1":
lowerCAmelCase_ :Optional[Any] = 2
# New Code #
lowerCAmelCase_ :List[str] = int(args.gradient_accumulation_steps )
lowerCAmelCase_ :int = int(args.local_sgd_steps )
# Initialize accelerator
lowerCAmelCase_ :str = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=lowercase__ )
if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
raise NotImplementedError("""LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)""" )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowerCAmelCase_ :int = config["""lr"""]
lowerCAmelCase_ :Union[str, Any] = int(config["""num_epochs"""] )
lowerCAmelCase_ :int = int(config["""seed"""] )
lowerCAmelCase_ :Union[str, Any] = int(config["""batch_size"""] )
lowerCAmelCase_ :Union[str, Any] = evaluate.load("""glue""" , """mrpc""" )
set_seed(lowercase__ )
lowerCAmelCase_ , lowerCAmelCase_ :Optional[int] = get_dataloaders(lowercase__ , lowercase__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowerCAmelCase_ :Optional[int] = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=lowercase__ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
lowerCAmelCase_ :Union[str, Any] = model.to(accelerator.device )
# Instantiate optimizer
lowerCAmelCase_ :Optional[Any] = AdamW(params=model.parameters() , lr=lowercase__ )
# Instantiate scheduler
lowerCAmelCase_ :Union[str, Any] = get_linear_schedule_with_warmup(
optimizer=lowercase__ , num_warmup_steps=1_0_0 , num_training_steps=(len(lowercase__ ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :Union[str, Any] = accelerator.prepare(
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
# Now we train the model
for epoch in range(lowercase__ ):
model.train()
with LocalSGD(
accelerator=lowercase__ , model=lowercase__ , local_sgd_steps=lowercase__ , enabled=local_sgd_steps is not None ) as local_sgd:
for step, batch in enumerate(lowercase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
                # We also do not currently support TPUs, nor do we advise using them, as bugs were found on the XLA side when running our tests.
with accelerator.accumulate(lowercase__ ):
lowerCAmelCase_ :str = model(**lowercase__ )
lowerCAmelCase_ :Optional[int] = output.loss
accelerator.backward(lowercase__ )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
# LocalSGD-specific line
local_sgd.step()
model.eval()
for step, batch in enumerate(lowercase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowerCAmelCase_ :Optional[int] = model(**lowercase__ )
lowerCAmelCase_ :Optional[int] = outputs.logits.argmax(dim=-1 )
lowerCAmelCase_ , lowerCAmelCase_ :Union[str, Any] = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
metric.add_batch(
predictions=lowercase__ , references=lowercase__ , )
lowerCAmelCase_ :Any = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"""epoch {epoch}:""" , lowercase__ )
def _snake_case ( ) -> Tuple:
'''simple docstring'''
lowerCAmelCase_ :str = argparse.ArgumentParser(description="""Simple example of training script.""" )
parser.add_argument(
"""--mixed_precision""" , type=lowercase__ , default=lowercase__ , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" , )
# New Code #
parser.add_argument(
"""--gradient_accumulation_steps""" , type=lowercase__ , default=1 , help="""The number of minibatches to be ran before gradients are accumulated.""" , )
parser.add_argument(
"""--local_sgd_steps""" , type=lowercase__ , default=8 , help="""Number of local SGD steps or None to disable local SGD""" )
parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
lowerCAmelCase_ :Optional[Any] = parser.parse_args()
lowerCAmelCase_ :Tuple = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 4_2, """batch_size""": 1_6}
training_function(lowercase__ , lowercase__ )
if __name__ == "__main__":
main()
| 1 | 0 |
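The training script in the row above wraps its step loop in accelerate's `LocalSGD`, which lets each worker take `local_sgd_steps` independent optimizer steps between synchronizations instead of communicating every batch. Stripped of the library, the core mechanism is periodic parameter averaging; the toy sketch below simulates it with two workers and fabricated gradients (all numbers are illustrative).

def average_inplace(replicas):
    # Replace every parameter in every replica with the cross-replica mean.
    n = len(replicas)
    for j in range(len(replicas[0])):
        mean = sum(r[j] for r in replicas) / n
        for r in replicas:
            r[j] = mean

replicas = [[1.0, 2.0], [1.0, 2.0]]  # two copies of a 2-parameter "model"
lr, local_sgd_steps = 0.1, 4
for step in range(1, 13):
    for rank, params in enumerate(replicas):
        fake_grads = [0.5 * (rank + 1), -0.25]  # stand-in for real gradients
        for j, g in enumerate(fake_grads):
            params[j] -= lr * g                 # local optimizer step
    if step % local_sgd_steps == 0:
        average_inplace(replicas)               # the LocalSGD sync point
assert replicas[0] == replicas[1]               # synchronized after step 12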
"""simple docstring"""
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
__UpperCAmelCase = "\\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n"
__UpperCAmelCase = "\\nGLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n"
__UpperCAmelCase = "\nCompute GLUE evaluation metric associated to each GLUE dataset.\nArgs:\n predictions: list of predictions to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\nReturns: depending on the GLUE subset, one or several of:\n \"accuracy\": Accuracy\n \"f1\": F1 score\n \"pearson\": Pearson Correlation\n \"spearmanr\": Spearman Correlation\n \"matthews_correlation\": Matthew Correlation\nExamples:\n\n >>> glue_metric = datasets.load_metric('glue', 'sst2') # 'sst2' or any of [\"mnli\", \"mnli_mismatched\", \"mnli_matched\", \"qnli\", \"rte\", \"wnli\", \"hans\"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'mrpc') # 'mrpc' or 'qqp'\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0, 'f1': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'stsb')\n >>> references = [0., 1., 2., 3., 4., 5.]\n >>> predictions = [0., 1., 2., 3., 4., 5.]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print({\"pearson\": round(results[\"pearson\"], 2), \"spearmanr\": round(results[\"spearmanr\"], 2)})\n {'pearson': 1.0, 'spearmanr': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'cola')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'matthews_correlation': 1.0}\n"
def _snake_case ( lowercase__ : Any , lowercase__ : List[str] ) -> Optional[int]:
'''simple docstring'''
return float((preds == labels).mean() )
def _snake_case ( lowercase__ : List[Any] , lowercase__ : str ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ :int = simple_accuracy(A__ , A__ )
lowerCAmelCase_ :Dict = float(fa_score(y_true=A__ , y_pred=A__ ) )
return {
"accuracy": acc,
"f1": fa,
}
def _snake_case ( lowercase__ : str , lowercase__ : Optional[Any] ) -> str:
'''simple docstring'''
lowerCAmelCase_ :Union[str, Any] = float(pearsonr(A__ , A__ )[0] )
lowerCAmelCase_ :str = float(spearmanr(A__ , A__ )[0] )
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _SCREAMING_SNAKE_CASE ( datasets.Metric ):
def __lowerCAmelCase ( self ) -> str:
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"sst2\", \"mnli\", \"mnli_mismatched\", \"mnli_matched\", """
"""\"cola\", \"stsb\", \"mrpc\", \"qqp\", \"qnli\", \"rte\", \"wnli\", \"hans\"]""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""int64""" if self.config_name != """stsb""" else """float32""" ),
"""references""": datasets.Value("""int64""" if self.config_name != """stsb""" else """float32""" ),
} ) , codebase_urls=[] , reference_urls=[] , format="""numpy""" , )
def __lowerCAmelCase ( self , __A , __A ) -> Optional[Any]:
if self.config_name == "cola":
return {"matthews_correlation": matthews_corrcoef(__A , __A )}
elif self.config_name == "stsb":
return pearson_and_spearman(__A , __A )
elif self.config_name in ["mrpc", "qqp"]:
return acc_and_fa(__A , __A )
elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
return {"accuracy": simple_accuracy(__A , __A )}
else:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"sst2\", \"mnli\", \"mnli_mismatched\", \"mnli_matched\", """
"""\"cola\", \"stsb\", \"mrpc\", \"qqp\", \"qnli\", \"rte\", \"wnli\", \"hans\"]""" )
| 350 |
"""simple docstring"""
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
__UpperCAmelCase = 16
__UpperCAmelCase = 32
def _snake_case ( lowercase__ : Accelerator , lowercase__ : int = 1_6 , lowercase__ : str = "bert-base-cased" ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase_ :List[str] = AutoTokenizer.from_pretrained(lowercase__ )
lowerCAmelCase_ :Optional[Any] = load_dataset("""glue""" , """mrpc""" )
def tokenize_function(lowercase__ : List[str] ):
# max_length=None => use the model max length (it's actually the default)
lowerCAmelCase_ :str = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowercase__ , max_length=lowercase__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
lowerCAmelCase_ :str = datasets.map(
lowercase__ , batched=lowercase__ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , load_from_cache_file=lowercase__ )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowerCAmelCase_ :List[str] = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(lowercase__ : Union[str, Any] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(lowercase__ , padding="""max_length""" , max_length=1_2_8 , return_tensors="""pt""" )
return tokenizer.pad(lowercase__ , padding="""longest""" , return_tensors="""pt""" )
# Instantiate dataloaders.
lowerCAmelCase_ :Optional[int] = DataLoader(
tokenized_datasets["""train"""] , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=lowercase__ )
lowerCAmelCase_ :Any = DataLoader(
tokenized_datasets["""validation"""] , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=lowercase__ )
return train_dataloader, eval_dataloader
def _snake_case ( lowercase__ : Optional[Any] , lowercase__ : Union[str, Any] , lowercase__ : Tuple , lowercase__ : int ) -> List[str]:
'''simple docstring'''
model.eval()
lowerCAmelCase_ :Dict = 0
for step, batch in enumerate(lowercase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowerCAmelCase_ :Optional[int] = model(**lowercase__ )
lowerCAmelCase_ :Optional[int] = outputs.logits.argmax(dim=-1 )
        # It is slightly faster to call this once than multiple times
lowerCAmelCase_ , lowerCAmelCase_ :List[Any] = accelerator.gather(
(predictions, batch["""labels"""]) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(lowercase__ ) - 1:
lowerCAmelCase_ :Optional[Any] = predictions[: len(eval_dataloader.dataset ) - samples_seen]
lowerCAmelCase_ :Any = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=lowercase__ , references=lowercase__ , )
lowerCAmelCase_ :Tuple = metric.compute()
return eval_metric["accuracy"]
def _snake_case ( lowercase__ : str , lowercase__ : List[str] ) -> Any:
'''simple docstring'''
lowerCAmelCase_ :Optional[int] = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowerCAmelCase_ :int = config["""lr"""]
lowerCAmelCase_ :Union[str, Any] = int(config["""num_epochs"""] )
lowerCAmelCase_ :Optional[int] = int(config["""seed"""] )
lowerCAmelCase_ :Union[str, Any] = int(config["""batch_size"""] )
lowerCAmelCase_ :Optional[Any] = args.model_name_or_path
set_seed(lowercase__ )
lowerCAmelCase_ , lowerCAmelCase_ :Dict = get_dataloaders(lowercase__ , lowercase__ , lowercase__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowerCAmelCase_ :str = AutoModelForSequenceClassification.from_pretrained(lowercase__ , return_dict=lowercase__ )
# Instantiate optimizer
lowerCAmelCase_ :List[str] = (
AdamW
if accelerator.state.deepspeed_plugin is None
or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
lowerCAmelCase_ :str = optimizer_cls(params=model.parameters() , lr=lowercase__ )
if accelerator.state.deepspeed_plugin is not None:
lowerCAmelCase_ :Union[str, Any] = accelerator.state.deepspeed_plugin.deepspeed_config[
"""gradient_accumulation_steps"""
]
else:
lowerCAmelCase_ :Any = 1
lowerCAmelCase_ :str = (len(lowercase__ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
lowerCAmelCase_ :List[str] = get_linear_schedule_with_warmup(
optimizer=lowercase__ , num_warmup_steps=0 , num_training_steps=lowercase__ , )
else:
lowerCAmelCase_ :int = DummyScheduler(lowercase__ , total_num_steps=lowercase__ , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :List[Any] = accelerator.prepare(
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
# We need to keep track of how many total steps we have iterated over
lowerCAmelCase_ :List[str] = 0
# We also need to keep track of the stating epoch so files are named properly
lowerCAmelCase_ :List[Any] = 0
lowerCAmelCase_ :str = evaluate.load("""glue""" , """mrpc""" )
lowerCAmelCase_ :Optional[Any] = num_epochs
if args.partial_train_epoch is not None:
lowerCAmelCase_ :Dict = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
lowerCAmelCase_ :Optional[Any] = args.resume_from_checkpoint.split("""epoch_""" )[1]
lowerCAmelCase_ :int = """"""
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
lowerCAmelCase_ :Union[str, Any] = int(lowercase__ ) + 1
lowerCAmelCase_ :Optional[int] = evaluation_loop(lowercase__ , lowercase__ , lowercase__ , lowercase__ )
accelerator.print("""resumed checkpoint performance:""" , lowercase__ )
accelerator.print("""resumed checkpoint's scheduler's lr:""" , lr_scheduler.get_lr()[0] )
accelerator.print("""resumed optimizers's lr:""" , optimizer.param_groups[0]["""lr"""] )
with open(os.path.join(args.output_dir , f"""state_{starting_epoch-1}.json""" ) , """r""" ) as f:
lowerCAmelCase_ :List[str] = json.load(lowercase__ )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
lowerCAmelCase_ :List[Any] = {}
for epoch in range(lowercase__ , lowercase__ ):
model.train()
for step, batch in enumerate(lowercase__ ):
lowerCAmelCase_ :Optional[int] = model(**lowercase__ )
lowerCAmelCase_ :Dict = outputs.loss
lowerCAmelCase_ :int = loss / gradient_accumulation_steps
accelerator.backward(lowercase__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
lowerCAmelCase_ :List[str] = f"""epoch_{epoch}"""
lowerCAmelCase_ :Any = os.path.join(args.output_dir , lowercase__ )
accelerator.save_state(lowercase__ )
lowerCAmelCase_ :List[Any] = evaluation_loop(lowercase__ , lowercase__ , lowercase__ , lowercase__ )
lowerCAmelCase_ :Union[str, Any] = accuracy
lowerCAmelCase_ :Any = lr_scheduler.get_lr()[0]
lowerCAmelCase_ :str = optimizer.param_groups[0]["""lr"""]
lowerCAmelCase_ :List[Any] = epoch
lowerCAmelCase_ :Tuple = overall_step
accelerator.print(f"""epoch {epoch}:""" , lowercase__ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , f"""state_{epoch}.json""" ) , """w""" ) as f:
json.dump(lowercase__ , lowercase__ )
def _snake_case ( ) -> int:
'''simple docstring'''
    lowerCAmelCase_ :List[Any] = argparse.ArgumentParser(description="""Simple example of a training script with checkpointing and resume.""" )
parser.add_argument(
"""--model_name_or_path""" , type=lowercase__ , default="""bert-base-cased""" , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=lowercase__ , )
parser.add_argument(
"""--output_dir""" , type=lowercase__ , default=""".""" , help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""" , )
parser.add_argument(
"""--resume_from_checkpoint""" , type=lowercase__ , default=lowercase__ , help="""If the training should continue from a checkpoint folder.""" , )
parser.add_argument(
"""--partial_train_epoch""" , type=lowercase__ , default=lowercase__ , help="""If passed, the training will stop after this number of epochs.""" , )
parser.add_argument(
"""--num_epochs""" , type=lowercase__ , default=2 , help="""Number of train epochs.""" , )
lowerCAmelCase_ :Optional[int] = parser.parse_args()
lowerCAmelCase_ :List[Any] = {"""lr""": 2E-5, """num_epochs""": args.num_epochs, """seed""": 4_2, """batch_size""": 1_6}
training_function(lowercase__ , lowercase__ )
if __name__ == "__main__":
main()
| 1 | 0 |
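The GLUE metric wrapper in the row above delegates the arithmetic to scikit-learn and scipy. For intuition, the quantities it reports reduce to short formulas; here is a dependency-free sketch of accuracy, Pearson's r, and a tie-free Spearman correlation (Spearman is just Pearson applied to rank vectors).

import math

def simple_accuracy(preds, labels):
    return sum(p == l for p, l in zip(preds, labels)) / len(labels)

def pearson(x, y):
    n = len(x)
    mx, my = sum(x) / n, sum(y) / n
    cov = sum((a - mx) * (b - my) for a, b in zip(x, y))
    sx = math.sqrt(sum((a - mx) ** 2 for a in x))
    sy = math.sqrt(sum((b - my) ** 2 for b in y))
    return cov / (sx * sy)  # not defined for constant inputs (division by zero)

def spearman(x, y):
    # Pearson correlation of the rank vectors; no tie handling in this sketch.
    def ranks(v):
        order = sorted(v)
        return [float(order.index(e)) for e in v]
    return pearson(ranks(x), ranks(y))

assert simple_accuracy([0, 1, 1], [0, 1, 0]) == 2 / 3
assert abs(pearson([0.0, 1.0, 2.0], [0.0, 2.0, 4.0]) - 1.0) < 1e-9
assert abs(spearman([1.0, 5.0, 9.0], [2.0, 3.0, 40.0]) - 1.0) < 1e-9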
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
__UpperCAmelCase = {
'E': 12.70,
'T': 9.06,
'A': 8.17,
'O': 7.51,
'I': 6.97,
'N': 6.75,
'S': 6.33,
'H': 6.09,
'R': 5.99,
'D': 4.25,
'L': 4.03,
'C': 2.78,
'U': 2.76,
'M': 2.41,
'W': 2.36,
'F': 2.23,
'G': 2.02,
'Y': 1.97,
'P': 1.93,
'B': 1.29,
'V': 0.98,
'K': 0.77,
'J': 0.15,
'X': 0.15,
'Q': 0.10,
'Z': 0.07,
}
__UpperCAmelCase = 'ETAOINSHRDLCUMWFGYPBVKJXQZ'
__UpperCAmelCase = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
def _snake_case ( lowercase__ : str ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ :int = {letter: 0 for letter in string.ascii_uppercase}
for letter in message.upper():
if letter in LETTERS:
letter_count[letter] += 1
return letter_count
def _snake_case ( lowercase__ : tuple ) -> Any:
'''simple docstring'''
return x[0]
def _snake_case ( lowercase__ : str ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ :int = get_letter_count(lowerCAmelCase__ )
lowerCAmelCase_ :dict[int, list[str]] = {
freq: [] for letter, freq in letter_to_freq.items()
}
for letter in LETTERS:
freq_to_letter[letter_to_freq[letter]].append(lowerCAmelCase__ )
lowerCAmelCase_ :dict[int, str] = {}
for freq in freq_to_letter:
freq_to_letter[freq].sort(key=ETAOIN.find , reverse=lowerCAmelCase__ )
lowerCAmelCase_ :Dict = ''''''.join(freq_to_letter[freq] )
lowerCAmelCase_ :List[str] = list(freq_to_letter_str.items() )
freq_pairs.sort(key=lowerCAmelCase__ , reverse=lowerCAmelCase__ )
lowerCAmelCase_ :list[str] = [freq_pair[1] for freq_pair in freq_pairs]
return "".join(lowerCAmelCase__ )
def _snake_case ( lowercase__ : str ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase_ :Any = get_frequency_order(lowerCAmelCase__ )
lowerCAmelCase_ :str = 0
for common_letter in ETAOIN[:6]:
if common_letter in freq_order[:6]:
match_score += 1
for uncommon_letter in ETAOIN[-6:]:
if uncommon_letter in freq_order[-6:]:
match_score += 1
return match_score
if __name__ == "__main__":
import doctest
doctest.testmod()
| 351 |
"""simple docstring"""
import baseaa
import io
import json
import os
from copy import deepcopy
from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler
class _SCREAMING_SNAKE_CASE :
def __init__( self , __A ) -> Union[str, Any]:
if isinstance(__A , __A ):
            # Don't modify the user's data in case they want to reuse it (e.g. in tests): once we have
            # modified it, it would not be accepted here again, since `auto` values would have been overridden
lowerCAmelCase_ :Tuple = deepcopy(__A )
elif os.path.exists(__A ):
with io.open(__A , """r""" , encoding="""utf-8""" ) as f:
lowerCAmelCase_ :str = json.load(__A )
else:
try:
lowerCAmelCase_ :Dict = baseaa.urlsafe_baadecode(__A ).decode("""utf-8""" )
lowerCAmelCase_ :int = json.loads(__A )
except (UnicodeDecodeError, AttributeError, ValueError):
raise ValueError(
f"""Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}""" )
lowerCAmelCase_ :Optional[Any] = config
self.set_stage_and_offload()
def __lowerCAmelCase ( self ) -> Tuple:
# zero stage - this is done as early as possible, before model is created, to allow
# ``is_deepspeed_zero3_enabled`` query and getting to the early deepspeed config object
# during ``zero.Init()`` which needs to know the dtype, and some other hparams.
lowerCAmelCase_ :Tuple = self.get_value("""zero_optimization.stage""" , -1 )
# offload
lowerCAmelCase_ :Dict = False
if self.is_zeroa() or self.is_zeroa():
lowerCAmelCase_ :Optional[int] = set(["""cpu""", """nvme"""] )
lowerCAmelCase_ :Union[str, Any] = set(
[
self.get_value("""zero_optimization.offload_optimizer.device""" ),
self.get_value("""zero_optimization.offload_param.device""" ),
] )
if len(offload_devices & offload_devices_valid ) > 0:
lowerCAmelCase_ :Optional[int] = True
def __lowerCAmelCase ( self , __A ) -> Optional[Any]:
lowerCAmelCase_ :str = self.config
# find the config node of interest if it exists
lowerCAmelCase_ :Tuple = ds_key_long.split(""".""" )
lowerCAmelCase_ :List[str] = nodes.pop()
for node in nodes:
lowerCAmelCase_ :Tuple = config.get(__A )
if config is None:
return None, ds_key
return config, ds_key
def __lowerCAmelCase ( self , __A , __A=None ) -> Optional[Any]:
lowerCAmelCase_ , lowerCAmelCase_ :Optional[Any] = self.find_config_node(__A )
if config is None:
return default
return config.get(__A , __A )
def __lowerCAmelCase ( self , __A , __A=False ) -> Optional[Any]:
lowerCAmelCase_ :Tuple = self.config
# find the config node of interest if it exists
lowerCAmelCase_ :Union[str, Any] = ds_key_long.split(""".""" )
for node in nodes:
lowerCAmelCase_ :int = config
lowerCAmelCase_ :Any = config.get(__A )
if config is None:
if must_exist:
raise ValueError(f"""Can't find {ds_key_long} entry in the config: {self.config}""" )
else:
return
# if found remove it
if parent_config is not None:
parent_config.pop(__A )
def __lowerCAmelCase ( self , __A ) -> Union[str, Any]:
lowerCAmelCase_ :Optional[int] = self.get_value(__A )
return False if value is None else bool(__A )
def __lowerCAmelCase ( self , __A ) -> Optional[int]:
lowerCAmelCase_ :List[str] = self.get_value(__A )
return False if value is None else not bool(__A )
def __lowerCAmelCase ( self ) -> str:
return self._stage == 2
def __lowerCAmelCase ( self ) -> Union[str, Any]:
return self._stage == 3
def __lowerCAmelCase ( self ) -> Union[str, Any]:
return self._offload
class _SCREAMING_SNAKE_CASE :
def __init__( self , __A ) -> Optional[int]:
lowerCAmelCase_ :Dict = engine
def __lowerCAmelCase ( self , __A , **__A ) -> str:
# runs backpropagation and handles mixed precision
self.engine.backward(__A , **__A )
# Deepspeed's `engine.step` performs the following operations:
# - gradient accumulation check
# - gradient clipping
# - optimizer step
# - zero grad
# - checking overflow
# - lr_scheduler step (only if engine.lr_scheduler is not None)
self.engine.step()
# and this plugin overrides the above calls with no-ops when Accelerate runs under
# Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple
# training loop that works transparently under many training regimes.
class _SCREAMING_SNAKE_CASE ( A__ ):
def __init__( self , __A ) -> List[str]:
super().__init__(__A , device_placement=__A , scaler=__A )
lowerCAmelCase_ :List[str] = hasattr(self.optimizer , """overflow""" )
def __lowerCAmelCase ( self , __A=None ) -> Optional[Any]:
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
def __lowerCAmelCase ( self ) -> List[Any]:
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
@property
def __lowerCAmelCase ( self ) -> int:
if self.__has_overflow__:
return self.optimizer.overflow
return False
class _SCREAMING_SNAKE_CASE ( A__ ):
def __init__( self , __A , __A ) -> Optional[int]:
super().__init__(__A , __A )
def __lowerCAmelCase ( self ) -> Any:
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
class _SCREAMING_SNAKE_CASE :
def __init__( self , __A , __A=0.0_0_1 , __A=0 , **__A ) -> List[Any]:
lowerCAmelCase_ :str = params
lowerCAmelCase_ :Any = lr
lowerCAmelCase_ :List[Any] = weight_decay
lowerCAmelCase_ :Any = kwargs
class _SCREAMING_SNAKE_CASE :
def __init__( self , __A , __A=None , __A=0 , **__A ) -> List[str]:
lowerCAmelCase_ :Optional[int] = optimizer
lowerCAmelCase_ :int = total_num_steps
lowerCAmelCase_ :List[Any] = warmup_num_steps
lowerCAmelCase_ :int = kwargs
| 1 | 0 |
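The DeepSpeed config wrapper in the style-context above resolves dotted keys such as "zero_optimization.stage" by walking the nested JSON dict one node at a time (`find_config_node` plus `get_value`). A standalone sketch of that lookup follows; the sample config content is made up for the assertions.

def get_value(config, ds_key_long, default=None):
    *nodes, ds_key = ds_key_long.split(".")
    for node in nodes:
        config = config.get(node)
        if config is None:  # a parent node is missing entirely
            return default
    return config.get(ds_key, default)

ds_config = {"zero_optimization": {"stage": 3, "offload_param": {"device": "cpu"}}}
assert get_value(ds_config, "zero_optimization.stage") == 3
assert get_value(ds_config, "zero_optimization.offload_param.device") == "cpu"
assert get_value(ds_config, "optimizer.type", "missing") == "missing"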
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
__UpperCAmelCase = TypeVar('T')
class _SCREAMING_SNAKE_CASE ( Generic[T] ):
def __init__( self , __A ) -> str:
lowerCAmelCase_ :str = data
lowerCAmelCase_ :Optional[int] = None
def __str__( self ) -> str:
return f"""{self.data}"""
class _SCREAMING_SNAKE_CASE ( Generic[T] ):
def __init__( self ) -> None:
lowerCAmelCase_ :Tuple = None
def __iter__( self ) -> Iterator[T]:
lowerCAmelCase_ :Dict = self.top
while node:
yield node.data
lowerCAmelCase_ :Union[str, Any] = node.next
def __str__( self ) -> str:
return "->".join([str(__A ) for item in self] )
def __len__( self ) -> int:
return len(tuple(iter(self ) ) )
def __lowerCAmelCase ( self ) -> bool:
return self.top is None
def __lowerCAmelCase ( self , __A ) -> None:
lowerCAmelCase_ :Tuple = Node(__A )
if not self.is_empty():
lowerCAmelCase_ :Union[str, Any] = self.top
lowerCAmelCase_ :Union[str, Any] = node
def __lowerCAmelCase ( self ) -> T:
if self.is_empty():
raise IndexError("""pop from empty stack""" )
assert isinstance(self.top , __A )
lowerCAmelCase_ :Optional[Any] = self.top
lowerCAmelCase_ :Union[str, Any] = self.top.next
return pop_node.data
def __lowerCAmelCase ( self ) -> T:
if self.is_empty():
raise IndexError("""peek from empty stack""" )
assert self.top is not None
return self.top.data
def __lowerCAmelCase ( self ) -> None:
lowerCAmelCase_ :Tuple = None
if __name__ == "__main__":
from doctest import testmod
testmod()
| 352 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class _SCREAMING_SNAKE_CASE ( A__ ):
UpperCAmelCase_ :Dict = (
"This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."
"It takes two arguments named `image` which should be the original image, and `label` which should be a text "
"describing the elements what should be identified in the segmentation mask. The tool returns the mask."
)
UpperCAmelCase_ :List[str] = "CIDAS/clipseg-rd64-refined"
UpperCAmelCase_ :List[Any] = "image_segmenter"
UpperCAmelCase_ :Optional[int] = CLIPSegForImageSegmentation
UpperCAmelCase_ :Tuple = ["image", "text"]
UpperCAmelCase_ :Dict = ["image"]
def __init__( self , *__A , **__A ) -> Optional[Any]:
requires_backends(self , ["""vision"""] )
super().__init__(*__A , **__A )
def __lowerCAmelCase ( self , __A , __A ) -> Any:
return self.pre_processor(text=[label] , images=[image] , padding=__A , return_tensors="""pt""" )
def __lowerCAmelCase ( self , __A ) -> Tuple:
with torch.no_grad():
lowerCAmelCase_ :Dict = self.model(**__A ).logits
return logits
def __lowerCAmelCase ( self , __A ) -> Tuple:
lowerCAmelCase_ :Optional[int] = outputs.cpu().detach().numpy()
lowerCAmelCase_ :List[str] = 0
lowerCAmelCase_ :str = 1
return Image.fromarray((array * 255).astype(np.uinta ) )
| 1 | 0 |
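The code half of the row above implements a generic LIFO stack on a singly linked list. The compact restatement below uses readable names (the document's version additionally supports iteration, `len`, and clearing) and appends a usage check.

class Node:
    def __init__(self, data):
        self.data, self.next = data, None

class Stack:
    def __init__(self):
        self.top = None  # LIFO: push and pop both act on `top`

    def push(self, data):
        node = Node(data)
        node.next, self.top = self.top, node

    def pop(self):
        if self.top is None:
            raise IndexError("pop from empty stack")
        node, self.top = self.top, self.top.next
        return node.data

    def peek(self):
        if self.top is None:
            raise IndexError("peek from empty stack")
        return self.top.data

stack = Stack()
for item in (1, 2, 3):
    stack.push(item)
assert stack.peek() == 3
assert [stack.pop(), stack.pop(), stack.pop()] == [3, 2, 1]  # last in, first out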
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {"vocab_file": "spm_char.model"}
__UpperCAmelCase = {
"vocab_file": {
"microsoft/speecht5_asr": "https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model",
"microsoft/speecht5_tts": "https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model",
"microsoft/speecht5_vc": "https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model",
}
}
__UpperCAmelCase = {
"microsoft/speecht5_asr": 10_24,
"microsoft/speecht5_tts": 10_24,
"microsoft/speecht5_vc": 10_24,
}
class _SCREAMING_SNAKE_CASE ( lowercase_ ):
UpperCAmelCase_ :Any = VOCAB_FILES_NAMES
UpperCAmelCase_ :Any = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase_ :int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase_ :List[str] = ["input_ids", "attention_mask"]
def __init__( self , __A , __A="<s>" , __A="</s>" , __A="<unk>" , __A="<pad>" , __A = None , **__A , ) -> None:
lowerCAmelCase_ :List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=a__ , eos_token=a__ , unk_token=a__ , pad_token=a__ , sp_model_kwargs=self.sp_model_kwargs , **a__ , )
lowerCAmelCase_ :Optional[Any] = vocab_file
lowerCAmelCase_ :Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(a__ )
@property
def __lowerCAmelCase ( self ) -> str:
return self.sp_model.get_piece_size()
def __lowerCAmelCase ( self ) -> str:
lowerCAmelCase_ :List[str] = {self.convert_ids_to_tokens(a__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> Optional[Any]:
lowerCAmelCase_ :Union[str, Any] = self.__dict__.copy()
lowerCAmelCase_ :str = None
return state
def __setstate__( self , __A ) -> Dict:
lowerCAmelCase_ :List[str] = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
lowerCAmelCase_ :Tuple = {}
lowerCAmelCase_ :Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __lowerCAmelCase ( self , __A ) -> List[str]:
return self.sp_model.encode(a__ , out_type=a__ )
def __lowerCAmelCase ( self , __A ) -> Dict:
return self.sp_model.piece_to_id(a__ )
def __lowerCAmelCase ( self , __A ) -> int:
lowerCAmelCase_ :Any = self.sp_model.IdToPiece(a__ )
return token
def __lowerCAmelCase ( self , __A ) -> str:
lowerCAmelCase_ :int = []
lowerCAmelCase_ :Optional[Any] = """"""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(a__ ) + token
lowerCAmelCase_ :Union[str, Any] = []
else:
current_sub_tokens.append(a__ )
out_string += self.sp_model.decode(a__ )
return out_string.strip()
def __lowerCAmelCase ( self , __A , __A=None ) -> List[int]:
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def __lowerCAmelCase ( self , __A , __A = None , __A = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=a__ , token_ids_a=a__ , already_has_special_tokens=a__ )
lowerCAmelCase_ :List[str] = [1]
if token_ids_a is None:
return ([0] * len(a__ )) + suffix_ones
return ([0] * len(a__ )) + ([0] * len(a__ )) + suffix_ones
def __lowerCAmelCase ( self , __A , __A = None ) -> Tuple[str]:
if not os.path.isdir(a__ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowerCAmelCase_ :List[str] = os.path.join(
a__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , a__ )
elif not os.path.isfile(self.vocab_file ):
with open(a__ , """wb""" ) as fi:
lowerCAmelCase_ :Tuple = self.sp_model.serialized_model_proto()
fi.write(a__ )
return (out_vocab_file,)
| 353 |
"""simple docstring"""
def _snake_case ( lowercase__ : list , lowercase__ : list , lowercase__ : int , lowercase__ : int , lowercase__ : int ) -> int:
'''simple docstring'''
if index == number_of_items:
return 0
lowerCAmelCase_ :Any = 0
lowerCAmelCase_ :str = 0
lowerCAmelCase_ :Dict = knapsack(lowercase__ , lowercase__ , lowercase__ , lowercase__ , index + 1 )
if weights[index] <= max_weight:
lowerCAmelCase_ :str = values[index] + knapsack(
lowercase__ , lowercase__ , lowercase__ , max_weight - weights[index] , index + 1 )
return max(lowercase__ , lowercase__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 1 | 0 |
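The knapsack function in the style-context above is the plain recursive 0/1 formulation, which takes exponential time because overlapping (index, remaining capacity) subproblems are re-solved. The same recursion with memoization, sketched below, runs in O(n * max_weight).

from functools import lru_cache

def knapsack(values, weights, max_weight):
    n = len(values)

    @lru_cache(maxsize=None)
    def best(index, remaining):
        if index == n:
            return 0
        skip = best(index + 1, remaining)        # leave item `index` out
        take = 0
        if weights[index] <= remaining:          # take the item if it fits
            take = values[index] + best(index + 1, remaining - weights[index])
        return max(skip, take)

    return best(0, max_weight)

assert knapsack([60, 100, 120], [10, 20, 30], 50) == 220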
"""simple docstring"""
from math import ceil
def _snake_case ( lowercase__ : Optional[Any] = 1_0_0_1 ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ :List[str] = 1
for i in range(1 , int(ceil(n / 2.0 ) ) ):
lowerCAmelCase_ :List[Any] = 2 * i + 1
lowerCAmelCase_ :Tuple = 2 * i
lowerCAmelCase_ :Optional[int] = total + 4 * odd**2 - 6 * even
return total
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
__UpperCAmelCase = int(sys.argv[1])
print(solution(n))
except ValueError:
print('Invalid entry - please enter a number')
| 354 |
"""simple docstring"""
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def _snake_case ( lowercase__ : bool = True , *lowercase__ : Optional[int] , **lowercase__ : str ) -> Optional[Any]:
'''simple docstring'''
if not is_tqdm_available():
raise ImportError("""Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.""" )
lowerCAmelCase_ :Tuple = False
if main_process_only:
lowerCAmelCase_ :Dict = PartialState().local_process_index == 0
return _tqdm(*lowercase__ , **lowercase__ , disable=lowercase__ )
| 1 | 0 |
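The Project Euler solution in the row above sums the diagonals of an odd-sided number spiral with a closed form: ring i has side length 2i + 1 and its four corners sum to 4*(2i + 1)**2 - 12i, which the snippet accumulates as `4 * odd**2 - 6 * even` with odd = 2i + 1 and even = 2i. The sketch below cross-checks that closed form against a brute-force walk over the corner values.

def diagonal_sum_closed_form(n: int) -> int:
    total = 1                                  # the centre cell
    for i in range(1, (n + 1) // 2):           # ring i has side length 2*i + 1
        odd, even = 2 * i + 1, 2 * i
        total += 4 * odd**2 - 6 * even         # four corners: 4*(2i+1)^2 - 12i
    return total

def diagonal_sum_brute(n: int) -> int:
    # Starting from the previous ring's corner, each of the four corners of
    # ring i is a further step of 2*i along the spiral.
    total = value = 1
    for i in range(1, (n + 1) // 2):
        for _ in range(4):
            value += 2 * i
            total += value
    return total

assert diagonal_sum_closed_form(5) == diagonal_sum_brute(5) == 101
assert diagonal_sum_closed_form(1001) == diagonal_sum_brute(1001)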
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'microsoft/unispeech-sat-base-100h-libri-ft': (
'https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json'
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class _SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
UpperCAmelCase_ :Optional[Any] = "unispeech-sat"
def __init__( self , __A=32 , __A=768 , __A=12 , __A=12 , __A=3072 , __A="gelu" , __A=0.1 , __A=0.1 , __A=0.1 , __A=0.0 , __A=0.0 , __A=0.1 , __A=0.1 , __A=0.0_2 , __A=1E-5 , __A="group" , __A="gelu" , __A=(512, 512, 512, 512, 512, 512, 512) , __A=(5, 2, 2, 2, 2, 2, 2) , __A=(10, 3, 3, 3, 3, 2, 2) , __A=False , __A=128 , __A=16 , __A=False , __A=True , __A=0.0_5 , __A=10 , __A=2 , __A=0.0 , __A=10 , __A=0 , __A=320 , __A=2 , __A=0.1 , __A=100 , __A=256 , __A=256 , __A=0.1 , __A="mean" , __A=False , __A=False , __A=256 , __A=(512, 512, 512, 512, 1500) , __A=(5, 3, 3, 1, 1) , __A=(1, 2, 3, 1, 1) , __A=512 , __A=0 , __A=1 , __A=2 , __A=504 , **__A , ) -> Optional[Any]:
super().__init__(**__lowerCamelCase , pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase )
lowerCAmelCase_ :Union[str, Any] = hidden_size
lowerCAmelCase_ :Any = feat_extract_norm
lowerCAmelCase_ :List[str] = feat_extract_activation
lowerCAmelCase_ :Any = list(__lowerCamelCase )
lowerCAmelCase_ :Union[str, Any] = list(__lowerCamelCase )
lowerCAmelCase_ :int = list(__lowerCamelCase )
lowerCAmelCase_ :List[str] = conv_bias
lowerCAmelCase_ :Optional[Any] = num_conv_pos_embeddings
lowerCAmelCase_ :List[str] = num_conv_pos_embedding_groups
lowerCAmelCase_ :Union[str, Any] = len(self.conv_dim )
lowerCAmelCase_ :List[Any] = num_hidden_layers
lowerCAmelCase_ :Any = intermediate_size
lowerCAmelCase_ :Union[str, Any] = hidden_act
lowerCAmelCase_ :List[str] = num_attention_heads
lowerCAmelCase_ :str = hidden_dropout
lowerCAmelCase_ :List[str] = attention_dropout
lowerCAmelCase_ :Union[str, Any] = activation_dropout
lowerCAmelCase_ :List[Any] = feat_proj_dropout
lowerCAmelCase_ :Dict = final_dropout
lowerCAmelCase_ :int = layerdrop
lowerCAmelCase_ :Dict = layer_norm_eps
lowerCAmelCase_ :Union[str, Any] = initializer_range
lowerCAmelCase_ :Optional[int] = vocab_size
lowerCAmelCase_ :Optional[Any] = num_clusters
lowerCAmelCase_ :List[str] = do_stable_layer_norm
lowerCAmelCase_ :Optional[Any] = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
""" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
f""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
f""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
lowerCAmelCase_ :Optional[int] = apply_spec_augment
lowerCAmelCase_ :Optional[int] = mask_time_prob
lowerCAmelCase_ :str = mask_time_length
lowerCAmelCase_ :int = mask_time_min_masks
lowerCAmelCase_ :List[Any] = mask_feature_prob
lowerCAmelCase_ :Union[str, Any] = mask_feature_length
lowerCAmelCase_ :Union[str, Any] = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
lowerCAmelCase_ :List[str] = num_codevectors_per_group
lowerCAmelCase_ :str = num_codevector_groups
lowerCAmelCase_ :Any = contrastive_logits_temperature
lowerCAmelCase_ :int = feat_quantizer_dropout
lowerCAmelCase_ :Tuple = num_negatives
lowerCAmelCase_ :List[str] = codevector_dim
lowerCAmelCase_ :Optional[Any] = proj_codevector_dim
lowerCAmelCase_ :int = diversity_loss_weight
# ctc loss
lowerCAmelCase_ :List[Any] = ctc_loss_reduction
lowerCAmelCase_ :Tuple = ctc_zero_infinity
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
lowerCAmelCase_ :Tuple = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
lowerCAmelCase_ :Dict = list(__lowerCamelCase )
lowerCAmelCase_ :Any = list(__lowerCamelCase )
lowerCAmelCase_ :Optional[int] = list(__lowerCamelCase )
lowerCAmelCase_ :Tuple = xvector_output_dim
@property
def __lowerCAmelCase ( self ) -> Any:
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 355 |
"""simple docstring"""
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
__UpperCAmelCase = get_tests_dir('fixtures/dummy-config.json')
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __lowerCAmelCase ( self ) -> Dict:
lowerCAmelCase_ :int = 0
def __lowerCAmelCase ( self ) -> List[str]:
self.assertIsNotNone(transformers.models.auto.__spec__ )
self.assertIsNotNone(importlib.util.find_spec("""transformers.models.auto""" ) )
def __lowerCAmelCase ( self ) -> Tuple:
lowerCAmelCase_ :Tuple = AutoConfig.from_pretrained("""bert-base-uncased""" )
self.assertIsInstance(__A , __A )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
lowerCAmelCase_ :int = AutoConfig.from_pretrained(__A )
self.assertIsInstance(__A , __A )
def __lowerCAmelCase ( self ) -> Any:
lowerCAmelCase_ :Any = AutoConfig.from_pretrained(__A )
self.assertIsInstance(__A , __A )
def __lowerCAmelCase ( self ) -> Dict:
lowerCAmelCase_ :int = AutoConfig.for_model("""roberta""" )
self.assertIsInstance(__A , __A )
def __lowerCAmelCase ( self ) -> Tuple:
with tempfile.TemporaryDirectory() as tmp_dir:
# This model name contains bert and roberta, but roberta ends up being picked.
lowerCAmelCase_ :int = os.path.join(__A , """fake-roberta""" )
os.makedirs(__A , exist_ok=__A )
with open(os.path.join(__A , """config.json""" ) , """w""" ) as f:
f.write(json.dumps({} ) )
lowerCAmelCase_ :Any = AutoConfig.from_pretrained(__A )
self.assertEqual(type(__A ) , __A )
def __lowerCAmelCase ( self ) -> Optional[int]:
try:
AutoConfig.register("""custom""" , __A )
# Wrong model type will raise an error
with self.assertRaises(__A ):
AutoConfig.register("""model""" , __A )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__A ):
AutoConfig.register("""bert""" , __A )
# Now that the config is registered, it can be used as any other config with the auto-API
lowerCAmelCase_ :Union[str, Any] = CustomConfig()
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__A )
lowerCAmelCase_ :Optional[int] = AutoConfig.from_pretrained(__A )
self.assertIsInstance(__A , __A )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
def __lowerCAmelCase ( self ) -> Tuple:
with self.assertRaisesRegex(
__A , """bert-base is not a local folder and is not a valid model identifier""" ):
lowerCAmelCase_ :List[str] = AutoConfig.from_pretrained("""bert-base""" )
def __lowerCAmelCase ( self ) -> Any:
with self.assertRaisesRegex(
__A , r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
lowerCAmelCase_ :Dict = AutoConfig.from_pretrained(__A , revision="""aaaaaa""" )
def __lowerCAmelCase ( self ) -> int:
with self.assertRaisesRegex(
__A , """hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.""" , ):
lowerCAmelCase_ :Union[str, Any] = AutoConfig.from_pretrained("""hf-internal-testing/no-config-test-repo""" )
def __lowerCAmelCase ( self ) -> Tuple:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(__A ):
lowerCAmelCase_ :Tuple = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__A ):
lowerCAmelCase_ :List[str] = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" , trust_remote_code=__A )
lowerCAmelCase_ :str = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" , trust_remote_code=__A )
self.assertEqual(config.__class__.__name__ , """NewModelConfig""" )
# Test config can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__A )
lowerCAmelCase_ :Dict = AutoConfig.from_pretrained(__A , trust_remote_code=__A )
self.assertEqual(reloaded_config.__class__.__name__ , """NewModelConfig""" )
def __lowerCAmelCase ( self ) -> int:
class _SCREAMING_SNAKE_CASE ( A__ ):
UpperCAmelCase_ :int = "new-model"
try:
AutoConfig.register("""new-model""" , __A )
# If remote code is not set, the default is to use local
lowerCAmelCase_ :Any = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" )
self.assertEqual(config.__class__.__name__ , """NewModelConfigLocal""" )
# If remote code is disabled, we load the local one.
lowerCAmelCase_ :Union[str, Any] = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" , trust_remote_code=__A )
self.assertEqual(config.__class__.__name__ , """NewModelConfigLocal""" )
# If remote is enabled, we load from the Hub
lowerCAmelCase_ :Optional[Any] = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" , trust_remote_code=__A )
self.assertEqual(config.__class__.__name__ , """NewModelConfig""" )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
| 1 | 0 |
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
__UpperCAmelCase = logging.get_logger(__name__)
class _SCREAMING_SNAKE_CASE ( A__ ):
def __init__( self , *__A , **__A ) -> None:
warnings.warn(
"""The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use FlavaImageProcessor instead.""" , _A , )
super().__init__(*_A , **_A )
| 356 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase ):
UpperCAmelCase_ :str = GPTSanJapaneseTokenizer
UpperCAmelCase_ :Optional[int] = False
UpperCAmelCase_ :Optional[int] = {"do_clean_text": False, "add_prefix_space": False}
def __lowerCAmelCase ( self ) -> Tuple:
super().setUp()
# fmt: off
        vocab_tokens = ["""こん""", """こんに""", """にちは""", """ばんは""", """世界,㔺界""", """、""", """。""", """<BR>""", """<SP>""", """<TAB>""", """<URL>""", """<EMAIL>""", """<TEL>""", """<DATE>""", """<PRICE>""", """<BLOCK>""", """<KIGOU>""", """<U2000U2BFF>""", """<|emoji1|>""", """<unk>""", """<|bagoftoken|>""", """<|endoftext|>"""]
        # fmt: on
        emoji_tokens = {"""emoji""": {"""\ud83d\ude00""": """<|emoji1|>"""}, """emoji_inv""": {"""<|emoji1|>""": """\ud83d\ude00"""}} # 😀
        self.special_tokens_map = {"""unk_token""": """<unk>"""}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        self.emoji_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""emoji_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
            vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
        with open(self.emoji_file , """w""" ) as emoji_writer:
            emoji_writer.write(json.dumps(emoji_tokens ) )
def __lowerCAmelCase ( self , **__A ) -> int:
kwargs.update(self.special_tokens_map )
return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **__A )
def __lowerCAmelCase ( self , __A ) -> Dict:
lowerCAmelCase_ :List[Any] = """こんにちは、世界。 \nこんばんは、㔺界。😀"""
lowerCAmelCase_ :Optional[int] = """こんにちは、世界。 \nこんばんは、世界。😀"""
return input_text, output_text
    def __lowerCAmelCase ( self , tokenizer ) -> str:
        input_text , output_text = self.get_input_output_texts(tokenizer )
        ids = tokenizer.encode(output_text , add_special_tokens=False )
        text = tokenizer.decode(ids , clean_up_tokenization_spaces=False )
return text, ids
def __lowerCAmelCase ( self ) -> str:
pass # TODO add if relevant
def __lowerCAmelCase ( self ) -> Dict:
pass # TODO add if relevant
def __lowerCAmelCase ( self ) -> int:
pass # TODO add if relevant
def __lowerCAmelCase ( self ) -> Dict:
lowerCAmelCase_ :Union[str, Any] = self.get_tokenizer()
# Testing tokenization
lowerCAmelCase_ :Optional[int] = """こんにちは、世界。 こんばんは、㔺界。"""
lowerCAmelCase_ :Any = ["""こん""", """にちは""", """、""", """世界""", """。""", """<SP>""", """こん""", """ばんは""", """、""", """㔺界""", """。"""]
lowerCAmelCase_ :Tuple = tokenizer.tokenize(__A )
self.assertListEqual(__A , __A )
# Testing conversion to ids without special tokens
lowerCAmelCase_ :List[Any] = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
lowerCAmelCase_ :List[str] = tokenizer.convert_tokens_to_ids(__A )
self.assertListEqual(__A , __A )
# Testing conversion to ids with special tokens
lowerCAmelCase_ :Any = tokens + [tokenizer.unk_token]
lowerCAmelCase_ :Union[str, Any] = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
lowerCAmelCase_ :Union[str, Any] = tokenizer.convert_tokens_to_ids(__A )
self.assertListEqual(__A , __A )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
lowerCAmelCase_ :int = self.get_tokenizer()
# Testing tokenization
lowerCAmelCase_ :Optional[int] = """こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"""
lowerCAmelCase_ :str = """こんにちは、、、、世界。こんばんは、、、、世界。"""
lowerCAmelCase_ :str = tokenizer.encode(__A )
lowerCAmelCase_ :Dict = tokenizer.decode(__A )
self.assertEqual(__A , __A )
@slow
def __lowerCAmelCase ( self ) -> int:
lowerCAmelCase_ :Tuple = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
# Testing tokenization
lowerCAmelCase_ :Optional[Any] = """こんにちは、世界。"""
lowerCAmelCase_ :Any = """こんばんは、㔺界。😀"""
lowerCAmelCase_ :Optional[Any] = """こんにちは、世界。こんばんは、世界。😀"""
lowerCAmelCase_ :List[Any] = tokenizer.encode(prefix_text + input_text )
lowerCAmelCase_ :List[str] = tokenizer.encode("""""" , prefix_text=prefix_text + input_text )
lowerCAmelCase_ :int = tokenizer.encode(__A , prefix_text=__A )
lowerCAmelCase_ :int = tokenizer.decode(__A )
lowerCAmelCase_ :Dict = tokenizer.decode(__A )
lowerCAmelCase_ :Tuple = tokenizer.decode(__A )
self.assertEqual(__A , __A )
self.assertEqual(__A , __A )
self.assertEqual(__A , __A )
@slow
def __lowerCAmelCase ( self ) -> Any:
lowerCAmelCase_ :int = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
# Testing tokenization
lowerCAmelCase_ :List[Any] = """こんにちは、世界。"""
lowerCAmelCase_ :Optional[int] = """こんばんは、㔺界。😀"""
lowerCAmelCase_ :List[str] = len(tokenizer.encode(__A ) ) - 2
lowerCAmelCase_ :Dict = len(tokenizer.encode(__A ) ) - 2
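        # GPTSAN is a prefix-LM: token_type_ids are 1 over the bidirectional prefix segment and 0 over the generated segment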
lowerCAmelCase_ :int = [1] + [0] * (len_prefix + len_text + 1)
lowerCAmelCase_ :List[Any] = [1] * (len_prefix + len_text + 1) + [0]
lowerCAmelCase_ :Dict = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
lowerCAmelCase_ :List[Any] = tokenizer(prefix_text + input_text ).token_type_ids
lowerCAmelCase_ :List[str] = tokenizer("""""" , prefix_text=prefix_text + input_text ).token_type_ids
lowerCAmelCase_ :List[Any] = tokenizer(__A , prefix_text=__A ).token_type_ids
self.assertListEqual(__A , __A )
self.assertListEqual(__A , __A )
self.assertListEqual(__A , __A )
@slow
def __lowerCAmelCase ( self ) -> int:
lowerCAmelCase_ :Dict = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
lowerCAmelCase_ :int = tokenizer.encode("""あンいワ""" )
lowerCAmelCase_ :Optional[Any] = tokenizer.encode("""""" , prefix_text="""あンいワ""" )
lowerCAmelCase_ :int = tokenizer.encode("""いワ""" , prefix_text="""あン""" )
self.assertEqual(tokenizer.decode(__A ) , tokenizer.decode(__A ) )
self.assertEqual(tokenizer.decode(__A ) , tokenizer.decode(__A ) )
self.assertNotEqual(__A , __A )
self.assertNotEqual(__A , __A )
self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token
self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token
@slow
def __lowerCAmelCase ( self ) -> Optional[Any]:
lowerCAmelCase_ :Union[str, Any] = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
lowerCAmelCase_ :int = [["""武田信玄""", """は、"""], ["""織田信長""", """の配下の、"""]]
lowerCAmelCase_ :Dict = tokenizer(__A , padding=__A )
lowerCAmelCase_ :Any = tokenizer.batch_encode_plus(__A , padding=__A )
# fmt: off
lowerCAmelCase_ :int = [[3_5993, 8640, 2_5948, 3_5998, 3_0647, 3_5675, 3_5999, 3_5999], [3_5993, 1_0382, 9868, 3_5998, 3_0646, 9459, 3_0646, 3_5675]]
lowerCAmelCase_ :List[str] = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
lowerCAmelCase_ :int = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
# fmt: on
self.assertListEqual(x_token.input_ids , __A )
self.assertListEqual(x_token.token_type_ids , __A )
self.assertListEqual(x_token.attention_mask , __A )
self.assertListEqual(x_token_a.input_ids , __A )
self.assertListEqual(x_token_a.token_type_ids , __A )
self.assertListEqual(x_token_a.attention_mask , __A )
def __lowerCAmelCase ( self ) -> Tuple:
# Intentionally convert some words to accommodate character fluctuations unique to Japanese
pass
def __lowerCAmelCase ( self ) -> str:
# tokenizer has no padding token
pass
| 1 | 0 |
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {'vocab_file': 'spiece.model'}
__UpperCAmelCase = {
'vocab_file': {
'TsinghuaAI/CPM-Generate': 'https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model',
}
}
class _SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
def __init__( self , __A , __A=False , __A=True , __A=False , __A="<s>" , __A="</s>" , __A="<unk>" , __A="<sep>" , __A="<pad>" , __A="<cls>" , __A="<mask>" , __A=["<eop>", "<eod>"] , __A = None , **__A , ) -> List[str]:
lowerCAmelCase_ :int = AddedToken(__snake_case , lstrip=__snake_case , rstrip=__snake_case ) if isinstance(__snake_case , __snake_case ) else mask_token
lowerCAmelCase_ :Tuple = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=__snake_case , remove_space=__snake_case , keep_accents=__snake_case , bos_token=__snake_case , eos_token=__snake_case , unk_token=__snake_case , sep_token=__snake_case , pad_token=__snake_case , cls_token=__snake_case , mask_token=__snake_case , additional_special_tokens=__snake_case , sp_model_kwargs=self.sp_model_kwargs , **__snake_case , )
lowerCAmelCase_ :List[Any] = 3
lowerCAmelCase_ :List[Any] = do_lower_case
lowerCAmelCase_ :List[Any] = remove_space
lowerCAmelCase_ :List[Any] = keep_accents
lowerCAmelCase_ :List[Any] = vocab_file
lowerCAmelCase_ :Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__snake_case )
try:
import jieba
except ModuleNotFoundError as error:
raise error.__class__(
"""You need to install jieba to use CpmTokenizer or CpmTokenizerFast. """
"""See https://pypi.org/project/jieba/ for installation.""" )
lowerCAmelCase_ :Optional[Any] = jieba
lowerCAmelCase_ :List[str] = str.maketrans(""" \n""" , """\u2582\u2583""" )
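        # spaces and newlines are mapped to \u2582/\u2583 before SentencePiece; _decode reverses this mapping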
@property
# Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
def __lowerCAmelCase ( self ) -> Union[str, Any]:
return len(self.sp_model )
def __lowerCAmelCase ( self ) -> Any:
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> str:
        state = self.__dict__.copy()
        state["""sp_model"""] = None
return state
def __setstate__( self , __A ) -> str:
lowerCAmelCase_ :Optional[Any] = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
lowerCAmelCase_ :int = {}
lowerCAmelCase_ :Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __lowerCAmelCase ( self , __A ) -> Any:
if self.remove_space:
lowerCAmelCase_ :Optional[Any] = ' '.join(inputs.strip().split() )
else:
lowerCAmelCase_ :str = inputs
lowerCAmelCase_ :List[Any] = outputs.replace("""``""" , """\"""" ).replace("""\'\'""" , """\"""" )
if not self.keep_accents:
lowerCAmelCase_ :List[Any] = unicodedata.normalize("""NFKD""" , __snake_case )
lowerCAmelCase_ :str = ''.join([c for c in outputs if not unicodedata.combining(__snake_case )] )
if self.do_lower_case:
lowerCAmelCase_ :Dict = outputs.lower()
return outputs
def __lowerCAmelCase ( self , __A ) -> List[str]:
lowerCAmelCase_ :Dict = self.preprocess_text(__snake_case )
lowerCAmelCase_ :Union[str, Any] = self.sp_model.encode(__snake_case , out_type=__snake_case )
lowerCAmelCase_ :Tuple = []
for piece in pieces:
if len(__snake_case ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit():
lowerCAmelCase_ :int = self.sp_model.EncodeAsPieces(piece[:-1].replace(__snake_case , """""" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
lowerCAmelCase_ :List[str] = cur_pieces[1:]
else:
lowerCAmelCase_ :Union[str, Any] = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(__snake_case )
else:
new_pieces.append(__snake_case )
return new_pieces
def __lowerCAmelCase ( self , __A ) -> Optional[int]:
return self.sp_model.PieceToId(__snake_case )
def __lowerCAmelCase ( self , __A ) -> Optional[int]:
return self.sp_model.IdToPiece(__snake_case )
    def __lowerCAmelCase ( self , __A ) -> str:
        # join the SentencePiece pieces and turn the SPIECE_UNDERLINE marker back into spaces
        out_string = """""".join(__A ).replace(SPIECE_UNDERLINE , """ """ ).strip()
        return out_string
def __lowerCAmelCase ( self , __A , __A = None ) -> Optional[Any]:
lowerCAmelCase_ :List[Any] = [self.sep_token_id]
lowerCAmelCase_ :Dict = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def __lowerCAmelCase ( self , __A , __A = None , __A = False ) -> List[Any]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__snake_case , token_ids_a=__snake_case , already_has_special_tokens=__snake_case )
if token_ids_a is not None:
return ([0] * len(__snake_case )) + [1] + ([0] * len(__snake_case )) + [1, 1]
return ([0] * len(__snake_case )) + [1, 1]
def __lowerCAmelCase ( self , __A , __A = None ) -> Any:
lowerCAmelCase_ :List[str] = [self.sep_token_id]
lowerCAmelCase_ :Optional[Any] = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def __lowerCAmelCase ( self , __A , __A = None ) -> List[str]:
if not os.path.isdir(__snake_case ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowerCAmelCase_ :Any = os.path.join(
__snake_case , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__snake_case ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __snake_case )
elif not os.path.isfile(self.vocab_file ):
with open(__snake_case , """wb""" ) as fi:
lowerCAmelCase_ :Any = self.sp_model.serialized_model_proto()
fi.write(__snake_case )
return (out_vocab_file,)
    def __lowerCAmelCase ( self , *__A , **__A ) -> str:
        text = super()._decode(*__A , **__A )
        text = text.replace(""" """ , """""" ).replace("""\u2582""" , """ """ ).replace("""\u2583""" , """\n""" )
return text
| 357 |
"""simple docstring"""
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
dataset = pd.read_csv(
    'https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/'
    'position_salaries.csv'
)
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
# Expand the features to a degree-4 polynomial, then fit ordinary least squares on them
poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(X)
pol_reg = LinearRegression()
pol_reg.fit(X_poly, y)
def viz_polymonial() -> None:
    '''simple docstring'''
    plt.scatter(X, y, color="""red""")
    plt.plot(X, pol_reg.predict(poly_reg.fit_transform(X)), color="""blue""")
    plt.title("""Truth or Bluff (Polynomial Regression)""")
    plt.xlabel("""Position level""")
    plt.ylabel("""Salary""")
    plt.show()
if __name__ == "__main__":
    viz_polymonial()
    # Predicting a new result with Polynomial Regression
    pol_reg.predict(poly_reg.fit_transform([[5.5]]))
# output should be 132148.43750003
| 1 | 0 |
from typing import Any
import numpy as np
def is_hermitian(matrix: np.ndarray) -> bool:
    '''simple docstring'''
    # a matrix is Hermitian iff it equals its own conjugate transpose
    return np.array_equal(matrix, matrix.conjugate().T)
def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    '''simple docstring'''
    # Rayleigh quotient (v* A v) / (v* v); real-valued whenever A is Hermitian
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))
def tests() -> None:
    '''simple docstring'''
    a = np.array([[2, 2 + 1J, 4], [2 - 1J, 3, 1J], [4, -1J, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), f"""{a} is not hermitian."""
    print(rayleigh_quotient(a, v))
    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f"""{a} is not hermitian."""
    assert rayleigh_quotient(a, v) == float(3)
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    tests()
| 358 |
"""simple docstring"""
from __future__ import annotations
__UpperCAmelCase = 1.6021e-19 # units = C
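# Conductivity of a semiconductor: sigma = ELECTRON_CHARGE * electron_conc * mobility.
# Pass the unknown quantity as 0 and the function solves for it from the other two.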
def _snake_case ( conductivity : float , electron_conc : float , mobility : float , ) -> tuple[str, float]:
    '''simple docstring'''
    if (conductivity, electron_conc, mobility).count(0 ) != 1:
        raise ValueError("""Exactly one of conductivity, electron_conc or mobility must be 0""" )
    elif conductivity < 0:
        raise ValueError("""Conductivity cannot be negative""" )
    elif electron_conc < 0:
        raise ValueError("""Electron concentration cannot be negative""" )
    elif mobility < 0:
        raise ValueError("""Mobility cannot be negative""" )
elif conductivity == 0:
return (
"conductivity",
mobility * electron_conc * ELECTRON_CHARGE,
)
elif electron_conc == 0:
return (
"electron_conc",
conductivity / (mobility * ELECTRON_CHARGE),
)
else:
return (
"mobility",
conductivity / (electron_conc * ELECTRON_CHARGE),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 1 | 0 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True )
class _SCREAMING_SNAKE_CASE ( TaskTemplate ):
# `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
UpperCAmelCase_ :str = field(default="summarization" , metadata={"include_in_asdict_even_if_is_default": True} )
UpperCAmelCase_ :ClassVar[Features] = Features({"text": Value("string" )} )
UpperCAmelCase_ :ClassVar[Features] = Features({"summary": Value("string" )} )
UpperCAmelCase_ :str = "text"
UpperCAmelCase_ :str = "summary"
@property
def __lowerCAmelCase ( self ) -> Optional[int]:
return {self.text_column: "text", self.summary_column: "summary"}
| 359 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
__UpperCAmelCase = logging.get_logger(__name__)
class _SCREAMING_SNAKE_CASE ( A__ ):
def __init__( self , *__A , **__A ) -> None:
warnings.warn(
"""The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use CLIPImageProcessor instead.""" , __A , )
super().__init__(*__A , **__A )
| 1 | 0 |
"""simple docstring"""
def _snake_case ( principal : float , rate_per_annum : float , years_to_repay : int ) -> float:
'''simple docstring'''
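    # Standard amortization formula: EMI = P * r * (1 + r)**n / ((1 + r)**n - 1),
    # where r is the monthly rate and n the number of monthly payments.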
if principal <= 0:
raise Exception("""Principal borrowed must be > 0""" )
if rate_per_annum < 0:
raise Exception("""Rate of interest must be >= 0""" )
    if years_to_repay <= 0 or not isinstance(years_to_repay , int ):
raise Exception("""Years to repay must be an integer > 0""" )
# Yearly rate is divided by 12 to get monthly rate
    rate_per_month = rate_per_annum / 1_2
    # Years to repay is multiplied by 12 to get number of payments as payment is monthly
    number_of_payments = years_to_repay * 1_2
return (
principal
* rate_per_month
* (1 + rate_per_month) ** number_of_payments
/ ((1 + rate_per_month) ** number_of_payments - 1)
)
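# Example: _snake_case(25_000, 0.08, 5) -> roughly 506.9 (monthly payment on a 25,000 loan at 8% over 5 years)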
if __name__ == "__main__":
import doctest
doctest.testmod()
| 360 |
"""simple docstring"""
from itertools import zip_longest
import requests
from bsa import BeautifulSoup
from pandas import DataFrame
def _snake_case ( lowercase__ : str = "laptop" ) -> DataFrame:
'''simple docstring'''
lowerCAmelCase_ :Dict = f"""https://www.amazon.in/laptop/s?k={product}"""
lowerCAmelCase_ :List[str] = {
"""User-Agent""": """Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36
(KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36""",
"""Accept-Language""": """en-US, en;q=0.5""",
}
lowerCAmelCase_ :List[Any] = BeautifulSoup(requests.get(lowercase__ , headers=lowercase__ ).text )
# Initialize a Pandas dataframe with the column titles
lowerCAmelCase_ :Union[str, Any] = DataFrame(
columns=[
"""Product Title""",
"""Product Link""",
"""Current Price of the product""",
"""Product Rating""",
"""MRP of the product""",
"""Discount""",
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
"""div""" , attrs={"""class""": """s-result-item""", """data-component-type""": """s-search-result"""} , ) , soup.find_all("""div""" , attrs={"""class""": """a-row a-size-base a-color-base"""} ) , ):
try:
            product_title = item.h2.text
            product_link = """https://www.amazon.in/""" + item.h2.a["""href"""]
            product_price = item.find("""span""" , attrs={"""class""": """a-offscreen"""} ).text
            try:
                product_rating = item.find("""span""" , attrs={"""class""": """a-icon-alt"""} ).text
            except AttributeError:
                product_rating = """Not available"""
            try:
                product_mrp = (
                    """₹"""
                    + item.find(
                        """span""" , attrs={"""class""": """a-price a-text-price"""} ).text.split("""₹""" )[1]
                )
            except AttributeError:
                product_mrp = """"""
            try:
                discount = float(
                    (
                        (
                            float(product_mrp.strip("""₹""" ).replace(""",""" , """""" ) )
                            - float(product_price.strip("""₹""" ).replace(""",""" , """""" ) )
                        )
                        / float(product_mrp.strip("""₹""" ).replace(""",""" , """""" ) )
                    )
                    * 1_0_0 )
            except ValueError:
                discount = float("""nan""" )
        except AttributeError:
            pass
        data_frame.loc[len(data_frame.index )] = [
product_title,
product_link,
product_price,
product_rating,
product_mrp,
discount,
]
lowerCAmelCase_ :List[Any] = """ """
lowerCAmelCase_ :Tuple = """ """
data_frame.index += 1
return data_frame
if __name__ == "__main__":
__UpperCAmelCase = 'headphones'
get_amazon_product_data(product).to_csv(F"""Amazon Product Data for {product}.csv""")
| 1 | 0 |
"""simple docstring"""
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
__UpperCAmelCase = TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow('', '|', '|'),
datarow=DataRow('', '|', '|'),
padding=1,
with_header_hide=None,
)
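# Pipe-delimited, border-free table format so tabulate output renders cleanly inside Slack code blocks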
__UpperCAmelCase = []
__UpperCAmelCase = []
__UpperCAmelCase = {'type': 'section', 'text': {'type': 'plain_text', 'text': 'No failed tests! 🤗', 'emoji': True}}
__UpperCAmelCase = [
{
'type': 'header',
'text': {
'type': 'plain_text',
'text': F"""🤗 Accelerate nightly {os.environ.get("TEST_TYPE", "")} test results""",
'emoji': True,
},
}
]
__UpperCAmelCase = 0
for log in Path().glob('*.log'):
__UpperCAmelCase = 0
with open(log, 'r') as f:
for line in f:
__UpperCAmelCase = json.loads(line)
if line.get('nodeid', '') != "":
__UpperCAmelCase = line['nodeid']
if line.get('duration', None) is not None:
__UpperCAmelCase = F"""{line["duration"]:.4f}"""
if line.get('outcome', '') == "failed":
section_num_failed += 1
failed.append([test, duration, log.name.split('_')[0]])
total_num_failed += 1
group_info.append([str(log), section_num_failed, failed])
__UpperCAmelCase = []
log.unlink()
__UpperCAmelCase = ''
__UpperCAmelCase = []
if total_num_failed > 0:
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
if num_failed == 1:
message += F"*{name[1:]}: {num_failed} failed test*\n"
else:
message += F"*{name[1:]}: {num_failed} failed tests*\n"
__UpperCAmelCase = []
__UpperCAmelCase = {}
for test in failed_tests:
__UpperCAmelCase = test[0].split('::')
__UpperCAmelCase = data[0].split('/')[-1]
if data[0] not in filesafailed:
__UpperCAmelCase = [data[1:]]
else:
filesafailed[data[0]] += [data[1:]]
failed_table.append(data)
__UpperCAmelCase = [test[0] for test in failed_table]
__UpperCAmelCase = list(set(files))
# Count number of instances in failed_tests
__UpperCAmelCase = []
for file in individual_files:
table.append([file, len(filesafailed[file])])
__UpperCAmelCase = tabulate(
table,
headers=['Test Location', 'Num Failed'],
tablefmt=hf_table_format,
stralign='right',
)
message += F"\n```\n{failed_table}\n```"
all_filesafailed.append(filesafailed)
if len(message) > 30_00:
__UpperCAmelCase = 'Too many failed tests, please see the full report in the Action results.'
__UpperCAmelCase = len(err) + 10
__UpperCAmelCase = message[: 30_00 - offset] + F"""\n...\n```\n{err}"""
print(F"""### {message}""")
else:
__UpperCAmelCase = 'No failed tests! 🤗'
print(F"""## {message}""")
payload.append(no_error_payload)
if os.environ.get('TEST_TYPE', '') != "":
from slack_sdk import WebClient
__UpperCAmelCase = WebClient(token=os.environ['SLACK_API_TOKEN'])
if message != "No failed tests! 🤗":
__UpperCAmelCase = {
'type': 'section',
'text': {
'type': 'mrkdwn',
'text': message,
},
}
payload.append(md_report)
__UpperCAmelCase = {
'type': 'section',
'text': {
'type': 'mrkdwn',
'text': '*For more details:*',
},
'accessory': {
'type': 'button',
'text': {
'type': 'plain_text',
'text': 'Check Action results',
'emoji': True,
},
'url': F"""https://github.com/{os.environ["GITHUB_REPOSITORY"]}/actions/runs/{os.environ["GITHUB_RUN_ID"]}""",
},
}
payload.append(action_button)
__UpperCAmelCase = {
'type': 'context',
'elements': [
{
'type': 'plain_text',
'text': F"""Nightly {os.environ.get("TEST_TYPE")} test results for {date.today()}""",
}
],
}
payload.append(date_report)
__UpperCAmelCase = client.chat_postMessage(channel='#accelerate-ci-daily', text=message, blocks=payload)
__UpperCAmelCase = response.data['ts']
for failed_file in all_filesafailed:
for test_location, test_failures in failed_file.items():
# Keep only the first instance of the test name
__UpperCAmelCase = ''
for i, row in enumerate(test_failures):
if row[0] != test_class:
__UpperCAmelCase = row[0]
else:
__UpperCAmelCase = ''
__UpperCAmelCase = {
'type': 'section',
'text': {
'type': 'mrkdwn',
'text': F"""Test location: {test_location}\n```\n{tabulate(test_failures, headers=["Class", "Test"], tablefmt=hf_table_format, stralign="right")}\n```""",
},
}
client.chat_postMessage(
channel='#accelerate-ci-daily',
thread_ts=ts,
blocks=[payload],
)
| 361 |
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __lowerCAmelCase ( self ) -> Optional[int]:
lowerCAmelCase_ :Any = """laion/clap-htsat-unfused"""
lowerCAmelCase_ :Optional[Any] = tempfile.mkdtemp()
def __lowerCAmelCase ( self , **__A ) -> List[Any]:
return RobertaTokenizer.from_pretrained(self.checkpoint , **__A )
def __lowerCAmelCase ( self , **__A ) -> Tuple:
return ClapFeatureExtractor.from_pretrained(self.checkpoint , **__A )
def __lowerCAmelCase ( self ) -> int:
shutil.rmtree(self.tmpdirname )
def __lowerCAmelCase ( self ) -> List[str]:
lowerCAmelCase_ :Optional[Any] = self.get_tokenizer()
lowerCAmelCase_ :Optional[Any] = self.get_feature_extractor()
lowerCAmelCase_ :Optional[int] = ClapProcessor(tokenizer=__A , feature_extractor=__A )
processor.save_pretrained(self.tmpdirname )
lowerCAmelCase_ :Optional[Any] = ClapProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , __A )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , __A )
def __lowerCAmelCase ( self ) -> Tuple:
lowerCAmelCase_ :Dict = ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() )
processor.save_pretrained(self.tmpdirname )
lowerCAmelCase_ :str = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
lowerCAmelCase_ :Dict = self.get_feature_extractor(do_normalize=__A , padding_value=1.0 )
lowerCAmelCase_ :Union[str, Any] = ClapProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=__A , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __A )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.feature_extractor , __A )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
lowerCAmelCase_ :Dict = self.get_feature_extractor()
lowerCAmelCase_ :str = self.get_tokenizer()
lowerCAmelCase_ :List[Any] = ClapProcessor(tokenizer=__A , feature_extractor=__A )
lowerCAmelCase_ :Optional[Any] = floats_list((3, 1000) )
lowerCAmelCase_ :Optional[Any] = feature_extractor(__A , return_tensors="""np""" )
lowerCAmelCase_ :str = processor(audios=__A , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __lowerCAmelCase ( self ) -> List[str]:
lowerCAmelCase_ :List[Any] = self.get_feature_extractor()
lowerCAmelCase_ :Any = self.get_tokenizer()
lowerCAmelCase_ :Optional[int] = ClapProcessor(tokenizer=__A , feature_extractor=__A )
lowerCAmelCase_ :List[Any] = """This is a test string"""
lowerCAmelCase_ :Dict = processor(text=__A )
lowerCAmelCase_ :List[str] = tokenizer(__A )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __lowerCAmelCase ( self ) -> int:
lowerCAmelCase_ :int = self.get_feature_extractor()
lowerCAmelCase_ :Tuple = self.get_tokenizer()
lowerCAmelCase_ :Optional[Any] = ClapProcessor(tokenizer=__A , feature_extractor=__A )
lowerCAmelCase_ :int = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCAmelCase_ :Tuple = processor.batch_decode(__A )
lowerCAmelCase_ :Optional[Any] = tokenizer.batch_decode(__A )
self.assertListEqual(__A , __A )
def __lowerCAmelCase ( self ) -> List[Any]:
lowerCAmelCase_ :Optional[Any] = self.get_feature_extractor()
lowerCAmelCase_ :Any = self.get_tokenizer()
lowerCAmelCase_ :Optional[int] = ClapProcessor(tokenizer=__A , feature_extractor=__A )
self.assertListEqual(
processor.model_input_names[2:] , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , )
| 1 | 0 |
"""simple docstring"""
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2  # indentation used when writing the generated JSON files
class Dictionary:
    # minimal re-implementation of fairseq's Dictionary (symbol <-> id mapping)
    def __init__( self , *, # begin keyword-only arguments
    bos="<s>" , pad="<pad>" , eos="</s>" , unk="<unk>" , extra_special_symbols=None , ) -> None:
        self.bos_word , self.unk_word , self.pad_word , self.eos_word = bos, unk, pad, eos
        self.symbols = []
        self.count = []
        self.indices = {}
        self.bos_index = self.add_symbol(bos )
        self.pad_index = self.add_symbol(pad )
        self.eos_index = self.add_symbol(eos )
        self.unk_index = self.add_symbol(unk )
        if extra_special_symbols:
            for s in extra_special_symbols:
                self.add_symbol(s )
        self.nspecial = len(self.symbols )
    def __eq__( self , other ) -> bool:
        return self.indices == other.indices
    def __getitem__( self , idx ) -> str:
if idx < len(self.symbols ):
return self.symbols[idx]
return self.unk_word
def __len__( self ) -> List[str]:
return len(self.symbols )
    def __contains__( self , sym ) -> bool:
return sym in self.indices
    @classmethod
    def load ( cls , f ) -> "Dictionary":
        d = cls()
        d.add_from_file(f )
        return d
    def add_symbol ( self , word , n=1 , overwrite=False ) -> int:
        if word in self.indices and not overwrite:
            idx = self.indices[word]
            self.count[idx] = self.count[idx] + n
            return idx
        else:
            idx = len(self.symbols )
            self.indices[word] = idx
            self.symbols.append(word )
            self.count.append(n )
            return idx
    def _load_meta ( self , lines ) -> int:
        return 0
    def add_from_file ( self , f ) -> None:
        if isinstance(f , str ):
            try:
                with open(f , """r""" , encoding="""utf-8""" ) as fd:
                    self.add_from_file(fd )
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception("""Incorrect encoding detected in {}, please rebuild the dataset""".format(f ) )
            return
        lines = f.readlines()
        indices_start_line = self._load_meta(lines )
        for line in lines[indices_start_line:]:
            try:
                line , field = line.rstrip().rsplit(""" """ , 1 )
                if field == "#fairseq:overwrite":
                    overwrite = True
                    line , field = line.rsplit(""" """ , 1 )
                else:
                    overwrite = False
                count = int(field )
                word = line
                if word in self and not overwrite:
                    raise RuntimeError(
                        """Duplicate word found when loading Dictionary: '{}'. """
                        """Duplicate words can overwrite earlier ones by adding the """
                        """#fairseq:overwrite flag at the end of the corresponding row """
                        """in the dictionary file. If using the Camembert model, please """
                        """download an updated copy of the model file.""".format(word ) )
                self.add_symbol(word , n=count , overwrite=overwrite )
            except ValueError:
                raise ValueError("""Incorrect dictionary format, expected '<token> <cnt> [flags]'""" )
def rewrite_dict_keys ( d : dict ) -> dict:
    '''simple docstring'''
    # (1) strip the word-continuation marker "@@"; (2) append "</w>" to words that are not broken up
    da = dict((re.sub(r"""@@$""" , """""" , k ), v) if k.endswith("""@@""" ) else (re.sub(r"""$""" , """</w>""" , k ), v) for k, v in d.items() )
    keep_keys = """<s> <pad> </s> <unk>""".split()
    # restore the special tokens
    for k in keep_keys:
        del da[F"""{k}</w>"""]
        da[k] = d[k] # restore
    return da
def convert_biogpt_checkpoint_to_pytorch ( biogpt_checkpoint_path : str , pytorch_dump_folder_path : str ) -> None:
    '''simple docstring'''
    if not os.path.exists(biogpt_checkpoint_path ):
        raise ValueError(f"""path {biogpt_checkpoint_path} does not exist!""" )
    os.makedirs(pytorch_dump_folder_path , exist_ok=True )
    print(f"""Writing results to {pytorch_dump_folder_path}""" )
    # handle various types of models
    checkpoint_file = os.path.join(biogpt_checkpoint_path , """checkpoint.pt""" )
    if not os.path.isfile(checkpoint_file ):
        raise ValueError(f"""path to the file {checkpoint_file} does not exist!""" )
    chkpt = torch.load(checkpoint_file , map_location="""cpu""" )
    args = chkpt["""cfg"""]["""model"""]
    # dicts
    dict_file = os.path.join(biogpt_checkpoint_path , """dict.txt""" )
    if not os.path.isfile(dict_file ):
        raise ValueError(f"""path to the file {dict_file} does not exist!""" )
    src_dict = Dictionary.load(dict_file )
    src_vocab = rewrite_dict_keys(src_dict.indices )
    src_vocab_size = len(src_vocab )
    src_vocab_file = os.path.join(pytorch_dump_folder_path , VOCAB_FILES_NAMES["""vocab_file"""] )
    print(f"""Generating {src_vocab_file} of {src_vocab_size} records""" )
    with open(src_vocab_file , """w""" , encoding="""utf-8""" ) as f:
        f.write(json.dumps(src_vocab , ensure_ascii=False , indent=json_indent ) )
    # merges_file (bpecodes)
    bpecodes_file = os.path.join(biogpt_checkpoint_path , """bpecodes""" )
    if not os.path.isfile(bpecodes_file ):
        raise ValueError(f"""path to the file {bpecodes_file} does not exist!""" )
    merges_file = os.path.join(pytorch_dump_folder_path , VOCAB_FILES_NAMES["""merges_file"""] )
    shutil.copyfile(bpecodes_file , merges_file )
    # model config
    biogpt_model_config_file = os.path.join(pytorch_dump_folder_path , """config.json""" )
    model_conf = {
        """activation_dropout""": args["""activation_dropout"""],
        """architectures""": ["""BioGptForCausalLM"""],
        """attention_probs_dropout_prob""": args["""attention_dropout"""],
        """bos_token_id""": 0,
        """eos_token_id""": 2,
        """hidden_act""": args["""activation_fn"""],
        """hidden_dropout_prob""": args["""dropout"""],
        """hidden_size""": args["""decoder_embed_dim"""],
        """initializer_range""": 0.02,
        """intermediate_size""": args["""decoder_ffn_embed_dim"""],
        """layer_norm_eps""": 1E-12,
        """layerdrop""": args["""decoder_layerdrop"""],
        """max_position_embeddings""": args["""max_target_positions"""],
        """model_type""": """biogpt""",
        """num_attention_heads""": args["""decoder_attention_heads"""],
        """num_hidden_layers""": args["""decoder_layers"""],
        """pad_token_id""": 1,
        """scale_embedding""": not args["""no_scale_embedding"""],
        """tie_word_embeddings""": args["""share_decoder_input_output_embed"""],
        """vocab_size""": src_vocab_size,
    }
    # good hparam defaults to start with
    print(f"""Generating {biogpt_model_config_file}""" )
    with open(biogpt_model_config_file , """w""" , encoding="""utf-8""" ) as f:
        f.write(json.dumps(model_conf , ensure_ascii=False , indent=json_indent ) )
    # tokenizer config
    biogpt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path , TOKENIZER_CONFIG_FILE )
    tokenizer_conf = {
        """bos_token""": """<s>""",
        """eos_token""": """</s>""",
        """model_max_length""": 1_0_2_4,
        """pad_token""": """<pad>""",
        """special_tokens_map_file""": None,
        """tokenizer_class""": """BioGptTokenizer""",
        """unk_token""": """<unk>""",
    }
    print(f"""Generating {biogpt_tokenizer_config_file}""" )
    with open(biogpt_tokenizer_config_file , """w""" , encoding="""utf-8""" ) as f:
        f.write(json.dumps(tokenizer_conf , ensure_ascii=False , indent=json_indent ) )
    # model
    model_state_dict = chkpt["""model"""]
    # remove unneeded keys
    ignore_keys = [
        """decoder.version""",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k , None )
    layer_names = list(model_state_dict.keys() )
    for layer_name in layer_names:
        if layer_name.endswith("""output_projection.weight""" ):
            model_state_dict["""output_projection.weight"""] = model_state_dict.pop(layer_name )
        else:
            model_state_dict[layer_name.replace("""decoder""" , """biogpt""" )] = model_state_dict.pop(layer_name )
    config = BioGptConfig.from_pretrained(pytorch_dump_folder_path )
    model_new = BioGptForCausalLM(config )
    # check that it loads ok
    model_new.load_state_dict(model_state_dict )
    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path , WEIGHTS_NAME )
    print(f"""Generating {pytorch_weights_dump_path}""" )
    torch.save(model_state_dict , pytorch_weights_dump_path )
print("""Conversion is done!""" )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--biogpt_checkpoint_path',
default=None,
type=str,
required=True,
help=(
'Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'
' bpecodes, etc.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
__UpperCAmelCase = parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
| 362 |
"""simple docstring"""
import os
from math import log10
def solution ( data_file : str = "base_exp.txt" ) -> int:
    '''simple docstring'''
    # Project Euler 99: compare a**x values via x * log10(a) instead of computing the huge powers
    largest: float = 0
    result: int = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__ ) , data_file ) ) ):
        a , x = list(map(int , line.split(""",""" ) ) )
        if x * log10(a ) > largest:
            largest = x * log10(a )
            result = i + 1
    return result
if __name__ == "__main__":
print(solution())
| 1 | 0 |
"""simple docstring"""
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class _SCREAMING_SNAKE_CASE ( snake_case__ ):
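    # images: the generated images; nsfw_content_detected: per-image safety-checker flags (None when the checker is disabled)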
UpperCAmelCase_ :Union[List[PIL.Image.Image], np.ndarray]
UpperCAmelCase_ :Optional[List[bool]]
if is_transformers_available() and is_torch_available():
from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
| 363 |
"""simple docstring"""
import itertools
import math
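# Project Euler problem 7: find the 10001st prime via an incremental prime generator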
def is_prime ( number : int ) -> bool:
    '''simple docstring'''
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def prime_generator ( ):
    '''simple docstring'''
    num = 2
    while True:
        if is_prime(num ):
            yield num
        num += 1
def solution ( nth : int = 1_0_0_0_1 ) -> int:
    '''simple docstring'''
    return next(itertools.islice(prime_generator() , nth - 1 , nth ) )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 1 | 0 |
"""simple docstring"""
__UpperCAmelCase = 'Tobias Carryer'
from time import time
class LinearCongruentialGenerator:
    def __init__( self , multiplier , increment , modulo , seed=int(time() ) ) -> None: # noqa: B008
        self.multiplier = multiplier
        self.increment = increment
        self.modulo = modulo
        self.seed = seed
    def next_number( self ) -> int:
        # x_{n+1} = (multiplier * x_n + increment) mod modulo
        self.seed = (self.multiplier * self.seed + self.increment) % self.modulo
        return self.seed
if __name__ == "__main__":
    # Show the LCG in action with the Numerical Recipes constants (modulo 2 << 31 == 2**32).
    lcg = LinearCongruentialGenerator(1_66_45_25, 10_13_90_42_23, 2 << 31)
    while True:
        print(lcg.next_number() )
| 364 |
"""simple docstring"""
def solution ( length : int = 5_0 ) -> int:
    '''simple docstring'''
    # ways_number[n] counts the ways to fill a row of length n with red blocks of
    # length >= 3 separated by at least one black square (Project Euler 114)
    ways_number = [1] * (length + 1)
    for row_length in range(3 , length + 1 ):
        for block_length in range(3 , row_length + 1 ):
            for block_start in range(row_length - block_length ):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]
            ways_number[row_length] += 1
    return ways_number[length]
if __name__ == "__main__":
print(F"""{solution() = }""")
| 1 | 0 |
"""simple docstring"""
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
__UpperCAmelCase = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
__UpperCAmelCase = " def __init__(self, config):\n super().__init__()\n self.transform = BertPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states)\n return hidden_states\n"
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __lowerCAmelCase ( self ) -> List[str]:
lowerCAmelCase_ :Tuple = tempfile.mkdtemp()
os.makedirs(os.path.join(self.transformer_dir , """models/bert/""" ) )
lowerCAmelCase_ :Any = self.transformer_dir
shutil.copy(
os.path.join(__A , """src/transformers/models/bert/modeling_bert.py""" ) , os.path.join(self.transformer_dir , """models/bert/modeling_bert.py""" ) , )
def __lowerCAmelCase ( self ) -> Optional[int]:
lowerCAmelCase_ :str = """src/transformers"""
shutil.rmtree(self.transformer_dir )
def __lowerCAmelCase ( self , __A , __A , __A , __A=None ) -> Optional[Any]:
lowerCAmelCase_ :str = comment + f"""\nclass {class_name}(nn.Module):\n""" + class_code
if overwrite_result is not None:
lowerCAmelCase_ :List[str] = comment + f"""\nclass {class_name}(nn.Module):\n""" + overwrite_result
        lowerCAmelCase_ :List[str] = black.Mode(target_versions={black.TargetVersion.PY35} , line_length=119 )
lowerCAmelCase_ :Union[str, Any] = black.format_str(__A , mode=__A )
lowerCAmelCase_ :List[str] = os.path.join(self.transformer_dir , """new_code.py""" )
with open(__A , """w""" , newline="""\n""" ) as f:
f.write(__A )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(__A ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=__A )
with open(__A , """r""" ) as f:
self.assertTrue(f.read() , __A )
def __lowerCAmelCase ( self ) -> Optional[int]:
lowerCAmelCase_ :str = check_copies.find_code_in_transformers("""models.bert.modeling_bert.BertLMPredictionHead""" )
self.assertEqual(__A , __A )
def __lowerCAmelCase ( self ) -> List[str]:
# Base copy consistency
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead""" , """BertLMPredictionHead""" , REFERENCE_CODE + """\n""" , )
# With no empty line at the end
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead""" , """BertLMPredictionHead""" , __A , )
# Copy consistency with rename
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel""" , """TestModelLMPredictionHead""" , re.sub("""Bert""" , """TestModel""" , __A ) , )
# Copy consistency with a really long name
lowerCAmelCase_ :int = """TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"""
self.check_copy_consistency(
f"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}""" , f"""{long_class_name}LMPredictionHead""" , re.sub("""Bert""" , __A , __A ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel""" , """TestModelLMPredictionHead""" , __A , overwrite_result=re.sub("""Bert""" , """TestModel""" , __A ) , )
def __lowerCAmelCase ( self ) -> Dict:
lowerCAmelCase_ :Any = check_copies.LOCALIZED_READMES["""README_zh-hans.md"""]
lowerCAmelCase_ :str = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"""
""" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"""
""" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"""
""" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1."""
""" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),"""
""" released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"""
""" lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same"""
""" method has been applied to compress GPT2 into"""
""" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"""
""" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"""
""" Multilingual BERT into"""
""" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"""
""" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**"""
""" (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders"""
""" as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang"""
""" Luong, Quoc V. Le, Christopher D. Manning."""
)
lowerCAmelCase_ :List[Any] = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
lowerCAmelCase_ :Union[str, Any] = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1."""
""" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文"""
""" [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"""
""" lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same"""
""" method has been applied to compress GPT2 into"""
""" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"""
""" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"""
""" Multilingual BERT into"""
""" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"""
""" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自"""
""" Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather"""
""" than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,"""
""" Christopher D. Manning 发布。\n"""
)
lowerCAmelCase_ , lowerCAmelCase_ :Any = check_copies.convert_to_localized_md(
__A , __A , localized_readme["""format_model_list"""] )
self.assertFalse(__A )
self.assertEqual(__A , __A )
lowerCAmelCase_ , lowerCAmelCase_ :Tuple = check_copies.convert_to_localized_md(
__A , __A , localized_readme["""format_model_list"""] )
# Check whether the number of models is equal to README.md after conversion.
self.assertTrue(__A )
lowerCAmelCase_ :List[Any] = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"""
""" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"""
""" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"""
""" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut."""
)
lowerCAmelCase_ :Union[str, Any] = (
"""1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and"""
""" the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
lowerCAmelCase_ :Tuple = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
lowerCAmelCase_ , lowerCAmelCase_ :int = check_copies.convert_to_localized_md(
__A , __A , localized_readme["""format_model_list"""] )
# Check if the model link is synchronized.
self.assertEqual(__A , __A )
| 365 |
"""simple docstring"""
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
StableDiffusionControlNetImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class _SCREAMING_SNAKE_CASE ( A__ , A__ , A__ , unittest.TestCase ):
UpperCAmelCase_ :int = StableDiffusionControlNetImgaImgPipeline
UpperCAmelCase_ :str = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
UpperCAmelCase_ :Tuple = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
UpperCAmelCase_ :List[str] = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"} )
UpperCAmelCase_ :Optional[Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS
def __lowerCAmelCase ( self ) -> List[str]:
torch.manual_seed(0 )
lowerCAmelCase_ :Tuple = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
torch.manual_seed(0 )
lowerCAmelCase_ :List[Any] = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
torch.manual_seed(0 )
lowerCAmelCase_ :Optional[Any] = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=__A , set_alpha_to_one=__A , )
torch.manual_seed(0 )
lowerCAmelCase_ :List[str] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
lowerCAmelCase_ :Union[str, Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
lowerCAmelCase_ :List[Any] = CLIPTextModel(__A )
lowerCAmelCase_ :int = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
lowerCAmelCase_ :Union[str, Any] = {
"""unet""": unet,
"""controlnet""": controlnet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def __lowerCAmelCase ( self , __A , __A=0 ) -> List[str]:
if str(__A ).startswith("""mps""" ):
lowerCAmelCase_ :Tuple = torch.manual_seed(__A )
else:
lowerCAmelCase_ :Optional[int] = torch.Generator(device=__A ).manual_seed(__A )
lowerCAmelCase_ :List[Any] = 2
lowerCAmelCase_ :int = randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=__A , device=torch.device(__A ) , )
lowerCAmelCase_ :Optional[int] = floats_tensor(control_image.shape , rng=random.Random(__A ) ).to(__A )
lowerCAmelCase_ :Tuple = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowerCAmelCase_ :List[Any] = Image.fromarray(np.uinta(__A ) ).convert("""RGB""" ).resize((64, 64) )
lowerCAmelCase_ :Union[str, Any] = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
"""image""": image,
"""control_image""": control_image,
}
return inputs
def __lowerCAmelCase ( self ) -> int:
return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 )
def __lowerCAmelCase ( self ) -> List[str]:
self._test_inference_batch_single_identical(expected_max_diff=2E-3 )
class _SCREAMING_SNAKE_CASE ( A__ , A__ , unittest.TestCase ):
UpperCAmelCase_ :List[str] = StableDiffusionControlNetImgaImgPipeline
UpperCAmelCase_ :int = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
UpperCAmelCase_ :str = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
UpperCAmelCase_ :int = frozenset([] ) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
def __lowerCAmelCase ( self ) -> Optional[int]:
torch.manual_seed(0 )
lowerCAmelCase_ :Dict = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
torch.manual_seed(0 )
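        # Randomize the ControlNet projection convolutions below (they are
        # zero-initialized by default) so the dummy control inputs measurably
        # affect the outputs compared in this test.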
        def init_weights(m ):
            if isinstance(m , torch.nn.Conv2d ):
                torch.nn.init.normal(m.weight )
                m.bias.data.fill_(1.0 )
lowerCAmelCase_ :List[Any] = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
        controlneta.controlnet_down_blocks.apply(init_weights )
torch.manual_seed(0 )
lowerCAmelCase_ :Optional[Any] = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
        controlneta.controlnet_down_blocks.apply(init_weights )
torch.manual_seed(0 )
lowerCAmelCase_ :Optional[Any] = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=__A , set_alpha_to_one=__A , )
torch.manual_seed(0 )
lowerCAmelCase_ :Optional[int] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
lowerCAmelCase_ :Optional[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
lowerCAmelCase_ :str = CLIPTextModel(__A )
lowerCAmelCase_ :str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
lowerCAmelCase_ :Optional[Any] = MultiControlNetModel([controlneta, controlneta] )
lowerCAmelCase_ :List[Any] = {
"""unet""": unet,
"""controlnet""": controlnet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def __lowerCAmelCase ( self , __A , __A=0 ) -> str:
if str(__A ).startswith("""mps""" ):
lowerCAmelCase_ :Optional[Any] = torch.manual_seed(__A )
else:
lowerCAmelCase_ :List[Any] = torch.Generator(device=__A ).manual_seed(__A )
lowerCAmelCase_ :Optional[Any] = 2
lowerCAmelCase_ :Optional[int] = [
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=__A , device=torch.device(__A ) , ),
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=__A , device=torch.device(__A ) , ),
]
lowerCAmelCase_ :int = floats_tensor(control_image[0].shape , rng=random.Random(__A ) ).to(__A )
lowerCAmelCase_ :Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowerCAmelCase_ :List[Any] = Image.fromarray(np.uinta(__A ) ).convert("""RGB""" ).resize((64, 64) )
lowerCAmelCase_ :List[str] = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
"""image""": image,
"""control_image""": control_image,
}
return inputs
def __lowerCAmelCase ( self ) -> Optional[Any]:
lowerCAmelCase_ :List[str] = self.get_dummy_components()
lowerCAmelCase_ :Tuple = self.pipeline_class(**__A )
pipe.to(__A )
lowerCAmelCase_ :Union[str, Any] = 1_0.0
lowerCAmelCase_ :Union[str, Any] = 4
lowerCAmelCase_ :Tuple = self.get_dummy_inputs(__A )
lowerCAmelCase_ :List[str] = steps
lowerCAmelCase_ :int = scale
lowerCAmelCase_ :Union[str, Any] = pipe(**__A )[0]
lowerCAmelCase_ :Any = self.get_dummy_inputs(__A )
lowerCAmelCase_ :str = steps
lowerCAmelCase_ :str = scale
lowerCAmelCase_ :Tuple = pipe(**__A , control_guidance_start=0.1 , control_guidance_end=0.2 )[0]
lowerCAmelCase_ :Optional[Any] = self.get_dummy_inputs(__A )
lowerCAmelCase_ :Union[str, Any] = steps
lowerCAmelCase_ :Union[str, Any] = scale
lowerCAmelCase_ :str = pipe(**__A , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7] )[0]
lowerCAmelCase_ :List[str] = self.get_dummy_inputs(__A )
lowerCAmelCase_ :Optional[int] = steps
lowerCAmelCase_ :Tuple = scale
lowerCAmelCase_ :str = pipe(**__A , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8] )[0]
# make sure that all outputs are different
assert np.sum(np.abs(output_a - output_a ) ) > 1E-3
assert np.sum(np.abs(output_a - output_a ) ) > 1E-3
assert np.sum(np.abs(output_a - output_a ) ) > 1E-3
def __lowerCAmelCase ( self ) -> Dict:
return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def __lowerCAmelCase ( self ) -> Tuple:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 )
def __lowerCAmelCase ( self ) -> Optional[int]:
self._test_inference_batch_single_identical(expected_max_diff=2E-3 )
def __lowerCAmelCase ( self ) -> List[str]:
lowerCAmelCase_ :str = self.get_dummy_components()
lowerCAmelCase_ :Tuple = self.pipeline_class(**__A )
pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
with tempfile.TemporaryDirectory() as tmpdir:
try:
# save_pretrained is not implemented for Multi-ControlNet
pipe.save_pretrained(__A )
except NotImplementedError:
pass
@slow
@require_torch_gpu
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __lowerCAmelCase ( self ) -> int:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self ) -> str:
lowerCAmelCase_ :Any = ControlNetModel.from_pretrained("""lllyasviel/sd-controlnet-canny""" )
lowerCAmelCase_ :int = StableDiffusionControlNetImgaImgPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , safety_checker=__A , controlnet=__A )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=__A )
lowerCAmelCase_ :List[str] = torch.Generator(device="""cpu""" ).manual_seed(0 )
lowerCAmelCase_ :List[Any] = """evil space-punk bird"""
lowerCAmelCase_ :List[Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png""" ).resize((512, 512) )
lowerCAmelCase_ :int = load_image(
"""https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png""" ).resize((512, 512) )
lowerCAmelCase_ :Union[str, Any] = pipe(
__A , __A , control_image=__A , generator=__A , output_type="""np""" , num_inference_steps=50 , strength=0.6 , )
lowerCAmelCase_ :Tuple = output.images[0]
assert image.shape == (512, 512, 3)
lowerCAmelCase_ :Tuple = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy""" )
assert np.abs(expected_image - image ).max() < 9E-2
| 1 | 0 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class _SCREAMING_SNAKE_CASE ( UpperCAmelCase_ , unittest.TestCase ):
UpperCAmelCase_ :Optional[Any] = DDIMPipeline
UpperCAmelCase_ :str = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
UpperCAmelCase_ :Any = PipelineTesterMixin.required_optional_params - {
"""num_images_per_prompt""",
"""latents""",
"""callback""",
"""callback_steps""",
}
UpperCAmelCase_ :str = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
UpperCAmelCase_ :Optional[Any] = False
def __lowerCAmelCase ( self ) -> Tuple:
torch.manual_seed(0 )
lowerCAmelCase_ :Optional[int] = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , )
lowerCAmelCase_ :int = DDIMScheduler()
lowerCAmelCase_ :Any = {'''unet''': unet, '''scheduler''': scheduler}
return components
def __lowerCAmelCase ( self , __A , __A=0 ) -> Optional[Any]:
if str(__lowercase ).startswith("""mps""" ):
lowerCAmelCase_ :List[Any] = torch.manual_seed(__lowercase )
else:
lowerCAmelCase_ :List[str] = torch.Generator(device=__lowercase ).manual_seed(__lowercase )
lowerCAmelCase_ :List[Any] = {
'''batch_size''': 1,
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def __lowerCAmelCase ( self ) -> Optional[Any]:
lowerCAmelCase_ :List[Any] = '''cpu'''
lowerCAmelCase_ :int = self.get_dummy_components()
lowerCAmelCase_ :List[Any] = self.pipeline_class(**__lowercase )
pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
lowerCAmelCase_ :Any = self.get_dummy_inputs(__lowercase )
lowerCAmelCase_ :Union[str, Any] = pipe(**__lowercase ).images
lowerCAmelCase_ :List[Any] = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 32, 32, 3) )
lowerCAmelCase_ :int = np.array(
[1.0_00E00, 5.7_17E-01, 4.7_17E-01, 1.0_00E00, 0.0_00E00, 1.0_00E00, 3.0_00E-04, 0.0_00E00, 9.0_00E-04] )
lowerCAmelCase_ :str = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__lowercase , 1E-3 )
def __lowerCAmelCase ( self ) -> List[Any]:
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
def __lowerCAmelCase ( self ) -> Optional[int]:
super().test_save_load_local(expected_max_difference=3E-3 )
def __lowerCAmelCase ( self ) -> Tuple:
super().test_save_load_optional_components(expected_max_difference=3E-3 )
def __lowerCAmelCase ( self ) -> int:
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __lowerCAmelCase ( self ) -> Any:
lowerCAmelCase_ :List[str] = '''google/ddpm-cifar10-32'''
lowerCAmelCase_ :Optional[Any] = UNetaDModel.from_pretrained(__lowercase )
lowerCAmelCase_ :Dict = DDIMScheduler()
lowerCAmelCase_ :Optional[Any] = DDIMPipeline(unet=__lowercase , scheduler=__lowercase )
ddim.to(__lowercase )
ddim.set_progress_bar_config(disable=__lowercase )
lowerCAmelCase_ :List[str] = torch.manual_seed(0 )
lowerCAmelCase_ :Optional[int] = ddim(generator=__lowercase , eta=0.0 , output_type="""numpy""" ).images
lowerCAmelCase_ :Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowerCAmelCase_ :Union[str, Any] = np.array([0.1_7_2_3, 0.1_6_1_7, 0.1_6_0_0, 0.1_6_2_6, 0.1_4_9_7, 0.1_5_1_3, 0.1_5_0_5, 0.1_4_4_2, 0.1_4_5_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __lowerCAmelCase ( self ) -> List[str]:
lowerCAmelCase_ :Optional[int] = '''google/ddpm-ema-bedroom-256'''
lowerCAmelCase_ :str = UNetaDModel.from_pretrained(__lowercase )
lowerCAmelCase_ :int = DDIMScheduler.from_pretrained(__lowercase )
lowerCAmelCase_ :Any = DDIMPipeline(unet=__lowercase , scheduler=__lowercase )
ddpm.to(__lowercase )
ddpm.set_progress_bar_config(disable=__lowercase )
lowerCAmelCase_ :Dict = torch.manual_seed(0 )
lowerCAmelCase_ :Any = ddpm(generator=__lowercase , output_type="""numpy""" ).images
lowerCAmelCase_ :List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
lowerCAmelCase_ :Dict = np.array([0.0_0_6_0, 0.0_2_0_1, 0.0_3_4_4, 0.0_0_2_4, 0.0_0_1_8, 0.0_0_0_2, 0.0_0_2_2, 0.0_0_0_0, 0.0_0_6_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
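# Note (added for illustration, not part of the original tests): `eta=0.0` above
# selects the fully deterministic DDIM sampler. In the DDIM update rule, the
# injected noise is scaled by
#     sigma_t = eta * sqrt((1 - alpha_bar_{t-1}) / (1 - alpha_bar_t))
#                   * sqrt(1 - alpha_bar_t / alpha_bar_{t-1})
# so eta = 0 removes the stochastic term entirely, and a fixed seed then always
# produces the same image, which is what makes the exact slice comparisons in
# these tests stable.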
| 366 |
"""simple docstring"""
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class _SCREAMING_SNAKE_CASE ( A__ , A__ , A__ ):
UpperCAmelCase_ :List[str] = [r"h\.\d+\.attn\.bias", r"h\.\d+\.attn\.masked_bias"]
@register_to_config
def __init__( self , __A , __A , __A = None , __A = 5_0257 , __A = 1024 , __A = 768 , __A = 12 , __A = 12 , __A = None , __A = "gelu_new" , __A = 0.1 , __A = 0.1 , __A = 0.1 , __A = 1E-5 , __A = 0.0_2 , __A = True , __A = True , __A = False , __A = False , ) -> Optional[Any]:
super().__init__()
lowerCAmelCase_ :List[str] = prefix_length
if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
raise ValueError(
                f"""`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_inner_dim} and"""
f""" `n_embd`: {n_embd} are not equal.""" )
lowerCAmelCase_ :Union[str, Any] = prefix_inner_dim
lowerCAmelCase_ :str = prefix_hidden_dim
lowerCAmelCase_ :str = (
nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim )
if self.prefix_hidden_dim is not None
else nn.Identity()
)
lowerCAmelCase_ :List[Any] = (
nn.Linear(self.prefix_hidden_dim , __A ) if self.prefix_hidden_dim is not None else nn.Identity()
)
lowerCAmelCase_ :Any = GPTaConfig(
vocab_size=__A , n_positions=__A , n_embd=__A , n_layer=__A , n_head=__A , n_inner=__A , activation_function=__A , resid_pdrop=__A , embd_pdrop=__A , attn_pdrop=__A , layer_norm_epsilon=__A , initializer_range=__A , scale_attn_weights=__A , use_cache=__A , scale_attn_by_inverse_layer_idx=__A , reorder_and_upcast_attn=__A , )
lowerCAmelCase_ :Any = GPTaLMHeadModel(__A )
def __lowerCAmelCase ( self , __A , __A , __A = None , __A = None , ) -> List[str]:
lowerCAmelCase_ :str = self.transformer.transformer.wte(__A )
lowerCAmelCase_ :Any = self.encode_prefix(__A )
lowerCAmelCase_ :Optional[Any] = self.decode_prefix(__A )
lowerCAmelCase_ :Optional[int] = torch.cat((prefix_embeds, embedding_text) , dim=1 )
if labels is not None:
lowerCAmelCase_ :int = self.get_dummy_token(input_ids.shape[0] , input_ids.device )
lowerCAmelCase_ :Optional[Any] = torch.cat((dummy_token, input_ids) , dim=1 )
lowerCAmelCase_ :Tuple = self.transformer(inputs_embeds=__A , labels=__A , attention_mask=__A )
if self.prefix_hidden_dim is not None:
return out, hidden
else:
return out
def __lowerCAmelCase ( self , __A , __A ) -> torch.Tensor:
return torch.zeros(__A , self.prefix_length , dtype=torch.intaa , device=__A )
def __lowerCAmelCase ( self , __A ) -> Optional[int]:
return self.encode_prefix(__A )
@torch.no_grad()
def __lowerCAmelCase ( self , __A , __A , __A ) -> Optional[int]:
lowerCAmelCase_ :Tuple = torch.split(__A , 1 , dim=0 )
lowerCAmelCase_ :Optional[int] = []
lowerCAmelCase_ :List[str] = []
for feature in features:
lowerCAmelCase_ :Tuple = self.decode_prefix(feature.to(__A ) ) # back to the clip feature
# Only support beam search for now
lowerCAmelCase_ , lowerCAmelCase_ :Optional[Any] = self.generate_beam(
input_embeds=__A , device=__A , eos_token_id=__A )
generated_tokens.append(output_tokens[0] )
generated_seq_lengths.append(seq_lengths[0] )
lowerCAmelCase_ :Tuple = torch.stack(__A )
lowerCAmelCase_ :int = torch.stack(__A )
return generated_tokens, generated_seq_lengths
@torch.no_grad()
def __lowerCAmelCase ( self , __A=None , __A=None , __A=None , __A = 5 , __A = 67 , __A = 1.0 , __A = None , ) -> Union[str, Any]:
lowerCAmelCase_ :Optional[int] = eos_token_id
lowerCAmelCase_ :Optional[int] = None
lowerCAmelCase_ :Any = None
lowerCAmelCase_ :int = torch.ones(__A , device=__A , dtype=torch.int )
lowerCAmelCase_ :Optional[int] = torch.zeros(__A , device=__A , dtype=torch.bool )
if input_embeds is not None:
lowerCAmelCase_ :List[str] = input_embeds
else:
lowerCAmelCase_ :Union[str, Any] = self.transformer.transformer.wte(__A )
for i in range(__A ):
lowerCAmelCase_ :Optional[int] = self.transformer(inputs_embeds=__A )
lowerCAmelCase_ :str = outputs.logits
lowerCAmelCase_ :str = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
lowerCAmelCase_ :Dict = logits.softmax(-1 ).log()
if scores is None:
lowerCAmelCase_ , lowerCAmelCase_ :Any = logits.topk(__A , -1 )
lowerCAmelCase_ :Union[str, Any] = generated.expand(__A , *generated.shape[1:] )
lowerCAmelCase_ , lowerCAmelCase_ :List[str] = next_tokens.permute(1 , 0 ), scores.squeeze(0 )
if tokens is None:
lowerCAmelCase_ :List[str] = next_tokens
else:
lowerCAmelCase_ :List[Any] = tokens.expand(__A , *tokens.shape[1:] )
lowerCAmelCase_ :Any = torch.cat((tokens, next_tokens) , dim=1 )
else:
lowerCAmelCase_ :List[Any] = -float(np.inf )
lowerCAmelCase_ :int = 0
lowerCAmelCase_ :Optional[int] = scores[:, None] + logits
seq_lengths[~is_stopped] += 1
lowerCAmelCase_ :List[Any] = scores_sum / seq_lengths[:, None]
lowerCAmelCase_ , lowerCAmelCase_ :Tuple = scores_sum_average.view(-1 ).topk(__A , -1 )
lowerCAmelCase_ :Optional[Any] = next_tokens // scores_sum.shape[1]
lowerCAmelCase_ :Dict = seq_lengths[next_tokens_source]
lowerCAmelCase_ :Tuple = next_tokens % scores_sum.shape[1]
lowerCAmelCase_ :Optional[Any] = next_tokens.unsqueeze(1 )
lowerCAmelCase_ :str = tokens[next_tokens_source]
lowerCAmelCase_ :List[Any] = torch.cat((tokens, next_tokens) , dim=1 )
lowerCAmelCase_ :Dict = generated[next_tokens_source]
lowerCAmelCase_ :Dict = scores_sum_average * seq_lengths
lowerCAmelCase_ :Tuple = is_stopped[next_tokens_source]
lowerCAmelCase_ :str = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 )
lowerCAmelCase_ :List[Any] = torch.cat((generated, next_token_embed) , dim=1 )
lowerCAmelCase_ :Optional[int] = is_stopped + next_tokens.eq(__A ).squeeze()
if is_stopped.all():
break
lowerCAmelCase_ :str = scores / seq_lengths
lowerCAmelCase_ :Optional[int] = scores.argsort(descending=__A )
# tokens tensors are already padded to max_seq_length
lowerCAmelCase_ :Optional[Any] = [tokens[i] for i in order]
lowerCAmelCase_ :Dict = torch.stack(__A , dim=0 )
lowerCAmelCase_ :Tuple = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype )
return output_texts, seq_lengths
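# A minimal toy sketch (added for illustration; the values are made up) of the
# length-normalized beam scoring used in `generate_beam` above: accumulated
# log-probabilities are divided by sequence length before `topk`, so longer
# candidates are not penalized simply for having more terms in their sum.
def _beam_scoring_demo():
    scores_sum = torch.tensor([[-1.2, -3.0], [-0.8, -2.5]])  # (beams, vocab) summed log-probs
    seq_lengths = torch.tensor([4.0, 2.0])  # current length of each beam
    averaged = scores_sum / seq_lengths[:, None]  # per-token average log-prob
    best_scores, flat_idx = averaged.view(-1).topk(2, -1)
    source_beam = flat_idx // scores_sum.shape[1]  # which beam each winner extends
    next_token = flat_idx % scores_sum.shape[1]  # which token id extends it
    return source_beam, next_token, best_scores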
| 1 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'microsoft/resnet-50': 'https://huggingface.co/microsoft/resnet-50/blob/main/config.json',
}
class _SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ ):
UpperCAmelCase_ :Optional[Any] = """resnet"""
UpperCAmelCase_ :Tuple = ["""basic""", """bottleneck"""]
def __init__( self , __A=3 , __A=64 , __A=[256, 512, 1024, 2048] , __A=[3, 4, 6, 3] , __A="bottleneck" , __A="relu" , __A=False , __A=None , __A=None , **__A , ) -> str:
super().__init__(**__a )
if layer_type not in self.layer_types:
raise ValueError(f"""layer_type={layer_type} is not one of {",".join(self.layer_types )}""" )
lowerCAmelCase_ :Any = num_channels
lowerCAmelCase_ :List[str] = embedding_size
lowerCAmelCase_ :Optional[Any] = hidden_sizes
lowerCAmelCase_ :Optional[Any] = depths
lowerCAmelCase_ :Tuple = layer_type
lowerCAmelCase_ :int = hidden_act
lowerCAmelCase_ :List[str] = downsample_in_first_stage
lowerCAmelCase_ :Optional[int] = ["""stem"""] + [f"""stage{idx}""" for idx in range(1 , len(__a ) + 1 )]
lowerCAmelCase_ , lowerCAmelCase_ :Any = get_aligned_output_features_output_indices(
out_features=__a , out_indices=__a , stage_names=self.stage_names )
class _SCREAMING_SNAKE_CASE ( UpperCamelCase__ ):
UpperCAmelCase_ :int = version.parse("1.11" )
@property
def __lowerCAmelCase ( self ) -> Tuple:
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def __lowerCAmelCase ( self ) -> List[Any]:
return 1E-3
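# An illustrative sketch (an assumption about the helper's behavior, not taken
# from this file) of what `get_aligned_output_features_output_indices` computes
# for the backbone attributes set in `__init__` above: whichever of
# `out_features` / `out_indices` is missing is derived from the other using
# `stage_names`, defaulting to the last stage when both are None.
def _backbone_alignment_demo():
    stage_names = ["stem", "stage1", "stage2", "stage3", "stage4"]
    out_features, out_indices = get_aligned_output_features_output_indices(
        out_features=["stage4"], out_indices=None, stage_names=stage_names
    )
    # Expected: out_features == ["stage4"] and out_indices pointing at position 4,
    # the index of "stage4" within stage_names.
    return out_features, out_indices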
| 367 |
"""simple docstring"""
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'facebook/detr-resnet-50': 'https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json',
# See all DETR models at https://huggingface.co/models?filter=detr
}
class _SCREAMING_SNAKE_CASE ( A__ ):
UpperCAmelCase_ :str = "detr"
UpperCAmelCase_ :str = ["past_key_values"]
UpperCAmelCase_ :Tuple = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
def __init__( self , __A=True , __A=None , __A=3 , __A=100 , __A=6 , __A=2048 , __A=8 , __A=6 , __A=2048 , __A=8 , __A=0.0 , __A=0.0 , __A=True , __A="relu" , __A=256 , __A=0.1 , __A=0.0 , __A=0.0 , __A=0.0_2 , __A=1.0 , __A=False , __A="sine" , __A="resnet50" , __A=True , __A=False , __A=1 , __A=5 , __A=2 , __A=1 , __A=1 , __A=5 , __A=2 , __A=0.1 , **__A , ) -> List[Any]:
if backbone_config is not None and use_timm_backbone:
raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" )
if not use_timm_backbone:
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
lowerCAmelCase_ :int = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] )
elif isinstance(__A , __A ):
lowerCAmelCase_ :str = backbone_config.get("""model_type""" )
lowerCAmelCase_ :List[Any] = CONFIG_MAPPING[backbone_model_type]
lowerCAmelCase_ :Optional[Any] = config_class.from_dict(__A )
# set timm attributes to None
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :Optional[Any] = None, None, None
lowerCAmelCase_ :Tuple = use_timm_backbone
lowerCAmelCase_ :Optional[int] = backbone_config
lowerCAmelCase_ :Optional[int] = num_channels
lowerCAmelCase_ :int = num_queries
lowerCAmelCase_ :List[Any] = d_model
lowerCAmelCase_ :Optional[int] = encoder_ffn_dim
lowerCAmelCase_ :Tuple = encoder_layers
lowerCAmelCase_ :int = encoder_attention_heads
lowerCAmelCase_ :Optional[Any] = decoder_ffn_dim
lowerCAmelCase_ :List[str] = decoder_layers
lowerCAmelCase_ :Dict = decoder_attention_heads
lowerCAmelCase_ :Dict = dropout
lowerCAmelCase_ :Tuple = attention_dropout
lowerCAmelCase_ :Union[str, Any] = activation_dropout
lowerCAmelCase_ :Any = activation_function
lowerCAmelCase_ :List[str] = init_std
lowerCAmelCase_ :Optional[int] = init_xavier_std
lowerCAmelCase_ :int = encoder_layerdrop
lowerCAmelCase_ :Union[str, Any] = decoder_layerdrop
lowerCAmelCase_ :List[str] = encoder_layers
lowerCAmelCase_ :Union[str, Any] = auxiliary_loss
lowerCAmelCase_ :str = position_embedding_type
lowerCAmelCase_ :List[Any] = backbone
lowerCAmelCase_ :str = use_pretrained_backbone
lowerCAmelCase_ :str = dilation
# Hungarian matcher
lowerCAmelCase_ :List[Any] = class_cost
lowerCAmelCase_ :Union[str, Any] = bbox_cost
lowerCAmelCase_ :Tuple = giou_cost
# Loss coefficients
lowerCAmelCase_ :Optional[int] = mask_loss_coefficient
lowerCAmelCase_ :Union[str, Any] = dice_loss_coefficient
lowerCAmelCase_ :Tuple = bbox_loss_coefficient
lowerCAmelCase_ :Tuple = giou_loss_coefficient
lowerCAmelCase_ :Dict = eos_coefficient
super().__init__(is_encoder_decoder=__A , **__A )
@property
def __lowerCAmelCase ( self ) -> int:
return self.encoder_attention_heads
@property
def __lowerCAmelCase ( self ) -> int:
return self.d_model
@classmethod
def __lowerCAmelCase ( cls , __A , **__A ) -> Any:
return cls(backbone_config=__A , **__A )
def __lowerCAmelCase ( self ) -> Dict[str, any]:
lowerCAmelCase_ :List[str] = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
lowerCAmelCase_ :Dict = self.backbone_config.to_dict()
lowerCAmelCase_ :str = self.__class__.model_type
return output
class _SCREAMING_SNAKE_CASE ( A__ ):
UpperCAmelCase_ :List[Any] = version.parse("1.11" )
@property
def __lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
("""pixel_mask""", {0: """batch"""}),
] )
@property
def __lowerCAmelCase ( self ) -> float:
return 1E-5
@property
def __lowerCAmelCase ( self ) -> int:
return 12
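# A condensed sketch (added for illustration; this is not the transformers
# implementation) of how the matcher coefficients above are typically combined
# in DETR-style Hungarian matching: the per-pair classification, L1-box and
# generalized-IoU costs are weighted by `class_cost`, `bbox_cost` and
# `giou_cost`, and `scipy.optimize.linear_sum_assignment` then picks the
# cheapest one-to-one assignment between predictions and ground-truth boxes.
def _hungarian_matching_demo(cost_class, cost_bbox, cost_giou, class_cost=1, bbox_cost=5, giou_cost=2):
    import numpy as np
    from scipy.optimize import linear_sum_assignment

    # Each cost_* argument is a (num_queries, num_targets) array of toy values.
    cost_matrix = class_cost * cost_class + bbox_cost * cost_bbox + giou_cost * cost_giou
    row_ind, col_ind = linear_sum_assignment(np.asarray(cost_matrix))
    return list(zip(row_ind, col_ind))  # matched (prediction, target) pairs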
| 1 | 0 |
"""simple docstring"""
import string
def _snake_case ( lowercase__ : Optional[Any] ) -> None:
'''simple docstring'''
for key in range(len(string.ascii_uppercase ) ):
lowerCAmelCase_ :int = ''
for symbol in message:
if symbol in string.ascii_uppercase:
lowerCAmelCase_ :int = string.ascii_uppercase.find(lowerCAmelCase_ )
lowerCAmelCase_ :str = num - key
if num < 0:
lowerCAmelCase_ :Optional[int] = num + len(string.ascii_uppercase )
lowerCAmelCase_ :Optional[int] = translated + string.ascii_uppercase[num]
else:
lowerCAmelCase_ :List[Any] = translated + symbol
print(f"""Decryption using Key #{key}: {translated}""" )
def _snake_case ( ) -> None:
'''simple docstring'''
lowerCAmelCase_ :str = input("""Encrypted message: """ )
lowerCAmelCase_ :Dict = message.upper()
decrypt(lowerCAmelCase_ )
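# A tiny round-trip sketch (added for illustration; `_caesar_encrypt_demo` is a
# hypothetical helper, not part of the original module): shifting each letter
# forward by `key` encrypts, and the brute-force decryption above simply tries
# every backward shift so a human can spot the real plaintext among 26 candidates.
def _caesar_encrypt_demo(message: str, key: int) -> str:
    result = ""
    for symbol in message.upper():
        if symbol in string.ascii_uppercase:
            num = (string.ascii_uppercase.find(symbol) + key) % len(string.ascii_uppercase)
            result += string.ascii_uppercase[num]
        else:
            result += symbol
    return result
# Example: _caesar_encrypt_demo("HELLO", 3) == "KHOOR"; key #3 in the decryption
# loop above then prints "HELLO" among its candidates.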
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 368 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
__UpperCAmelCase = {'configuration_deit': ['DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DeiTConfig', 'DeiTOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ['DeiTFeatureExtractor']
__UpperCAmelCase = ['DeiTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'DEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'DeiTForImageClassification',
'DeiTForImageClassificationWithTeacher',
'DeiTForMaskedImageModeling',
'DeiTModel',
'DeiTPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDeiTForImageClassification',
'TFDeiTForImageClassificationWithTeacher',
'TFDeiTForMaskedImageModeling',
'TFDeiTModel',
'TFDeiTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
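# Note (added for illustration): the `_LazyModule` installed above replaces this
# module in `sys.modules`, so the heavyweight backends guarded by
# `is_torch_available()` / `is_tf_available()` / `is_vision_available()` are only
# imported when one of their symbols is first accessed, e.g.
#
#     from transformers.models.deit import DeiTConfig  # cheap: config code only
#     from transformers.models.deit import DeiTModel   # first touch pulls in torch
#
# and a missing optional dependency surfaces at attribute access time rather
# than at `import transformers` time.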
| 1 | 0 |
"""simple docstring"""
def _snake_case ( lowercase__ : Any ) -> list:
'''simple docstring'''
lowerCAmelCase_ :List[str] = [0] * len(__SCREAMING_SNAKE_CASE )
for i in range(1 , len(__SCREAMING_SNAKE_CASE ) ):
# use last results for better performance - dynamic programming
lowerCAmelCase_ :Optional[Any] = prefix_result[i - 1]
while j > 0 and input_string[i] != input_string[j]:
lowerCAmelCase_ :List[Any] = prefix_result[j - 1]
if input_string[i] == input_string[j]:
j += 1
lowerCAmelCase_ :Union[str, Any] = j
return prefix_result
def _snake_case ( lowercase__ : List[Any] ) -> int:
'''simple docstring'''
return max(prefix_function(__SCREAMING_SNAKE_CASE ) )
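# A usage sketch (added for illustration; it assumes the first helper above is
# available under `prefix_function`, the name the second helper calls it by):
# prefix_result[i] is the length of the longest proper prefix of
# input_string[: i + 1] that is also a suffix of it -- the failure table that
# drives Knuth-Morris-Pratt string matching.
def _prefix_function_demo():
    pi = prefix_function("aabaaab")
    assert pi == [0, 1, 0, 1, 2, 2, 3]
    return max(pi)  # 3, matching what the second helper computes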
if __name__ == "__main__":
import doctest
doctest.testmod()
| 369 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__UpperCAmelCase = {
'configuration_squeezebert': [
'SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SqueezeBertConfig',
'SqueezeBertOnnxConfig',
],
'tokenization_squeezebert': ['SqueezeBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ['SqueezeBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'SqueezeBertForMaskedLM',
'SqueezeBertForMultipleChoice',
'SqueezeBertForQuestionAnswering',
'SqueezeBertForSequenceClassification',
'SqueezeBertForTokenClassification',
'SqueezeBertModel',
'SqueezeBertModule',
'SqueezeBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 1 | 0 |
"""simple docstring"""
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
__UpperCAmelCase = datasets.utils.logging.get_logger(__name__)
@dataclass
class _SCREAMING_SNAKE_CASE ( datasets.BuilderConfig ):
UpperCAmelCase_ :Optional[datasets.Features] = None
UpperCAmelCase_ :str = "utf-8"
UpperCAmelCase_ :Optional[str] = None
UpperCAmelCase_ :Optional[str] = None
UpperCAmelCase_ :bool = True # deprecated
UpperCAmelCase_ :Optional[int] = None # deprecated
UpperCAmelCase_ :int = 10 << 20 # 10MB
UpperCAmelCase_ :Optional[bool] = None
class _SCREAMING_SNAKE_CASE ( datasets.ArrowBasedBuilder ):
UpperCAmelCase_ :Optional[Any] = JsonConfig
def __lowerCAmelCase ( self ) -> List[str]:
if self.config.block_size is not None:
logger.warning("""The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead""" )
lowerCAmelCase_ :str = self.config.block_size
if self.config.use_threads is not True:
logger.warning(
"""The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.""" )
if self.config.newlines_in_values is not None:
raise ValueError("""The JSON loader parameter `newlines_in_values` is no longer supported""" )
return datasets.DatasetInfo(features=self.config.features )
def __lowerCAmelCase ( self , __A ) -> List[str]:
if not self.config.data_files:
raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
lowerCAmelCase_ :Any = dl_manager.download_and_extract(self.config.data_files )
if isinstance(__snake_case , (str, list, tuple) ):
lowerCAmelCase_ :str = data_files
if isinstance(__snake_case , __snake_case ):
lowerCAmelCase_ :Union[str, Any] = [files]
lowerCAmelCase_ :List[Any] = [dl_manager.iter_files(__snake_case ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files} )]
lowerCAmelCase_ :int = []
for split_name, files in data_files.items():
if isinstance(__snake_case , __snake_case ):
lowerCAmelCase_ :Dict = [files]
lowerCAmelCase_ :Tuple = [dl_manager.iter_files(__snake_case ) for file in files]
splits.append(datasets.SplitGenerator(name=__snake_case , gen_kwargs={"""files""": files} ) )
return splits
def __lowerCAmelCase ( self , __A ) -> pa.Table:
if self.config.features is not None:
# adding missing columns
for column_name in set(self.config.features ) - set(pa_table.column_names ):
lowerCAmelCase_ :str = self.config.features.arrow_schema.field(__snake_case ).type
lowerCAmelCase_ :Tuple = pa_table.append_column(__snake_case , pa.array([None] * len(__snake_case ) , type=__snake_case ) )
# more expensive cast to support nested structures with keys in a different order
# allows str <-> int/float or str to Audio for example
lowerCAmelCase_ :Optional[Any] = table_cast(__snake_case , self.config.features.arrow_schema )
return pa_table
def __lowerCAmelCase ( self , __A ) -> List[Any]:
for file_idx, file in enumerate(itertools.chain.from_iterable(__snake_case ) ):
# If the file is one json object and if we need to look at the list of items in one specific field
if self.config.field is not None:
with open(__snake_case , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
lowerCAmelCase_ :List[Any] = json.load(__snake_case )
# We keep only the field we are interested in
lowerCAmelCase_ :int = dataset[self.config.field]
                # We accept two formats: a list of dicts or a dict of lists
if isinstance(__snake_case , (list, tuple) ):
lowerCAmelCase_ :Optional[int] = set().union(*[row.keys() for row in dataset] )
lowerCAmelCase_ :int = {col: [row.get(__snake_case ) for row in dataset] for col in keys}
else:
lowerCAmelCase_ :Optional[Any] = dataset
lowerCAmelCase_ :Optional[Any] = pa.Table.from_pydict(__snake_case )
yield file_idx, self._cast_table(__snake_case )
# If the file has one json object per line
else:
with open(__snake_case , """rb""" ) as f:
lowerCAmelCase_ :Tuple = 0
# Use block_size equal to the chunk size divided by 32 to leverage multithreading
# Set a default minimum value of 16kB if the chunk size is really small
lowerCAmelCase_ :List[str] = max(self.config.chunksize // 32 , 16 << 10 )
lowerCAmelCase_ :str = (
self.config.encoding_errors if self.config.encoding_errors is not None else """strict"""
)
while True:
lowerCAmelCase_ :Any = f.read(self.config.chunksize )
if not batch:
break
# Finish current line
try:
batch += f.readline()
except (AttributeError, io.UnsupportedOperation):
batch += readline(__snake_case )
# PyArrow only accepts utf-8 encoded bytes
if self.config.encoding != "utf-8":
lowerCAmelCase_ :str = batch.decode(self.config.encoding , errors=__snake_case ).encode("""utf-8""" )
try:
while True:
try:
lowerCAmelCase_ :List[Any] = paj.read_json(
io.BytesIO(__snake_case ) , read_options=paj.ReadOptions(block_size=__snake_case ) )
break
except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
if (
isinstance(__snake_case , pa.ArrowInvalid )
and "straddling" not in str(__snake_case )
or block_size > len(__snake_case )
):
raise
else:
# Increase the block size in case it was too small.
# The block size will be reset for the next file.
logger.debug(
f"""Batch of {len(__snake_case )} bytes couldn\'t be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.""" )
block_size *= 2
except pa.ArrowInvalid as e:
try:
with open(
__snake_case , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
lowerCAmelCase_ :str = json.load(__snake_case )
except json.JSONDecodeError:
logger.error(f"""Failed to read file \'{file}\' with error {type(__snake_case )}: {e}""" )
raise e
# If possible, parse the file as a list of json objects and exit the loop
if isinstance(__snake_case , __snake_case ): # list is the only sequence type supported in JSON
try:
lowerCAmelCase_ :Optional[Any] = set().union(*[row.keys() for row in dataset] )
lowerCAmelCase_ :Dict = {col: [row.get(__snake_case ) for row in dataset] for col in keys}
lowerCAmelCase_ :str = pa.Table.from_pydict(__snake_case )
except (pa.ArrowInvalid, AttributeError) as e:
logger.error(f"""Failed to read file \'{file}\' with error {type(__snake_case )}: {e}""" )
raise ValueError(f"""Not able to read records in the JSON file at {file}.""" ) from None
yield file_idx, self._cast_table(__snake_case )
break
else:
logger.error(f"""Failed to read file \'{file}\' with error {type(__snake_case )}: {e}""" )
raise ValueError(
f"""Not able to read records in the JSON file at {file}. """
f"""You should probably indicate the field of the JSON file containing your records. """
                                    f"""This JSON file contains the following fields: {str(list(dataset.keys() ) )}. """
f"""Select the correct one and provide it as `field=\'XXX\'` to the dataset loading method. """ ) from None
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(__snake_case )
batch_idx += 1
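# A minimal sketch (added for illustration) of the chunked pyarrow path used in
# `_generate_tables` above: JSON Lines bytes are handed to `pyarrow.json.read_json`
# with an explicit block_size -- the same call the loader retries with a doubled
# block_size whenever a record straddles a block boundary.
def _paj_read_json_demo():
    raw = b'{"a": 1, "b": "x"}\n{"a": 2, "b": "y"}\n'
    table = paj.read_json(io.BytesIO(raw), read_options=paj.ReadOptions(block_size=16 << 10))
    return table.to_pydict()  # {'a': [1, 2], 'b': ['x', 'y']}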
| 370 |
"""simple docstring"""
__UpperCAmelCase = 2_56
# Modulus to hash a string
__UpperCAmelCase = 1_00_00_03
def _snake_case ( lowercase__ : str , lowercase__ : str ) -> bool:
'''simple docstring'''
lowerCAmelCase_ :Tuple = len(lowercase__ )
lowerCAmelCase_ :List[str] = len(lowercase__ )
if p_len > t_len:
return False
lowerCAmelCase_ :List[str] = 0
lowerCAmelCase_ :Optional[int] = 0
lowerCAmelCase_ :Any = 1
# Calculating the hash of pattern and substring of text
for i in range(lowercase__ ):
lowerCAmelCase_ :int = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus
lowerCAmelCase_ :Any = (ord(text[i] ) + text_hash * alphabet_size) % modulus
if i == p_len - 1:
continue
lowerCAmelCase_ :Optional[Any] = (modulus_power * alphabet_size) % modulus
for i in range(0 , t_len - p_len + 1 ):
if text_hash == p_hash and text[i : i + p_len] == pattern:
return True
if i == t_len - p_len:
continue
# Calculate the https://en.wikipedia.org/wiki/Rolling_hash
lowerCAmelCase_ :Any = (
(text_hash - ord(text[i] ) * modulus_power) * alphabet_size
+ ord(text[i + p_len] )
) % modulus
return False
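# A worked numeric sketch (added for illustration) of the rolling-hash update in
# the search loop above, with tiny parameters so it can be checked by hand:
# alphabet_size = 10, modulus = 97, and a length-2 window over "123".
def _rolling_hash_demo():
    alphabet_size, modulus = 10, 97
    # Hash of the window "12": (ord('1') * 10 + ord('2')) mod 97
    h = (ord("1") * alphabet_size + ord("2")) % modulus  # 540 % 97 == 55
    modulus_power = alphabet_size % modulus  # alphabet_size ** (window_len - 1)
    # Roll from "12" to "23": remove ord('1')'s contribution, shift, append ord('3').
    rolled = ((h - ord("1") * modulus_power) * alphabet_size + ord("3")) % modulus
    fresh = (ord("2") * alphabet_size + ord("3")) % modulus  # recomputed from scratch
    assert rolled == fresh == 66  # the O(1) update matches the O(window) recompute
    return rolled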
def _snake_case ( ) -> None:
'''simple docstring'''
lowerCAmelCase_ :int = """abc1abc12"""
lowerCAmelCase_ :Dict = """alskfjaldsabc1abc1abc12k23adsfabcabc"""
lowerCAmelCase_ :int = """alskfjaldsk23adsfabcabc"""
assert rabin_karp(lowercase__ , lowercase__ ) and not rabin_karp(lowercase__ , lowercase__ )
# Test 2)
lowerCAmelCase_ :Dict = """ABABX"""
lowerCAmelCase_ :int = """ABABZABABYABABX"""
assert rabin_karp(lowercase__ , lowercase__ )
# Test 3)
lowerCAmelCase_ :Union[str, Any] = """AAAB"""
lowerCAmelCase_ :List[str] = """ABAAAAAB"""
assert rabin_karp(lowercase__ , lowercase__ )
# Test 4)
lowerCAmelCase_ :Dict = """abcdabcy"""
lowerCAmelCase_ :Union[str, Any] = """abcxabcdabxabcdabcdabcy"""
assert rabin_karp(lowercase__ , lowercase__ )
# Test 5)
lowerCAmelCase_ :Optional[int] = """Lü"""
lowerCAmelCase_ :Optional[int] = """Lüsai"""
assert rabin_karp(lowercase__ , lowercase__ )
lowerCAmelCase_ :Optional[int] = """Lue"""
assert not rabin_karp(lowercase__ , lowercase__ )
print("""Success.""" )
if __name__ == "__main__":
test_rabin_karp()
| 1 | 0 |
"""simple docstring"""
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
class _SCREAMING_SNAKE_CASE :
@staticmethod
def __lowerCAmelCase ( *__A , **__A ) -> Any:
pass
def _snake_case ( lowercase__ : int ) -> str:
'''simple docstring'''
lowerCAmelCase_ :Dict = hashlib.mda(image.tobytes() )
return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
UpperCAmelCase_ :Any = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
def __lowerCAmelCase ( self , __A , __A , __A ) -> Optional[Any]:
lowerCAmelCase_ :List[Any] = DepthEstimationPipeline(model=A_ , image_processor=A_ )
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def __lowerCAmelCase ( self , __A , __A ) -> Any:
lowerCAmelCase_ :Any = depth_estimator("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
self.assertEqual({"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )} , A_ )
import datasets
lowerCAmelCase_ :Tuple = datasets.load_dataset("""hf-internal-testing/fixtures_image_utils""" , """image""" , split="""test""" )
lowerCAmelCase_ :Union[str, Any] = depth_estimator(
[
Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ),
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
# RGBA
dataset[0]["""file"""],
# LA
dataset[1]["""file"""],
# L
dataset[2]["""file"""],
] )
self.assertEqual(
[
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
] , A_ , )
@require_tf
@unittest.skip("""Depth estimation is not implemented in TF""" )
def __lowerCAmelCase ( self ) -> Optional[Any]:
pass
@slow
@require_torch
def __lowerCAmelCase ( self ) -> Optional[int]:
lowerCAmelCase_ :Tuple = """Intel/dpt-large"""
lowerCAmelCase_ :str = pipeline("""depth-estimation""" , model=A_ )
lowerCAmelCase_ :Optional[Any] = depth_estimator("""http://images.cocodataset.org/val2017/000000039769.jpg""" )
lowerCAmelCase_ :Optional[Any] = hashimage(outputs["""depth"""] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs["""predicted_depth"""].max().item() ) , 2_9.3_0_4 )
self.assertEqual(nested_simplify(outputs["""predicted_depth"""].min().item() ) , 2.6_6_2 )
@require_torch
def __lowerCAmelCase ( self ) -> Optional[Any]:
        self.skipTest("""There is no hf-internal-testing tiny model for either GLPN or DPT""" )
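# A short usage sketch (added for illustration; it mirrors the slow test above):
# the pipeline returns a dict holding both the raw "predicted_depth" tensor and a
# ready-to-view PIL "depth" image that can be saved directly.
def _depth_estimation_demo():
    depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
    outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
    outputs["depth"].save("depth.png")  # a PIL.Image.Image visualization
    return outputs["predicted_depth"]  # a torch.Tensor of per-pixel depths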
| 371 |
"""simple docstring"""
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
__UpperCAmelCase = 16
__UpperCAmelCase = 32
def _snake_case ( lowercase__ : Accelerator , lowercase__ : int = 1_6 ) -> str:
'''simple docstring'''
lowerCAmelCase_ :List[str] = AutoTokenizer.from_pretrained("""bert-base-cased""" )
lowerCAmelCase_ :Optional[Any] = load_dataset("""glue""" , """mrpc""" )
def tokenize_function(lowercase__ : int ):
# max_length=None => use the model max length (it's actually the default)
lowerCAmelCase_ :Optional[int] = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowercase__ , max_length=lowercase__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
lowerCAmelCase_ :Optional[Any] = datasets.map(
lowercase__ , batched=lowercase__ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
    # We also rename the 'label' column to 'labels', which is the expected name for labels by the models of the
# transformers library
lowerCAmelCase_ :str = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(lowercase__ : Dict ):
# On TPU it's best to pad everything to the same length or training will be very slow.
lowerCAmelCase_ :int = 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
lowerCAmelCase_ :List[Any] = 1_6
elif accelerator.mixed_precision != "no":
lowerCAmelCase_ :List[str] = 8
else:
lowerCAmelCase_ :Optional[int] = None
return tokenizer.pad(
lowercase__ , padding="""longest""" , max_length=lowercase__ , pad_to_multiple_of=lowercase__ , return_tensors="""pt""" , )
# Instantiate dataloaders.
lowerCAmelCase_ :Optional[Any] = DataLoader(
tokenized_datasets["""train"""] , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=lowercase__ )
lowerCAmelCase_ :List[Any] = DataLoader(
tokenized_datasets["""validation"""] , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=lowercase__ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
__UpperCAmelCase = mocked_dataloaders # noqa: F811
def _snake_case ( lowercase__ : List[Any] , lowercase__ : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , lowercase__ ) == "1":
lowerCAmelCase_ :Optional[Any] = 2
# New Code #
lowerCAmelCase_ :List[str] = int(args.gradient_accumulation_steps )
lowerCAmelCase_ :int = int(args.local_sgd_steps )
# Initialize accelerator
lowerCAmelCase_ :str = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=lowercase__ )
if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
raise NotImplementedError("""LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)""" )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowerCAmelCase_ :int = config["""lr"""]
lowerCAmelCase_ :Union[str, Any] = int(config["""num_epochs"""] )
lowerCAmelCase_ :int = int(config["""seed"""] )
lowerCAmelCase_ :Union[str, Any] = int(config["""batch_size"""] )
lowerCAmelCase_ :Union[str, Any] = evaluate.load("""glue""" , """mrpc""" )
set_seed(lowercase__ )
lowerCAmelCase_ , lowerCAmelCase_ :Optional[int] = get_dataloaders(lowercase__ , lowercase__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowerCAmelCase_ :Optional[int] = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=lowercase__ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation, otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
lowerCAmelCase_ :Union[str, Any] = model.to(accelerator.device )
# Instantiate optimizer
lowerCAmelCase_ :Optional[Any] = AdamW(params=model.parameters() , lr=lowercase__ )
# Instantiate scheduler
lowerCAmelCase_ :Union[str, Any] = get_linear_schedule_with_warmup(
optimizer=lowercase__ , num_warmup_steps=1_0_0 , num_training_steps=(len(lowercase__ ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :Union[str, Any] = accelerator.prepare(
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
# Now we train the model
for epoch in range(lowercase__ ):
model.train()
with LocalSGD(
accelerator=lowercase__ , model=lowercase__ , local_sgd_steps=lowercase__ , enabled=local_sgd_steps is not None ) as local_sgd:
for step, batch in enumerate(lowercase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
                # We also currently do not support TPUs nor advise it, as bugs were found on the XLA side when running our tests.
with accelerator.accumulate(lowercase__ ):
lowerCAmelCase_ :str = model(**lowercase__ )
lowerCAmelCase_ :Optional[int] = output.loss
accelerator.backward(lowercase__ )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
# LocalSGD-specific line
local_sgd.step()
model.eval()
for step, batch in enumerate(lowercase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowerCAmelCase_ :Optional[int] = model(**lowercase__ )
lowerCAmelCase_ :Optional[int] = outputs.logits.argmax(dim=-1 )
lowerCAmelCase_ , lowerCAmelCase_ :Union[str, Any] = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
metric.add_batch(
predictions=lowercase__ , references=lowercase__ , )
lowerCAmelCase_ :Any = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"""epoch {epoch}:""" , lowercase__ )
def _snake_case ( ) -> Tuple:
'''simple docstring'''
lowerCAmelCase_ :str = argparse.ArgumentParser(description="""Simple example of training script.""" )
    parser.add_argument(
        """--mixed_precision""" , type=lowercase__ , default=lowercase__ , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
        """ between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10"""
        """ and an Nvidia Ampere GPU.""" , )
# New Code #
parser.add_argument(
"""--gradient_accumulation_steps""" , type=lowercase__ , default=1 , help="""The number of minibatches to be ran before gradients are accumulated.""" , )
parser.add_argument(
"""--local_sgd_steps""" , type=lowercase__ , default=8 , help="""Number of local SGD steps or None to disable local SGD""" )
parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
lowerCAmelCase_ :Optional[Any] = parser.parse_args()
lowerCAmelCase_ :Tuple = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 4_2, """batch_size""": 1_6}
training_function(lowercase__ , lowercase__ )
if __name__ == "__main__":
main()
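# Usage note (added for illustration): `local_sgd.step()` is called once per batch,
# and LocalSGD averages model parameters across workers only every `local_sgd_steps`
# such calls instead of all-reducing gradients on every step, while `accumulate`
# independently spaces optimizer steps `gradient_accumulation_steps` batches apart.
# A typical multi-GPU launch of this script (the filename is hypothetical) is:
#
#     accelerate launch this_script.py --gradient_accumulation_steps 4 --local_sgd_steps 8
#
# where `accelerate launch` picks up the process topology from `accelerate config`.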
| 1 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'caidas/swin2sr-classicalsr-x2-64': (
'https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json'
),
}
class _SCREAMING_SNAKE_CASE ( A__ ):
UpperCAmelCase_ :Dict = "swin2sr"
UpperCAmelCase_ :Tuple = {
"hidden_size": "embed_dim",
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
    def __init__( self , image_size=64 , patch_size=1 , num_channels=3 , embed_dim=180 , depths=[6, 6, 6, 6, 6, 6] , num_heads=[6, 6, 6, 6, 6, 6] , window_size=8 , mlp_ratio=2.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , use_absolute_embeddings=False , initializer_range=0.02 , layer_norm_eps=1E-5 , upscale=2 , img_range=1.0 , resi_connection="1conv" , upsampler="pixelshuffle" , **kwargs , ) -> Union[str, Any]:
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths )
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.upscale = upscale
        self.img_range = img_range
        self.resi_connection = resi_connection
        self.upsampler = upsampler
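# Example (editor's sketch): assuming the class above is exported as
# `Swin2SRConfig`, instantiating it with defaults mirrors the
# caidas/swin2sr-classicalsr-x2-64 checkpoint, and any keyword overrides a
# single field, e.g.:
#     config = Swin2SRConfig(upscale=4, upsampler="pixelshuffle")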
| 350 |
"""simple docstring"""
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
__UpperCAmelCase = 16
__UpperCAmelCase = 32
def get_dataloaders(accelerator: Accelerator , batch_size: int = 16 , model_name: str = "bert-base-cased"):
    '''simple docstring'''
    tokenizer = AutoTokenizer.from_pretrained(model_name )
    datasets = load_dataset("""glue""" , """mrpc""" )
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=True , max_length=None )
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function , batched=True , remove_columns=["""idx""", """sentence1""", """sentence2"""] , load_from_cache_file=False )
    # We also rename the 'label' column to 'labels', which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("""label""" , """labels""" )
    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples , padding="""max_length""" , max_length=128 , return_tensors="""pt""" )
        return tokenizer.pad(examples , padding="""longest""" , return_tensors="""pt""" )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["""train"""] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets["""validation"""] , shuffle=False , collate_fn=collate_fn , batch_size=batch_size )
    return train_dataloader, eval_dataloader
def evaluation_loop(accelerator, model, eval_dataloader, metric):
    '''simple docstring'''
    model.eval()
    samples_seen = 0
    for step, batch in enumerate(eval_dataloader ):
        # We could avoid this line since we set the accelerator with `device_placement=True`.
        batch.to(accelerator.device )
        with torch.no_grad():
            outputs = model(**batch )
        predictions = outputs.logits.argmax(dim=-1 )
        # It is slightly faster to call this once than multiple times
        predictions, references = accelerator.gather(
            (predictions, batch["""labels"""]) )  # If we are in a multiprocess environment, the last batch has duplicates
        if accelerator.use_distributed:
            if step == len(eval_dataloader ) - 1:
                predictions = predictions[: len(eval_dataloader.dataset ) - samples_seen]
                references = references[: len(eval_dataloader.dataset ) - samples_seen]
            else:
                samples_seen += references.shape[0]
        metric.add_batch(
            predictions=predictions , references=references , )
    eval_metric = metric.compute()
    return eval_metric["accuracy"]
def training_function(config, args):
    '''simple docstring'''
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["""lr"""]
    num_epochs = int(config["""num_epochs"""] )
    seed = int(config["""seed"""] )
    batch_size = int(config["""batch_size"""] )
    model_name = args.model_name_or_path
    set_seed(seed )
    train_dataloader, eval_dataloader = get_dataloaders(accelerator , batch_size , model_name )
    # Instantiate the model (we build the model here so that the seed also controls new weight initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name , return_dict=True )
    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters() , lr=lr )
    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            """gradient_accumulation_steps"""
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader ) * num_epochs) // gradient_accumulation_steps
    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer , num_warmup_steps=0 , num_training_steps=max_training_steps , )
    else:
        lr_scheduler = DummyScheduler(optimizer , total_num_steps=max_training_steps , warmup_num_steps=0 )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0
    metric = evaluate.load("""glue""" , """mrpc""" )
    ending_epoch = num_epochs
    if args.partial_train_epoch is not None:
        ending_epoch = args.partial_train_epoch
    if args.resume_from_checkpoint:
        accelerator.load_state(args.resume_from_checkpoint )
        epoch_string = args.resume_from_checkpoint.split("""epoch_""" )[1]
        state_epoch_num = """"""
        for char in epoch_string:
            if char.isdigit():
                state_epoch_num += char
            else:
                break
        starting_epoch = int(state_epoch_num ) + 1
        accuracy = evaluation_loop(accelerator , model , eval_dataloader , metric )
        accelerator.print("""resumed checkpoint performance:""" , accuracy )
        accelerator.print("""resumed checkpoint's scheduler's lr:""" , lr_scheduler.get_lr()[0] )
        accelerator.print("""resumed optimizer's lr:""" , optimizer.param_groups[0]["""lr"""] )
        with open(os.path.join(args.output_dir , f"""state_{starting_epoch-1}.json""" ) , """r""" ) as f:
            resumed_state = json.load(f )
        assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
        assert (
            resumed_state["lr"] == lr_scheduler.get_lr()[0]
        ), "Scheduler learning rate mismatch, loading from checkpoint failed"
        assert (
            resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
        ), "Optimizer learning rate mismatch, loading from checkpoint failed"
        assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
        return
    # Now we train the model
    state = {}
    for epoch in range(starting_epoch , ending_epoch ):
        model.train()
        for step, batch in enumerate(train_dataloader ):
            outputs = model(**batch )
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss )
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1
        output_dir = f"""epoch_{epoch}"""
        output_dir = os.path.join(args.output_dir , output_dir )
        accelerator.save_state(output_dir )
        accuracy = evaluation_loop(accelerator , model , eval_dataloader , metric )
        state["""accuracy"""] = accuracy
        state["""lr"""] = lr_scheduler.get_lr()[0]
        state["""optimizer_lr"""] = optimizer.param_groups[0]["""lr"""]
        state["""epoch"""] = epoch
        state["""overall_step"""] = overall_step
        accelerator.print(f"""epoch {epoch}:""" , state )
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            with open(os.path.join(args.output_dir , f"""state_{epoch}.json""" ) , """w""" ) as f:
                json.dump(state , f )
def main():
    '''simple docstring'''
    parser = argparse.ArgumentParser(description="""Simple example of a training script with checkpointing.""" )
    parser.add_argument(
        """--model_name_or_path""" , type=str , default="""bert-base-cased""" , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=False , )
    parser.add_argument(
        """--output_dir""" , type=str , default=""".""" , help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""" , )
    parser.add_argument(
        """--resume_from_checkpoint""" , type=str , default=None , help="""If the training should continue from a checkpoint folder.""" , )
    parser.add_argument(
        """--partial_train_epoch""" , type=int , default=None , help="""If passed, the training will stop after this number of epochs.""" , )
    parser.add_argument(
        """--num_epochs""" , type=int , default=2 , help="""Number of train epochs.""" , )
    args = parser.parse_args()
    config = {"""lr""": 2E-5, """num_epochs""": args.num_epochs, """seed""": 42, """batch_size""": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 1 | 0 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __lowerCAmelCase ( self ) -> Any:
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100] )
        torch_builtin = get_activation("""gelu""" )
        self.assertTrue(torch.allclose(gelu_python(x ) , torch_builtin(x ) ) )
        self.assertFalse(torch.allclose(gelu_python(x ) , gelu_new(x ) ) )
def __lowerCAmelCase ( self ) -> Optional[int]:
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100] )
        torch_builtin = get_activation("""gelu""" )
        gelu10 = get_activation("""gelu_10""" )
        y_gelu = torch_builtin(x )
        y_gelu_10 = gelu10(x )
        clipped_mask = torch.where(y_gelu_10 < 10.0 , 1 , 0 )
        self.assertTrue(torch.max(y_gelu_10 ).item() == 10.0 )
        self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_10 * clipped_mask ) )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
get_activation("""gelu""" )
get_activation("""gelu_10""" )
get_activation("""gelu_fast""" )
get_activation("""gelu_new""" )
get_activation("""gelu_python""" )
get_activation("""gelu_pytorch_tanh""" )
get_activation("""linear""" )
get_activation("""mish""" )
get_activation("""quick_gelu""" )
get_activation("""relu""" )
get_activation("""sigmoid""" )
get_activation("""silu""" )
get_activation("""swish""" )
get_activation("""tanh""" )
        with self.assertRaises(KeyError ):
            get_activation("""bogus""" )
        with self.assertRaises(KeyError ):
            get_activation(None )
def __lowerCAmelCase ( self ) -> str:
        act1 = get_activation("""gelu""" )
        act1.a = 1
        act2 = get_activation("""gelu""" )
        # If activation instances were shared, the attribute set on act1 would leak to act2.
        self.assertEqual(act1.a , 1 )
        with self.assertRaises(AttributeError ):
            _ = act2.a
| 351 |
"""simple docstring"""
import base64
import io
import json
import os
from copy import deepcopy
from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler
class _SCREAMING_SNAKE_CASE :
    def __init__( self , config_file_or_dict ) -> Union[str, Any]:
        if isinstance(config_file_or_dict , dict ):
            # Don't modify user's data should they want to reuse it (e.g. in tests), because once we
            # modified it, it will not be accepted here again, since `auto` values would have been overridden
            config = deepcopy(config_file_or_dict )
        elif os.path.exists(config_file_or_dict ):
            with io.open(config_file_or_dict , """r""" , encoding="""utf-8""" ) as f:
                config = json.load(f )
        else:
            try:
                config_decoded = base64.urlsafe_b64decode(config_file_or_dict ).decode("""utf-8""" )
                config = json.loads(config_decoded )
            except (UnicodeDecodeError, AttributeError, ValueError):
                raise ValueError(
                    f"""Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}""" )
        self.config = config
        self.set_stage_and_offload()
    def set_stage_and_offload( self ) -> None:
        # zero stage - this is done as early as possible, before model is created, to allow
        # ``is_deepspeed_zero3_enabled`` query and getting to the early deepspeed config object
        # during ``zero.Init()`` which needs to know the dtype, and some other hparams.
        self._stage = self.get_value("""zero_optimization.stage""" , -1 )
        # offload
        self._offload = False
        if self.is_zero2() or self.is_zero3():
            offload_devices_valid = set(["""cpu""", """nvme"""] )
            offload_devices = set(
                [
                    self.get_value("""zero_optimization.offload_optimizer.device""" ),
                    self.get_value("""zero_optimization.offload_param.device""" ),
                ] )
            if len(offload_devices & offload_devices_valid ) > 0:
                self._offload = True
    def find_config_node( self , ds_key_long ) -> Optional[Any]:
        config = self.config
        # find the config node of interest if it exists
        nodes = ds_key_long.split(""".""" )
        ds_key = nodes.pop()
        for node in nodes:
            config = config.get(node )
            if config is None:
                return None, ds_key
        return config, ds_key
    def get_value( self , ds_key_long , default=None ) -> Optional[Any]:
        config, ds_key = self.find_config_node(ds_key_long )
        if config is None:
            return default
        return config.get(ds_key , default )
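    # Example (editor's note): dotted keys index into nested dicts, so with
    # config = {"zero_optimization": {"stage": 3}}:
    #     self.get_value("zero_optimization.stage")        -> 3
    #     self.get_value("zero_optimization.offload", "x") -> "x" (the default)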
    def del_config_sub_tree( self , ds_key_long , must_exist=False ) -> Optional[Any]:
        config = self.config
        # find the config node of interest if it exists
        nodes = ds_key_long.split(""".""" )
        for node in nodes:
            parent_config = config
            config = config.get(node )
            if config is None:
                if must_exist:
                    raise ValueError(f"""Can't find {ds_key_long} entry in the config: {self.config}""" )
                else:
                    return
        # if found remove it
        if parent_config is not None:
            parent_config.pop(node )
    def is_true( self , ds_key_long ) -> bool:
        value = self.get_value(ds_key_long )
        return False if value is None else bool(value )
    def is_false( self , ds_key_long ) -> bool:
        value = self.get_value(ds_key_long )
        return False if value is None else not bool(value )
    def is_zero2( self ) -> bool:
        return self._stage == 2
    def is_zero3( self ) -> bool:
        return self._stage == 3
    def is_offload( self ) -> bool:
        return self._offload
class _SCREAMING_SNAKE_CASE :
    def __init__( self , engine ) -> Optional[int]:
        self.engine = engine
    def backward( self , loss , **kwargs ) -> None:
        # runs backpropagation and handles mixed precision
        self.engine.backward(loss , **kwargs )
# Deepspeed's `engine.step` performs the following operations:
# - gradient accumulation check
# - gradient clipping
# - optimizer step
# - zero grad
# - checking overflow
# - lr_scheduler step (only if engine.lr_scheduler is not None)
self.engine.step()
# and this plugin overrides the above calls with no-ops when Accelerate runs under
# Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple
# training loop that works transparently under many training regimes.
class _SCREAMING_SNAKE_CASE ( A__ ):
    def __init__( self , optimizer ) -> List[str]:
        super().__init__(optimizer , device_placement=False , scaler=None )
        self.__has_overflow__ = hasattr(self.optimizer , """overflow""" )
    def zero_grad( self , set_to_none=None ) -> Optional[Any]:
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
    def step( self ) -> List[Any]:
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
    @property
    def step_was_skipped( self ) -> bool:
        if self.__has_overflow__:
            return self.optimizer.overflow
        return False
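# Editor's sketch (illustrative only, not the accelerate implementation) of
# the "no-op wrapper" idea described above: once DeepSpeed owns the training
# step, the wrapped optimizer keeps the usual interface but does nothing,
# because `engine.step()` already performed clipping, stepping and zeroing.
class _NoOpOptimizerSketch:
    def __init__(self, optimizer):
        self.optimizer = optimizer  # kept only so attributes like param_groups stay reachable
    def step(self):
        pass  # DeepSpeed's engine.step() has already applied the update
    def zero_grad(self):
        pass  # gradients were zeroed inside engine.step()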
class _SCREAMING_SNAKE_CASE ( A__ ):
    def __init__( self , scheduler , optimizers ) -> Optional[int]:
        super().__init__(scheduler , optimizers )
    def step( self ) -> Any:
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
class _SCREAMING_SNAKE_CASE :
    def __init__( self , params , lr=0.001 , weight_decay=0 , **kwargs ) -> List[Any]:
        self.params = params
        self.lr = lr
        self.weight_decay = weight_decay
        self.kwargs = kwargs
class _SCREAMING_SNAKE_CASE :
    def __init__( self , optimizer , total_num_steps=None , warmup_num_steps=0 , **kwargs ) -> List[str]:
        self.optimizer = optimizer
        self.total_num_steps = total_num_steps
        self.warmup_num_steps = warmup_num_steps
        self.kwargs = kwargs
| 1 | 0 |
"""simple docstring"""
def sum_digits(num: int ) -> int:
    '''simple docstring'''
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum
def solution(max_n: int = 100 ) -> int:
    '''simple docstring'''
    pre_numerator = 1
    cur_numerator = 2
    for i in range(2 , max_n + 1 ):
        temp = pre_numerator
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp
    return sum_digits(cur_numerator )
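# Worked check (editor's note): the continued fraction of e is
# [2; 1, 2, 1, 1, 4, 1, 1, 6, ...], and the convergent numerators follow
# h_i = a_i * h_{i-1} + h_{i-2} starting from 1, 2:
#     i=2: a=1 -> 1*2 + 1 = 3,   i=3: a=2 -> 2*3 + 2 = 8,   i=4: a=1 -> 11, ...
# so solution(10) == 17, since the 10th convergent's numerator is 1457.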
if __name__ == "__main__":
print(F"""{solution() = }""")
| 352 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class _SCREAMING_SNAKE_CASE ( A__ ):
    UpperCAmelCase_ :Dict = (
        "This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image. "
        "It takes two arguments named `image`, which should be the original image, and `label`, which should be a text "
        "description of the elements that should be identified in the segmentation mask. The tool returns the mask."
    )
UpperCAmelCase_ :List[str] = "CIDAS/clipseg-rd64-refined"
UpperCAmelCase_ :List[Any] = "image_segmenter"
UpperCAmelCase_ :Optional[int] = CLIPSegForImageSegmentation
UpperCAmelCase_ :Tuple = ["image", "text"]
UpperCAmelCase_ :Dict = ["image"]
def __init__( self , *__A , **__A ) -> Optional[Any]:
requires_backends(self , ["""vision"""] )
super().__init__(*__A , **__A )
    def __lowerCAmelCase ( self , image , label ) -> Any:
        return self.pre_processor(text=[label] , images=[image] , padding=True , return_tensors="""pt""" )
    def __lowerCAmelCase ( self , inputs ) -> Tuple:
        with torch.no_grad():
            logits = self.model(**inputs ).logits
        return logits
    def __lowerCAmelCase ( self , outputs ) -> Tuple:
        array = outputs.cpu().detach().numpy()
        # Binarize the logits: non-positive values are background, positive values belong to the mask.
        array[array <= 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8 ) )
| 1 | 0 |
"""simple docstring"""
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class _SCREAMING_SNAKE_CASE :
def __init__( self , __A , __A=14 , __A=7 , __A=True , __A=True , __A=True , __A=True , __A=True , __A=99 , __A=32 , __A=5 , __A=4 , __A=37 , __A="gelu" , __A=0.1 , __A=0.1 , __A=512 , __A=16 , __A=2 , __A=0.0_2 , __A=3 , __A=4 , __A=None , ) -> Optional[Any]:
lowerCAmelCase_ :Any = parent
lowerCAmelCase_ :List[Any] = batch_size
lowerCAmelCase_ :Union[str, Any] = seq_length
lowerCAmelCase_ :Optional[Any] = is_training
lowerCAmelCase_ :Optional[Any] = use_token_type_ids
lowerCAmelCase_ :List[Any] = use_input_mask
lowerCAmelCase_ :List[Any] = use_labels
lowerCAmelCase_ :Optional[Any] = use_mc_token_ids
lowerCAmelCase_ :Union[str, Any] = vocab_size
lowerCAmelCase_ :Any = hidden_size
lowerCAmelCase_ :List[Any] = num_hidden_layers
lowerCAmelCase_ :Optional[Any] = num_attention_heads
lowerCAmelCase_ :Optional[int] = intermediate_size
lowerCAmelCase_ :List[Any] = hidden_act
lowerCAmelCase_ :Any = hidden_dropout_prob
lowerCAmelCase_ :List[str] = attention_probs_dropout_prob
lowerCAmelCase_ :str = max_position_embeddings
lowerCAmelCase_ :List[Any] = type_vocab_size
lowerCAmelCase_ :Tuple = type_sequence_label_size
lowerCAmelCase_ :str = initializer_range
lowerCAmelCase_ :List[Any] = num_labels
lowerCAmelCase_ :Dict = num_choices
lowerCAmelCase_ :List[Any] = scope
lowerCAmelCase_ :Union[str, Any] = self.vocab_size - 1
def __lowerCAmelCase ( self ) -> Union[str, Any]:
lowerCAmelCase_ :Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase_ :int = None
if self.use_input_mask:
lowerCAmelCase_ :List[str] = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase_ :Union[str, Any] = None
if self.use_token_type_ids:
lowerCAmelCase_ :int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase_ :str = None
if self.use_mc_token_ids:
lowerCAmelCase_ :Dict = ids_tensor([self.batch_size, self.num_choices] , self.seq_length )
lowerCAmelCase_ :Optional[Any] = None
lowerCAmelCase_ :Dict = None
lowerCAmelCase_ :Optional[Any] = None
if self.use_labels:
lowerCAmelCase_ :str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase_ :Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase_ :int = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase_ :Union[str, Any] = self.get_config()
lowerCAmelCase_ :Dict = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
def __lowerCAmelCase ( self ) -> Union[str, Any]:
return CTRLConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
def __lowerCAmelCase ( self , __A , __A , __A , __A , __A , *__A ) -> List[Any]:
lowerCAmelCase_ :int = CTRLModel(config=__A )
model.to(__A )
model.eval()
model(__A , token_type_ids=__A , head_mask=__A )
model(__A , token_type_ids=__A )
lowerCAmelCase_ :Dict = model(__A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(len(result.past_key_values ) , config.n_layer )
def __lowerCAmelCase ( self , __A , __A , __A , __A , __A , *__A ) -> Optional[int]:
lowerCAmelCase_ :Dict = CTRLLMHeadModel(__A )
model.to(__A )
model.eval()
lowerCAmelCase_ :Optional[int] = model(__A , token_type_ids=__A , labels=__A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCAmelCase ( self ) -> Tuple:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """head_mask""": head_mask}
        return config, inputs_dict
def __lowerCAmelCase ( self , __A , __A , __A , __A , *__A ) -> Optional[int]:
lowerCAmelCase_ :Optional[Any] = self.num_labels
lowerCAmelCase_ :Optional[int] = CTRLForSequenceClassification(__A )
model.to(__A )
model.eval()
lowerCAmelCase_ :Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase_ :str = model(__A , token_type_ids=__A , labels=__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
@require_torch
class _SCREAMING_SNAKE_CASE ( A__ , A__ , A__ , unittest.TestCase ):
UpperCAmelCase_ :Optional[int] = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
UpperCAmelCase_ :Optional[Any] = (CTRLLMHeadModel,) if is_torch_available() else ()
UpperCAmelCase_ :Tuple = (
{
"feature-extraction": CTRLModel,
"text-classification": CTRLForSequenceClassification,
"text-generation": CTRLLMHeadModel,
"zero-shot": CTRLForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCAmelCase_ :int = True
UpperCAmelCase_ :int = False
UpperCAmelCase_ :List[Any] = False
def __lowerCAmelCase ( self , __A , __A , __A , __A , __A ) -> Optional[Any]:
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
# config could not be created.
return True
return False
def __lowerCAmelCase ( self ) -> Optional[Any]:
lowerCAmelCase_ :int = CTRLModelTester(self )
lowerCAmelCase_ :Union[str, Any] = ConfigTester(self , config_class=__A , n_embd=37 )
def __lowerCAmelCase ( self ) -> str:
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self ) -> Dict:
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self ) -> str:
lowerCAmelCase_ :str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_ctrl_model(*__A )
def __lowerCAmelCase ( self ) -> str:
lowerCAmelCase_ :Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*__A )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def __lowerCAmelCase ( self ) -> Optional[Any]:
pass
@slow
def __lowerCAmelCase ( self ) -> Any:
for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase_ :str = CTRLModel.from_pretrained(__A )
self.assertIsNotNone(__A )
@unittest.skip("""The model doesn't support left padding""" ) # and it's not used enough to be worth fixing :)
def __lowerCAmelCase ( self ) -> Any:
pass
@require_torch
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __lowerCAmelCase ( self ) -> Dict:
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
@slow
def __lowerCAmelCase ( self ) -> Optional[int]:
        model = CTRLLMHeadModel.from_pretrained("""ctrl""" )
        model.to(torch_device )
        input_ids = torch.tensor(
            [[1_1859, 0, 1611, 8]] , dtype=torch.long , device=torch_device )  # Legal the president is
        expected_output_ids = [
1_1859,
0,
1611,
8,
5,
150,
2_6449,
2,
19,
348,
469,
3,
2595,
48,
2_0740,
24_6533,
24_6533,
19,
30,
5,
] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
        output_ids = model.generate(input_ids , do_sample=False )
        self.assertListEqual(output_ids[0].tolist() , expected_output_ids )
| 353 |
"""simple docstring"""
def knapsack(values: list , weights: list , number_of_items: int , max_weight: int , index: int ) -> int:
    '''simple docstring'''
    if index == number_of_items:
        return 0
    ans1 = 0
    ans2 = 0
    ans1 = knapsack(values , weights , number_of_items , max_weight , index + 1 )
    if weights[index] <= max_weight:
        ans2 = values[index] + knapsack(
            values , weights , number_of_items , max_weight - weights[index] , index + 1 )
    return max(ans1 , ans2 )
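# Example (editor's note): with values [60, 100, 120], weights [10, 20, 30]
# and max_weight 50, the recursion tries skipping/taking each item and
# returns 220 (taking items 1 and 2):
#     knapsack([60, 100, 120], [10, 20, 30], 3, 50, 0) == 220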
if __name__ == "__main__":
import doctest
doctest.testmod()
| 1 | 0 |
"""simple docstring"""
def _snake_case ( lowercase__ : list , lowercase__ : list , lowercase__ : int , lowercase__ : int , lowercase__ : int ) -> int:
'''simple docstring'''
if index == number_of_items:
return 0
lowerCAmelCase_ :Any = 0
lowerCAmelCase_ :str = 0
lowerCAmelCase_ :Dict = knapsack(lowercase__ , lowercase__ , lowercase__ , lowercase__ , index + 1 )
if weights[index] <= max_weight:
lowerCAmelCase_ :str = values[index] + knapsack(
lowercase__ , lowercase__ , lowercase__ , max_weight - weights[index] , index + 1 )
return max(lowercase__ , lowercase__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 354 |
"""simple docstring"""
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def tqdm(main_process_only: bool = True , *args , **kwargs ) -> Optional[Any]:
    '''simple docstring'''
    if not is_tqdm_available():
        raise ImportError("""Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.""" )
    disable = False
    if main_process_only:
        disable = PartialState().local_process_index != 0
    return _tqdm(*args , **kwargs , disable=disable )
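# Usage (editor's note): this wrapper behaves like `tqdm.auto.tqdm`, except
# that by default the bar is rendered only on the local main process:
#     for batch in tqdm(dataloader, desc="train"):       # main process only
#     for batch in tqdm(False, dataloader, desc="all"):  # every process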
| 1 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __init__( self , __A , __A=13 , __A=7 , __A=True , __A=True , __A=True , __A=True , __A=99 , __A=32 , __A=5 , __A=4 , __A=37 , __A="gelu" , __A=0.1 , __A=0.1 , __A=512 , __A=16 , __A=2 , __A=0.0_2 , __A=4 , ) -> List[str]:
lowerCAmelCase_ :Union[str, Any] = parent
lowerCAmelCase_ :List[str] = batch_size
lowerCAmelCase_ :List[Any] = seq_length
lowerCAmelCase_ :int = is_training
lowerCAmelCase_ :Optional[int] = use_attention_mask
lowerCAmelCase_ :Tuple = use_token_type_ids
lowerCAmelCase_ :List[Any] = use_labels
lowerCAmelCase_ :Tuple = vocab_size
lowerCAmelCase_ :Dict = hidden_size
lowerCAmelCase_ :int = num_hidden_layers
lowerCAmelCase_ :Optional[Any] = num_attention_heads
lowerCAmelCase_ :Optional[int] = intermediate_size
lowerCAmelCase_ :Optional[int] = hidden_act
lowerCAmelCase_ :List[Any] = hidden_dropout_prob
lowerCAmelCase_ :Any = attention_probs_dropout_prob
lowerCAmelCase_ :int = max_position_embeddings
lowerCAmelCase_ :Dict = type_vocab_size
lowerCAmelCase_ :Any = type_sequence_label_size
lowerCAmelCase_ :Optional[int] = initializer_range
lowerCAmelCase_ :Tuple = num_choices
def __lowerCAmelCase ( self ) -> Dict:
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        config = RoFormerConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def __lowerCAmelCase ( self ) -> int:
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
        return config, inputs_dict
@require_flax
class _SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase ):
UpperCAmelCase_ :int = True
UpperCAmelCase_ :int = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
def __lowerCAmelCase ( self ) -> int:
lowerCAmelCase_ :Tuple = FlaxRoFormerModelTester(self )
@slow
def __lowerCAmelCase ( self ) -> str:
for model_class_name in self.all_model_classes:
lowerCAmelCase_ :Optional[int] = model_class_name.from_pretrained("""junnyu/roformer_chinese_small""" , from_pt=__A )
lowerCAmelCase_ :Union[str, Any] = model(np.ones((1, 1) ) )
self.assertIsNotNone(__A )
@require_flax
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@slow
def __lowerCAmelCase ( self ) -> Any:
        model = FlaxRoFormerForMaskedLM.from_pretrained("""junnyu/roformer_chinese_base""" )
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]] )
        output = model(input_ids )[0]
        vocab_size = 50000
        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape , expected_shape )
        expected_slice = jnp.array(
            [[[-0.1_2_0_5, -1.0_2_6_5, 0.2_9_2_2], [-1.5_1_3_4, 0.1_9_7_4, 0.1_5_1_9], [-5.0_1_3_5, -3.9_0_0_3, -0.8_4_0_4]]] )
        self.assertTrue(jnp.allclose(output[:, :3, :3] , expected_slice , atol=1E-4 ) )
| 355 |
"""simple docstring"""
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
__UpperCAmelCase = get_tests_dir('fixtures/dummy-config.json')
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __lowerCAmelCase ( self ) -> Dict:
lowerCAmelCase_ :int = 0
def __lowerCAmelCase ( self ) -> List[str]:
self.assertIsNotNone(transformers.models.auto.__spec__ )
self.assertIsNotNone(importlib.util.find_spec("""transformers.models.auto""" ) )
def __lowerCAmelCase ( self ) -> Tuple:
lowerCAmelCase_ :Tuple = AutoConfig.from_pretrained("""bert-base-uncased""" )
self.assertIsInstance(__A , __A )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
lowerCAmelCase_ :int = AutoConfig.from_pretrained(__A )
self.assertIsInstance(__A , __A )
def __lowerCAmelCase ( self ) -> Any:
lowerCAmelCase_ :Any = AutoConfig.from_pretrained(__A )
self.assertIsInstance(__A , __A )
def __lowerCAmelCase ( self ) -> Dict:
lowerCAmelCase_ :int = AutoConfig.for_model("""roberta""" )
self.assertIsInstance(__A , __A )
def __lowerCAmelCase ( self ) -> Tuple:
with tempfile.TemporaryDirectory() as tmp_dir:
# This model name contains bert and roberta, but roberta ends up being picked.
lowerCAmelCase_ :int = os.path.join(__A , """fake-roberta""" )
os.makedirs(__A , exist_ok=__A )
with open(os.path.join(__A , """config.json""" ) , """w""" ) as f:
f.write(json.dumps({} ) )
lowerCAmelCase_ :Any = AutoConfig.from_pretrained(__A )
self.assertEqual(type(__A ) , __A )
def __lowerCAmelCase ( self ) -> Optional[int]:
try:
AutoConfig.register("""custom""" , __A )
# Wrong model type will raise an error
with self.assertRaises(__A ):
AutoConfig.register("""model""" , __A )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__A ):
AutoConfig.register("""bert""" , __A )
# Now that the config is registered, it can be used as any other config with the auto-API
lowerCAmelCase_ :Union[str, Any] = CustomConfig()
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__A )
lowerCAmelCase_ :Optional[int] = AutoConfig.from_pretrained(__A )
self.assertIsInstance(__A , __A )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
def __lowerCAmelCase ( self ) -> Tuple:
with self.assertRaisesRegex(
__A , """bert-base is not a local folder and is not a valid model identifier""" ):
lowerCAmelCase_ :List[str] = AutoConfig.from_pretrained("""bert-base""" )
def __lowerCAmelCase ( self ) -> Any:
with self.assertRaisesRegex(
__A , r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
lowerCAmelCase_ :Dict = AutoConfig.from_pretrained(__A , revision="""aaaaaa""" )
def __lowerCAmelCase ( self ) -> int:
with self.assertRaisesRegex(
__A , """hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.""" , ):
lowerCAmelCase_ :Union[str, Any] = AutoConfig.from_pretrained("""hf-internal-testing/no-config-test-repo""" )
def __lowerCAmelCase ( self ) -> Tuple:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(__A ):
lowerCAmelCase_ :Tuple = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__A ):
lowerCAmelCase_ :List[str] = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" , trust_remote_code=__A )
lowerCAmelCase_ :str = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" , trust_remote_code=__A )
self.assertEqual(config.__class__.__name__ , """NewModelConfig""" )
# Test config can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__A )
lowerCAmelCase_ :Dict = AutoConfig.from_pretrained(__A , trust_remote_code=__A )
self.assertEqual(reloaded_config.__class__.__name__ , """NewModelConfig""" )
def __lowerCAmelCase ( self ) -> int:
class _SCREAMING_SNAKE_CASE ( A__ ):
UpperCAmelCase_ :int = "new-model"
try:
AutoConfig.register("""new-model""" , __A )
# If remote code is not set, the default is to use local
lowerCAmelCase_ :Any = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" )
self.assertEqual(config.__class__.__name__ , """NewModelConfigLocal""" )
# If remote code is disabled, we load the local one.
lowerCAmelCase_ :Union[str, Any] = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" , trust_remote_code=__A )
self.assertEqual(config.__class__.__name__ , """NewModelConfigLocal""" )
# If remote is enabled, we load from the Hub
lowerCAmelCase_ :Optional[Any] = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" , trust_remote_code=__A )
self.assertEqual(config.__class__.__name__ , """NewModelConfig""" )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
| 1 | 0 |
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import f1_score
import datasets
__UpperCAmelCase = '\\n @inproceedings{kakwani2020indicnlpsuite,\n title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},\n author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},\n year={2020},\n booktitle={Findings of EMNLP},\n}\n'
__UpperCAmelCase = '\\n IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide\n variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.\n'
__UpperCAmelCase = '\nCompute IndicGLUE evaluation metric associated to each IndicGLUE dataset.\nArgs:\n predictions: list of predictions to score (as int64),\n except for \'cvit-mkb-clsr\' where each prediction is a vector (of float32).\n references: list of ground truth labels corresponding to the predictions (as int64),\n except for \'cvit-mkb-clsr\' where each reference is a vector (of float32).\nReturns: depending on the IndicGLUE subset, one or several of:\n "accuracy": Accuracy\n "f1": F1 score\n "precision": Precision@10\nExamples:\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wnli\') # \'wnli\' or any of ["copa", "sna", "csqa", "wstp", "inltkh", "bbca", "iitp-mr", "iitp-pr", "actsa-sc", "md"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wiki-ner\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'cvit-mkb-clsr\')\n >>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'precision@10\': 1.0}\n\n'
def simple_accuracy(preds, labels):
    '''simple docstring'''
    return float((preds == labels).mean() )
def acc_and_f1(preds, labels):
    '''simple docstring'''
    acc = simple_accuracy(preds , labels )
    f1 = float(f1_score(y_true=labels , y_pred=preds ) )
    return {
        "accuracy": acc,
        "f1": f1,
    }
def precision_at_10(en_sentvecs, in_sentvecs):
    '''simple docstring'''
    en_sentvecs = np.array(en_sentvecs )
    in_sentvecs = np.array(in_sentvecs )
    n = en_sentvecs.shape[0]
    # mean centering
    en_sentvecs = en_sentvecs - np.mean(en_sentvecs , axis=0 )
    in_sentvecs = in_sentvecs - np.mean(in_sentvecs , axis=0 )
    sim = cdist(en_sentvecs , in_sentvecs , """cosine""" )
    actual = np.array(range(n ) )
    preds = sim.argsort(axis=1 )[:, :10]
    matches = np.any(preds == actual[:, None] , axis=1 )
    return float(matches.mean() )
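# Editor's note on the retrieval metric above: row i of `sim` holds cosine
# distances from the i-th English sentence vector to every candidate vector,
# so a sentence counts as a hit when its own index i appears among its 10
# nearest candidates; the returned score is the fraction of hits. Identical
# (mean-centered) en/in inputs therefore score 1.0, matching the docstring.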
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _SCREAMING_SNAKE_CASE ( datasets.Metric ):
    def _info( self ) -> int:
if self.config_name not in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"cvit-mkb-clsr",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
"wiki-ner",
]:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", """
"""\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", """
"""\"wiki-ner\"]""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""int64""" )
if self.config_name != """cvit-mkb-clsr"""
else datasets.Sequence(datasets.Value("""float32""" ) ),
"""references""": datasets.Value("""int64""" )
if self.config_name != """cvit-mkb-clsr"""
else datasets.Sequence(datasets.Value("""float32""" ) ),
} ) , codebase_urls=[] , reference_urls=[] , format="""numpy""" if self.config_name != """cvit-mkb-clsr""" else None , )
    def _compute( self , predictions , references ) -> Tuple:
        if self.config_name == "cvit-mkb-clsr":
            return {"precision@10": precision_at_10(predictions , references )}
        elif self.config_name in ["wiki-ner"]:
            return acc_and_f1(predictions , references )
elif self.config_name in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
]:
return {"accuracy": simple_accuracy(__A , __A )}
else:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", """
"""\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", """
"""\"wiki-ner\"]""" )
| 356 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase ):
UpperCAmelCase_ :str = GPTSanJapaneseTokenizer
UpperCAmelCase_ :Optional[int] = False
UpperCAmelCase_ :Optional[int] = {"do_clean_text": False, "add_prefix_space": False}
def __lowerCAmelCase ( self ) -> Tuple:
super().setUp()
# fmt: off
lowerCAmelCase_ :Dict = ["""こん""", """こんに""", """にちは""", """ばんは""", """世界,㔺界""", """、""", """。""", """<BR>""", """<SP>""", """<TAB>""", """<URL>""", """<EMAIL>""", """<TEL>""", """<DATE>""", """<PRICE>""", """<BLOCK>""", """<KIGOU>""", """<U2000U2BFF>""", """<|emoji1|>""", """<unk>""", """<|bagoftoken|>""", """<|endoftext|>"""]
# fmt: on
lowerCAmelCase_ :List[str] = {"""emoji""": {"""\ud83d\ude00""": """<|emoji1|>"""}, """emoji_inv""": {"""<|emoji1|>""": """\ud83d\ude00"""}} # 😀
lowerCAmelCase_ :int = {"""unk_token""": """<unk>"""}
lowerCAmelCase_ :Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
lowerCAmelCase_ :int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""emoji_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
with open(self.emoji_file , """w""" ) as emoji_writer:
emoji_writer.write(json.dumps(__A ) )
def __lowerCAmelCase ( self , **__A ) -> int:
kwargs.update(self.special_tokens_map )
return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **__A )
def __lowerCAmelCase ( self , __A ) -> Dict:
lowerCAmelCase_ :List[Any] = """こんにちは、世界。 \nこんばんは、㔺界。😀"""
lowerCAmelCase_ :Optional[int] = """こんにちは、世界。 \nこんばんは、世界。😀"""
return input_text, output_text
    def __lowerCAmelCase ( self , tokenizer ) -> str:
        input_text, output_text = self.get_input_output_texts(tokenizer )
        ids = tokenizer.encode(output_text , add_special_tokens=False )
        text = tokenizer.decode(ids , clean_up_tokenization_spaces=False )
        return text, ids
def __lowerCAmelCase ( self ) -> str:
pass # TODO add if relevant
def __lowerCAmelCase ( self ) -> Dict:
pass # TODO add if relevant
def __lowerCAmelCase ( self ) -> int:
pass # TODO add if relevant
def __lowerCAmelCase ( self ) -> Dict:
lowerCAmelCase_ :Union[str, Any] = self.get_tokenizer()
# Testing tokenization
lowerCAmelCase_ :Optional[int] = """こんにちは、世界。 こんばんは、㔺界。"""
lowerCAmelCase_ :Any = ["""こん""", """にちは""", """、""", """世界""", """。""", """<SP>""", """こん""", """ばんは""", """、""", """㔺界""", """。"""]
lowerCAmelCase_ :Tuple = tokenizer.tokenize(__A )
self.assertListEqual(__A , __A )
# Testing conversion to ids without special tokens
lowerCAmelCase_ :List[Any] = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
lowerCAmelCase_ :List[str] = tokenizer.convert_tokens_to_ids(__A )
self.assertListEqual(__A , __A )
# Testing conversion to ids with special tokens
lowerCAmelCase_ :Any = tokens + [tokenizer.unk_token]
lowerCAmelCase_ :Union[str, Any] = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
lowerCAmelCase_ :Union[str, Any] = tokenizer.convert_tokens_to_ids(__A )
self.assertListEqual(__A , __A )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
lowerCAmelCase_ :int = self.get_tokenizer()
# Testing tokenization
lowerCAmelCase_ :Optional[int] = """こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"""
lowerCAmelCase_ :str = """こんにちは、、、、世界。こんばんは、、、、世界。"""
lowerCAmelCase_ :str = tokenizer.encode(__A )
lowerCAmelCase_ :Dict = tokenizer.decode(__A )
self.assertEqual(__A , __A )
@slow
def __lowerCAmelCase ( self ) -> int:
lowerCAmelCase_ :Tuple = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
# Testing tokenization
lowerCAmelCase_ :Optional[Any] = """こんにちは、世界。"""
lowerCAmelCase_ :Any = """こんばんは、㔺界。😀"""
lowerCAmelCase_ :Optional[Any] = """こんにちは、世界。こんばんは、世界。😀"""
lowerCAmelCase_ :List[Any] = tokenizer.encode(prefix_text + input_text )
lowerCAmelCase_ :List[str] = tokenizer.encode("""""" , prefix_text=prefix_text + input_text )
lowerCAmelCase_ :int = tokenizer.encode(__A , prefix_text=__A )
lowerCAmelCase_ :int = tokenizer.decode(__A )
lowerCAmelCase_ :Dict = tokenizer.decode(__A )
lowerCAmelCase_ :Tuple = tokenizer.decode(__A )
self.assertEqual(__A , __A )
self.assertEqual(__A , __A )
self.assertEqual(__A , __A )
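    # Editor's note: the three encodings above decode to the same string
    # because the tokenizer implements a prefix-LM input format: the
    # prefix/text split does not change the recovered text, only the
    # token_type_ids that mark the bidirectional prefix (exercised next).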
@slow
def __lowerCAmelCase ( self ) -> Any:
lowerCAmelCase_ :int = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
# Testing tokenization
lowerCAmelCase_ :List[Any] = """こんにちは、世界。"""
lowerCAmelCase_ :Optional[int] = """こんばんは、㔺界。😀"""
lowerCAmelCase_ :List[str] = len(tokenizer.encode(__A ) ) - 2
lowerCAmelCase_ :Dict = len(tokenizer.encode(__A ) ) - 2
lowerCAmelCase_ :int = [1] + [0] * (len_prefix + len_text + 1)
lowerCAmelCase_ :List[Any] = [1] * (len_prefix + len_text + 1) + [0]
lowerCAmelCase_ :Dict = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
lowerCAmelCase_ :List[Any] = tokenizer(prefix_text + input_text ).token_type_ids
lowerCAmelCase_ :List[str] = tokenizer("""""" , prefix_text=prefix_text + input_text ).token_type_ids
lowerCAmelCase_ :List[Any] = tokenizer(__A , prefix_text=__A ).token_type_ids
self.assertListEqual(__A , __A )
self.assertListEqual(__A , __A )
self.assertListEqual(__A , __A )
@slow
def __lowerCAmelCase ( self ) -> int:
lowerCAmelCase_ :Dict = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
lowerCAmelCase_ :int = tokenizer.encode("""あンいワ""" )
lowerCAmelCase_ :Optional[Any] = tokenizer.encode("""""" , prefix_text="""あンいワ""" )
lowerCAmelCase_ :int = tokenizer.encode("""いワ""" , prefix_text="""あン""" )
self.assertEqual(tokenizer.decode(__A ) , tokenizer.decode(__A ) )
self.assertEqual(tokenizer.decode(__A ) , tokenizer.decode(__A ) )
self.assertNotEqual(__A , __A )
self.assertNotEqual(__A , __A )
self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token
self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token
@slow
def __lowerCAmelCase ( self ) -> Optional[Any]:
lowerCAmelCase_ :Union[str, Any] = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
lowerCAmelCase_ :int = [["""武田信玄""", """は、"""], ["""織田信長""", """の配下の、"""]]
lowerCAmelCase_ :Dict = tokenizer(__A , padding=__A )
lowerCAmelCase_ :Any = tokenizer.batch_encode_plus(__A , padding=__A )
# fmt: off
lowerCAmelCase_ :int = [[3_5993, 8640, 2_5948, 3_5998, 3_0647, 3_5675, 3_5999, 3_5999], [3_5993, 1_0382, 9868, 3_5998, 3_0646, 9459, 3_0646, 3_5675]]
lowerCAmelCase_ :List[str] = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
lowerCAmelCase_ :int = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
# fmt: on
self.assertListEqual(x_token.input_ids , __A )
self.assertListEqual(x_token.token_type_ids , __A )
self.assertListEqual(x_token.attention_mask , __A )
self.assertListEqual(x_token_a.input_ids , __A )
self.assertListEqual(x_token_a.token_type_ids , __A )
self.assertListEqual(x_token_a.attention_mask , __A )
def __lowerCAmelCase ( self ) -> Tuple:
# Intentionally convert some words to accommodate character fluctuations unique to Japanese
pass
def __lowerCAmelCase ( self ) -> str:
# tokenizer has no padding token
pass
| 1 | 0 |
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class _SCREAMING_SNAKE_CASE ( unittest.TestCase , A__ ):
def __lowerCAmelCase ( self ) -> List[str]:
lowerCAmelCase_ :Dict = load_tool("""text-classification""" )
self.tool.setup()
lowerCAmelCase_ :int = load_tool("""text-classification""" , remote=__A )
def __lowerCAmelCase ( self ) -> Tuple:
lowerCAmelCase_ :Any = self.tool("""That's quite cool""" , ["""positive""", """negative"""] )
self.assertEqual(__A , """positive""" )
def __lowerCAmelCase ( self ) -> Dict:
lowerCAmelCase_ :str = self.remote_tool("""That's quite cool""" , ["""positive""", """negative"""] )
self.assertEqual(__A , """positive""" )
def __lowerCAmelCase ( self ) -> List[str]:
lowerCAmelCase_ :Dict = self.tool(text="""That's quite cool""" , labels=["""positive""", """negative"""] )
self.assertEqual(__A , """positive""" )
def __lowerCAmelCase ( self ) -> List[Any]:
lowerCAmelCase_ :Dict = self.remote_tool(text="""That's quite cool""" , labels=["""positive""", """negative"""] )
self.assertEqual(__A , """positive""" )
| 357 |
"""simple docstring"""
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
dataset = pd.read_csv(
    'https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/'
    'position_salaries.csv'
)
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(X)
pol_reg = LinearRegression()
pol_reg.fit(X_poly, y)
def viz_polynomial() -> None:
    '''simple docstring'''
    plt.scatter(X , y , color="""red""" )
    plt.plot(X , pol_reg.predict(poly_reg.fit_transform(X ) ) , color="""blue""" )
    plt.title("""Truth or Bluff (Polynomial Regression)""" )
    plt.xlabel("""Position level""" )
    plt.ylabel("""Salary""" )
    plt.show()
if __name__ == "__main__":
    viz_polynomial()
    # Predicting a new result with Polynomial Regression
    pol_reg.predict(poly_reg.fit_transform([[5.5]]))
    # output should be 132148.43750003
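# Editor's note: PolynomialFeatures(degree=4) expands a single feature x into
# [1, x, x**2, x**3, x**4], so the "polynomial regression" above is plain
# linear regression on those expanded features; predicting at level 5.5
# applies the same expansion first via poly_reg.fit_transform([[5.5]]).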
| 1 | 0 |
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
from transformers.models.plbart.modeling_plbart import shift_tokens_right
EN_CODE = 50003
PYTHON_CODE = 50002
@require_sentencepiece
@require_tokenizers
class _SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase ):
UpperCAmelCase_ :Dict = PLBartTokenizer
UpperCAmelCase_ :Any = None
UpperCAmelCase_ :List[Any] = False
def __lowerCAmelCase ( self ) -> Optional[int]:
super().setUp()
# We have a SentencePiece fixture for testing
lowerCAmelCase_ :List[str] = PLBartTokenizer(__A , language_codes="""base""" , keep_accents=__A )
tokenizer.save_pretrained(self.tmpdirname )
def __lowerCAmelCase ( self ) -> Optional[int]:
lowerCAmelCase_ :Tuple = PLBartTokenizer(__A , language_codes="""base""" , keep_accents=__A )
lowerCAmelCase_ :int = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(__A , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__A ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
lowerCAmelCase_ :Optional[int] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
__A , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
lowerCAmelCase_ :str = tokenizer.convert_tokens_to_ids(__A )
self.assertListEqual(
__A , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
lowerCAmelCase_ :Optional[int] = tokenizer.convert_ids_to_tokens(__A )
self.assertListEqual(
__A , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
lowerCAmelCase_ :Optional[int] = tokenizer.vocab_size
lowerCAmelCase_ :List[Any] = [tokenizer.convert_ids_to_tokens(__A ) for x in range(end - 4 , __A )]
self.assertListEqual(__A , ["""__java__""", """__python__""", """__en_XX__""", """<mask>"""] )
lowerCAmelCase_ :Tuple = """java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"""
lowerCAmelCase_ :int = tokenizer(__A ).input_ids
self.assertEqual(
tokenizer.decode(__A , skip_special_tokens=__A , clean_up_tokenization_spaces=__A ) , __A , )
def __lowerCAmelCase ( self ) -> List[str]:
lowerCAmelCase_ :str = PLBartTokenizer(__A , language_codes="""multi""" , keep_accents=__A )
lowerCAmelCase_ :Union[str, Any] = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(__A , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__A ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
lowerCAmelCase_ :Union[str, Any] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
__A , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
lowerCAmelCase_ :Tuple = tokenizer.convert_tokens_to_ids(__A )
self.assertListEqual(
__A , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
lowerCAmelCase_ :Optional[int] = tokenizer.convert_ids_to_tokens(__A )
self.assertListEqual(
__A , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
lowerCAmelCase_ :Optional[int] = tokenizer.vocab_size
lowerCAmelCase_ :Optional[int] = [tokenizer.convert_ids_to_tokens(__A ) for x in range(end - 7 , __A )]
self.assertListEqual(
__A , ["""__java__""", """__python__""", """__en_XX__""", """__javascript__""", """__php__""", """__ruby__""", """__go__"""] )
lowerCAmelCase_ :Optional[int] = """java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"""
lowerCAmelCase_ :str = tokenizer(__A ).input_ids
self.assertEqual(
tokenizer.decode(__A , skip_special_tokens=__A , clean_up_tokenization_spaces=__A ) , __A , )
@require_torch
@require_sentencepiece
@require_tokenizers
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
UpperCAmelCase_ :Union[str, Any] = "uclanlp/plbart-python-en_XX"
UpperCAmelCase_ :Any = [
"def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])",
"def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])",
]
UpperCAmelCase_ :Any = [
"Returns the maximum value of a b c.",
"Sums the values of a b c.",
]
UpperCAmelCase_ :Any = [
134,
5452,
3_3460,
3_3441,
3_3463,
3_3465,
3_3463,
3_3449,
988,
20,
3_3456,
19,
3_3456,
771,
39,
4258,
889,
3318,
3_3441,
3_3463,
3_3465,
3_3463,
3_3449,
2471,
2,
PYTHON_CODE,
]
@classmethod
def __lowerCAmelCase ( cls ) -> str:
        cls.tokenizer :PLBartTokenizer = PLBartTokenizer.from_pretrained(
            cls.checkpoint_name , language_codes="""base""" , src_lang="""python""" , tgt_lang="""en_XX""" )
        cls.pad_token_id :Optional[int] = 1
return cls
def __lowerCAmelCase ( self ) -> Any:
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__java__"""] , 5_0001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__python__"""] , 5_0002 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__en_XX__"""] , 5_0003 )
def __lowerCAmelCase ( self ) -> List[Any]:
lowerCAmelCase_ :Any = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , __A )
def __lowerCAmelCase ( self ) -> Optional[Any]:
self.assertIn(__A , self.tokenizer.all_special_ids )
lowerCAmelCase_ :Optional[Any] = [EN_CODE, 9037, 3_3442, 57, 752, 153, 14, 56, 18, 9, 2]
lowerCAmelCase_ :List[str] = self.tokenizer.decode(__A , skip_special_tokens=__A )
lowerCAmelCase_ :List[str] = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__A )
self.assertEqual(__A , __A )
self.assertNotIn(self.tokenizer.eos_token , __A )
def __lowerCAmelCase ( self ) -> str:
lowerCAmelCase_ :Union[str, Any] = ["""def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])""" * 20]
self.assertIsInstance(src_text[0] , __A )
lowerCAmelCase_ :Any = 10
lowerCAmelCase_ :Any = self.tokenizer(__A , max_length=__A , truncation=__A ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , __A )
self.assertEqual(len(__A ) , __A )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """__java__"""] ) , [5_0004, 5_0001] )
def __lowerCAmelCase ( self ) -> Dict:
lowerCAmelCase_ :Tuple = tempfile.mkdtemp()
lowerCAmelCase_ :str = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(__A )
lowerCAmelCase_ :Union[str, Any] = PLBartTokenizer.from_pretrained(__A )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , __A )
@require_torch
def __lowerCAmelCase ( self ) -> List[str]:
lowerCAmelCase_ :List[Any] = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=__A , return_tensors="""pt""" )
lowerCAmelCase_ :str = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE] )
self.assertEqual(batch.decoder_input_ids[1][0] , __A )
self.assertEqual(batch.decoder_input_ids[1][-1] , 2 )
self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE] )
@require_torch
def __lowerCAmelCase ( self ) -> List[Any]:
lowerCAmelCase_ :Optional[Any] = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=__A , truncation=__A , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , )
lowerCAmelCase_ :Any = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
self.assertIsInstance(__A , __A )
self.assertEqual((2, 26) , batch.input_ids.shape )
self.assertEqual((2, 26) , batch.attention_mask.shape )
lowerCAmelCase_ :Any = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , __A )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE] )
def __lowerCAmelCase ( self ) -> Tuple:
lowerCAmelCase_ :Union[str, Any] = self.tokenizer(self.src_text , padding=__A , truncation=__A , max_length=3 , return_tensors="""pt""" )
lowerCAmelCase_ :int = self.tokenizer(
text_target=self.tgt_text , padding=__A , truncation=__A , max_length=10 , return_tensors="""pt""" )
lowerCAmelCase_ :Dict = targets["""input_ids"""]
lowerCAmelCase_ :Tuple = shift_tokens_right(__A , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def __lowerCAmelCase ( self ) -> List[str]:
lowerCAmelCase_ :List[str] = self.tokenizer._build_translation_inputs(
"""A test""" , return_tensors="""pt""" , src_lang="""en_XX""" , tgt_lang="""java""" )
self.assertEqual(
nested_simplify(__A ) , {
# A, test, EOS, en_XX
"""input_ids""": [[150, 242, 2, 5_0003]],
"""attention_mask""": [[1, 1, 1, 1]],
# java
"""forced_bos_token_id""": 5_0001,
} , )
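# Editorial usage sketch (hedged, condensed from the tests above): for source-code
# inputs the tokenizer appends [eos, language code] as suffix tokens, e.g.
#   tok = PLBartTokenizer.from_pretrained("uclanlp/plbart-python-en_XX",
#                                         language_codes="base", src_lang="python")
#   tok("def f(): pass").input_ids  # ends with [..., 2, PYTHON_CODE]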
| 358 |
"""simple docstring"""
from __future__ import annotations
__UpperCAmelCase = 1.6021e-19 # units = C
def _snake_case ( conductivity : float , electron_conc : float , mobility : float , ) -> tuple[str, float]:
'''simple docstring'''
    if (conductivity, electron_conc, mobility).count(0 ) != 1:
        raise ValueError("""Exactly one of the three values must be 0 (the unknown one)""" )
    elif conductivity < 0:
        raise ValueError("""Conductivity cannot be negative""" )
    elif electron_conc < 0:
        raise ValueError("""Electron concentration cannot be negative""" )
    elif mobility < 0:
        raise ValueError("""Mobility cannot be negative""" )
elif conductivity == 0:
return (
"conductivity",
mobility * electron_conc * ELECTRON_CHARGE,
)
elif electron_conc == 0:
return (
"electron_conc",
conductivity / (mobility * ELECTRON_CHARGE),
)
else:
return (
"mobility",
conductivity / (electron_conc * ELECTRON_CHARGE),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
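    # Editorial usage sketch (illustrative values, not from the source): passing
    # 0 for the unknown quantity solves sigma = q * n * mu for it, e.g.
    #   _snake_case(conductivity=25.0, electron_conc=1.6e19, mobility=0)
    # returns ("mobility", 25.0 / (1.6e19 * ELECTRON_CHARGE)).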
| 1 | 0 |
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
filename = get_tests_dir() + '/test_data/fsmt/fsmt_val_data.json'
with io.open(filename, 'r', encoding='utf-8') as f:
    bleu_data = json.load(f)
@require_torch
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __lowerCAmelCase ( self , __A ) -> List[Any]:
return FSMTTokenizer.from_pretrained(__A )
def __lowerCAmelCase ( self , __A ) -> Any:
lowerCAmelCase_ :Any = FSMTForConditionalGeneration.from_pretrained(__A ).to(__A )
if torch_device == "cuda":
model.half()
return model
@parameterized.expand(
[
["""en-ru""", 26.0],
["""ru-en""", 22.0],
["""en-de""", 22.0],
["""de-en""", 29.0],
] )
@slow
def __lowerCAmelCase ( self , __A , __A ) -> Dict:
        # note: this test is not measuring peak performance since it only evaluates a small batch,
        # but that should be enough to detect a regression in the output quality
lowerCAmelCase_ :int = f"""facebook/wmt19-{pair}"""
lowerCAmelCase_ :List[str] = self.get_tokenizer(__A )
lowerCAmelCase_ :Union[str, Any] = self.get_model(__A )
lowerCAmelCase_ :Optional[Any] = bleu_data[pair]["""src"""]
lowerCAmelCase_ :List[Any] = bleu_data[pair]["""tgt"""]
lowerCAmelCase_ :int = tokenizer(__A , return_tensors="""pt""" , truncation=__A , padding="""longest""" ).to(__A )
lowerCAmelCase_ :Dict = model.generate(
input_ids=batch.input_ids , num_beams=8 , )
lowerCAmelCase_ :Tuple = tokenizer.batch_decode(
__A , skip_special_tokens=__A , clean_up_tokenization_spaces=__A )
lowerCAmelCase_ :List[str] = calculate_bleu(__A , __A )
print(__A )
self.assertGreaterEqual(scores["""bleu"""] , __A )
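# Editorial note (hedged): ``calculate_bleu`` comes from the examples' ``utils``
# module; from its use above it is assumed to wrap a corpus-level BLEU metric
# (e.g. sacrebleu) and return a dict like {"bleu": 26.1}, which the final
# assertGreaterEqual compares against the parametrized minimum score.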
| 359 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
__UpperCAmelCase = logging.get_logger(__name__)
class _SCREAMING_SNAKE_CASE ( A__ ):
def __init__( self , *__A , **__A ) -> None:
warnings.warn(
"""The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use CLIPImageProcessor instead.""" , __A , )
super().__init__(*__A , **__A )
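# Editorial migration sketch (hedged): downstream code should instantiate the
# replacement class directly, e.g.
#   image_processor = CLIPImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
# since this subclass only emits the deprecation warning before delegating.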
| 1 | 0 |
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __lowerCAmelCase ( self ) -> Optional[int]:
lowerCAmelCase_ :Any = """laion/clap-htsat-unfused"""
lowerCAmelCase_ :Optional[Any] = tempfile.mkdtemp()
def __lowerCAmelCase ( self , **__A ) -> List[Any]:
return RobertaTokenizer.from_pretrained(self.checkpoint , **__A )
def __lowerCAmelCase ( self , **__A ) -> Tuple:
return ClapFeatureExtractor.from_pretrained(self.checkpoint , **__A )
def __lowerCAmelCase ( self ) -> int:
shutil.rmtree(self.tmpdirname )
def __lowerCAmelCase ( self ) -> List[str]:
lowerCAmelCase_ :Optional[Any] = self.get_tokenizer()
lowerCAmelCase_ :Optional[Any] = self.get_feature_extractor()
lowerCAmelCase_ :Optional[int] = ClapProcessor(tokenizer=__A , feature_extractor=__A )
processor.save_pretrained(self.tmpdirname )
lowerCAmelCase_ :Optional[Any] = ClapProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , __A )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , __A )
def __lowerCAmelCase ( self ) -> Tuple:
lowerCAmelCase_ :Dict = ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() )
processor.save_pretrained(self.tmpdirname )
lowerCAmelCase_ :str = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
lowerCAmelCase_ :Dict = self.get_feature_extractor(do_normalize=__A , padding_value=1.0 )
lowerCAmelCase_ :Union[str, Any] = ClapProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=__A , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __A )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.feature_extractor , __A )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
lowerCAmelCase_ :Dict = self.get_feature_extractor()
lowerCAmelCase_ :str = self.get_tokenizer()
lowerCAmelCase_ :List[Any] = ClapProcessor(tokenizer=__A , feature_extractor=__A )
lowerCAmelCase_ :Optional[Any] = floats_list((3, 1000) )
lowerCAmelCase_ :Optional[Any] = feature_extractor(__A , return_tensors="""np""" )
lowerCAmelCase_ :str = processor(audios=__A , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __lowerCAmelCase ( self ) -> List[str]:
lowerCAmelCase_ :List[Any] = self.get_feature_extractor()
lowerCAmelCase_ :Any = self.get_tokenizer()
lowerCAmelCase_ :Optional[int] = ClapProcessor(tokenizer=__A , feature_extractor=__A )
lowerCAmelCase_ :List[Any] = """This is a test string"""
lowerCAmelCase_ :Dict = processor(text=__A )
lowerCAmelCase_ :List[str] = tokenizer(__A )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __lowerCAmelCase ( self ) -> int:
lowerCAmelCase_ :int = self.get_feature_extractor()
lowerCAmelCase_ :Tuple = self.get_tokenizer()
lowerCAmelCase_ :Optional[Any] = ClapProcessor(tokenizer=__A , feature_extractor=__A )
lowerCAmelCase_ :int = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCAmelCase_ :Tuple = processor.batch_decode(__A )
lowerCAmelCase_ :Optional[Any] = tokenizer.batch_decode(__A )
self.assertListEqual(__A , __A )
def __lowerCAmelCase ( self ) -> List[Any]:
lowerCAmelCase_ :Optional[Any] = self.get_feature_extractor()
lowerCAmelCase_ :Any = self.get_tokenizer()
lowerCAmelCase_ :Optional[int] = ClapProcessor(tokenizer=__A , feature_extractor=__A )
self.assertListEqual(
processor.model_input_names[2:] , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , )
| 360 |
"""simple docstring"""
from itertools import zip_longest
import requests
from bsa import BeautifulSoup
from pandas import DataFrame
def _snake_case ( lowercase__ : str = "laptop" ) -> DataFrame:
'''simple docstring'''
    url = f"""https://www.amazon.in/laptop/s?k={lowercase__}"""
    header = {
        """User-Agent""": """Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36""",
        """Accept-Language""": """en-US, en;q=0.5""",
    }
    soup = BeautifulSoup(requests.get(url , headers=header ).text , """html.parser""" )
    # Initialize a Pandas dataframe with the column titles
    data_frame = DataFrame(
columns=[
"""Product Title""",
"""Product Link""",
"""Current Price of the product""",
"""Product Rating""",
"""MRP of the product""",
"""Discount""",
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
"""div""" , attrs={"""class""": """s-result-item""", """data-component-type""": """s-search-result"""} , ) , soup.find_all("""div""" , attrs={"""class""": """a-row a-size-base a-color-base"""} ) , ):
        try:
            product_title = item.h2.text
            product_link = """https://www.amazon.in/""" + item.h2.a["""href"""]
            product_price = item.find("""span""" , attrs={"""class""": """a-offscreen"""} ).text
            try:
                product_rating = item.find("""span""" , attrs={"""class""": """a-icon-alt"""} ).text
            except AttributeError:
                product_rating = """Not available"""
            try:
                product_mrp = (
                    """₹"""
                    + item.find(
                        """span""" , attrs={"""class""": """a-price a-text-price"""} ).text.split("""₹""" )[1]
                )
            except AttributeError:
                product_mrp = """"""
            try:
                discount = float(
                    (
                        (
                            float(product_mrp.strip("""₹""" ).replace(""",""" , """""" ) )
                            - float(product_price.strip("""₹""" ).replace(""",""" , """""" ) )
                        )
                        / float(product_mrp.strip("""₹""" ).replace(""",""" , """""" ) )
                    )
                    * 1_0_0 )
            except ValueError:
                discount = float("""nan""" )
            data_frame.loc[len(data_frame)] = [
                product_title,
                product_link,
                product_price,
                product_rating,
                product_mrp,
                discount,
            ]
        except AttributeError:
            pass
    data_frame.index += 1
return data_frame
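# Worked micro-example for the discount formula above (illustrative prices, not
# scraped data): MRP ₹1,000 and price ₹750 give ((1000 - 750) / 1000) * 100 = 25.0.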
if __name__ == "__main__":
    product = 'headphones'
    _snake_case(product).to_csv(F"""Amazon Product Data for {product}.csv""")
| 1 | 0 |
"""simple docstring"""
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileNetVaConfig,
MobileNetVaForImageClassification,
MobileNetVaImageProcessor,
load_tf_weights_in_mobilenet_va,
)
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCAmelCase = logging.get_logger(__name__)
def get_mobilenet_va_config ( lowercase__ : List[Any] ) -> Optional[Any]:
    '''simple docstring'''
    config = MobileNetVaConfig(layer_norm_eps=0.001 )
    if "_quant" in lowercase__:
        raise ValueError("""Quantized models are not supported.""" )
    matches = re.match(r"""^mobilenet_v1_([^_]*)_([^_]*)$""" , lowercase__ )
    if matches:
        config.depth_multiplier = float(matches[1] )
        config.image_size = int(matches[2] )
    # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
    # the usual 1000. The first class (index 0) is "background".
    config.num_labels = 1_0_0_1
    filename = """imagenet-1k-id2label.json"""
    repo_id = """huggingface/label-files"""
    idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type="""dataset""" ) , """r""" ) )
    idalabel = {int(k ) + 1: v for k, v in idalabel.items()}
    idalabel[0] = """background"""
    config.idalabel = idalabel
    config.labelaid = {v: k for k, v in idalabel.items()}
    return config
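# Editorial illustration (hypothetical entries): the +1 shift above turns
# ImageNet's {0: "tench", 1: "goldfish", ...} into {1: "tench", 2: "goldfish", ...},
# freeing index 0 for the extra TensorFlow "background" class.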
def prepare_img ( ) -> Union[str, Any]:
    '''simple docstring'''
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def _snake_case ( model_name : Optional[Any] , checkpoint_path : Optional[int] , pytorch_dump_folder_path : Union[str, Any] , push_to_hub : str=False ) -> Union[str, Any]:
    '''simple docstring'''
    config = get_mobilenet_va_config(model_name )
    # Load 🤗 model
    model = MobileNetVaForImageClassification(config ).eval()
    # Load weights from TensorFlow checkpoint
    load_tf_weights_in_mobilenet_va(model , config , checkpoint_path )
    # Check outputs on an image, prepared by MobileNetV1ImageProcessor
    image_processor = MobileNetVaImageProcessor(
        crop_size={"""width""": config.image_size, """height""": config.image_size} , size={"""shortest_edge""": config.image_size + 3_2} , )
    encoding = image_processor(images=prepare_img() , return_tensors="""pt""" )
    outputs = model(**encoding )
    logits = outputs.logits
    assert logits.shape == (1, 1_0_0_1)
    if model_name == "mobilenet_v1_1.0_224":
        expected_logits = torch.tensor([-4.1739, -1.1233, 3.1205] )
    elif model_name == "mobilenet_v1_0.75_192":
        expected_logits = torch.tensor([-3.9440, -2.3141, -0.3333] )
    else:
        expected_logits = None
    if expected_logits is not None:
        assert torch.allclose(logits[0, :3] , expected_logits , atol=1E-4 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print("""Pushing to the hub...""" )
        repo_id = """google/""" + model_name
        image_processor.push_to_hub(repo_id )
        model.push_to_hub(repo_id )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='mobilenet_v1_1.0_224',
type=str,
help='Name of the MobileNetV1 model you\'d like to convert. Should in the form \'mobilenet_v1_<depth>_<size>\'.',
)
parser.add_argument(
'--checkpoint_path', required=True, type=str, help='Path to the original TensorFlow checkpoint (.ckpt file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
    _snake_case(
        args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
    )
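# Example invocation (hypothetical local paths, not from the source):
#   python convert_checkpoint.py --model_name mobilenet_v1_1.0_224 \
#       --checkpoint_path ./mobilenet_v1_1.0_224.ckpt \
#       --pytorch_dump_folder_path ./mobilenet_v1_1.0_224_hf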
| 361 |
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __lowerCAmelCase ( self ) -> Optional[int]:
lowerCAmelCase_ :Any = """laion/clap-htsat-unfused"""
lowerCAmelCase_ :Optional[Any] = tempfile.mkdtemp()
def __lowerCAmelCase ( self , **__A ) -> List[Any]:
return RobertaTokenizer.from_pretrained(self.checkpoint , **__A )
def __lowerCAmelCase ( self , **__A ) -> Tuple:
return ClapFeatureExtractor.from_pretrained(self.checkpoint , **__A )
def __lowerCAmelCase ( self ) -> int:
shutil.rmtree(self.tmpdirname )
def __lowerCAmelCase ( self ) -> List[str]:
lowerCAmelCase_ :Optional[Any] = self.get_tokenizer()
lowerCAmelCase_ :Optional[Any] = self.get_feature_extractor()
lowerCAmelCase_ :Optional[int] = ClapProcessor(tokenizer=__A , feature_extractor=__A )
processor.save_pretrained(self.tmpdirname )
lowerCAmelCase_ :Optional[Any] = ClapProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , __A )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , __A )
def __lowerCAmelCase ( self ) -> Tuple:
lowerCAmelCase_ :Dict = ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() )
processor.save_pretrained(self.tmpdirname )
lowerCAmelCase_ :str = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
lowerCAmelCase_ :Dict = self.get_feature_extractor(do_normalize=__A , padding_value=1.0 )
lowerCAmelCase_ :Union[str, Any] = ClapProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=__A , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __A )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.feature_extractor , __A )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
lowerCAmelCase_ :Dict = self.get_feature_extractor()
lowerCAmelCase_ :str = self.get_tokenizer()
lowerCAmelCase_ :List[Any] = ClapProcessor(tokenizer=__A , feature_extractor=__A )
lowerCAmelCase_ :Optional[Any] = floats_list((3, 1000) )
lowerCAmelCase_ :Optional[Any] = feature_extractor(__A , return_tensors="""np""" )
lowerCAmelCase_ :str = processor(audios=__A , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __lowerCAmelCase ( self ) -> List[str]:
lowerCAmelCase_ :List[Any] = self.get_feature_extractor()
lowerCAmelCase_ :Any = self.get_tokenizer()
lowerCAmelCase_ :Optional[int] = ClapProcessor(tokenizer=__A , feature_extractor=__A )
lowerCAmelCase_ :List[Any] = """This is a test string"""
lowerCAmelCase_ :Dict = processor(text=__A )
lowerCAmelCase_ :List[str] = tokenizer(__A )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __lowerCAmelCase ( self ) -> int:
lowerCAmelCase_ :int = self.get_feature_extractor()
lowerCAmelCase_ :Tuple = self.get_tokenizer()
lowerCAmelCase_ :Optional[Any] = ClapProcessor(tokenizer=__A , feature_extractor=__A )
lowerCAmelCase_ :int = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCAmelCase_ :Tuple = processor.batch_decode(__A )
lowerCAmelCase_ :Optional[Any] = tokenizer.batch_decode(__A )
self.assertListEqual(__A , __A )
def __lowerCAmelCase ( self ) -> List[Any]:
lowerCAmelCase_ :Optional[Any] = self.get_feature_extractor()
lowerCAmelCase_ :Any = self.get_tokenizer()
lowerCAmelCase_ :Optional[int] = ClapProcessor(tokenizer=__A , feature_extractor=__A )
self.assertListEqual(
processor.model_input_names[2:] , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , )
| 1 | 0 |
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class _SCREAMING_SNAKE_CASE :
def __init__( self , __A , __A=13 , __A=7 , __A=True , __A=True , __A=True , __A=True , __A=99 , __A=32 , __A=5 , __A=4 , __A=37 , __A="gelu" , __A=0.1 , __A=0.1 , __A=128 , __A=32 , __A=16 , __A=2 , __A=0.0_2 , __A=3 , __A=4 , __A=None , ) -> str:
lowerCAmelCase_ :Union[str, Any] = parent
lowerCAmelCase_ :str = batch_size
lowerCAmelCase_ :Optional[Any] = seq_length
lowerCAmelCase_ :Union[str, Any] = is_training
lowerCAmelCase_ :Any = use_input_mask
lowerCAmelCase_ :Optional[int] = use_token_type_ids
lowerCAmelCase_ :List[str] = use_labels
lowerCAmelCase_ :Any = vocab_size
lowerCAmelCase_ :Union[str, Any] = hidden_size
lowerCAmelCase_ :Tuple = num_hidden_layers
lowerCAmelCase_ :Optional[int] = num_attention_heads
lowerCAmelCase_ :Tuple = intermediate_size
lowerCAmelCase_ :Tuple = hidden_act
lowerCAmelCase_ :List[str] = hidden_dropout_prob
lowerCAmelCase_ :Optional[int] = attention_probs_dropout_prob
lowerCAmelCase_ :Optional[int] = max_position_embeddings
lowerCAmelCase_ :Tuple = type_vocab_size
lowerCAmelCase_ :Union[str, Any] = type_sequence_label_size
lowerCAmelCase_ :int = initializer_range
lowerCAmelCase_ :str = num_labels
lowerCAmelCase_ :List[str] = num_choices
lowerCAmelCase_ :int = scope
def __lowerCAmelCase ( self ) -> List[str]:
lowerCAmelCase_ :str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase_ :Dict = None
if self.use_input_mask:
lowerCAmelCase_ :Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase_ :Union[str, Any] = None
if self.use_token_type_ids:
lowerCAmelCase_ :Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase_ :List[Any] = None
lowerCAmelCase_ :List[Any] = None
lowerCAmelCase_ :str = None
if self.use_labels:
lowerCAmelCase_ :Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase_ :List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase_ :Any = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase_ :List[str] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCAmelCase ( self ) -> Optional[int]:
return NezhaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__A , initializer_range=self.initializer_range , )
def __lowerCAmelCase ( self ) -> str:
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def __lowerCAmelCase ( self , __A , __A , __A , __A , __A , __A , __A ) -> List[str]:
lowerCAmelCase_ :Tuple = NezhaModel(config=__A )
model.to(__A )
model.eval()
lowerCAmelCase_ :str = model(__A , attention_mask=__A , token_type_ids=__A )
lowerCAmelCase_ :int = model(__A , token_type_ids=__A )
lowerCAmelCase_ :Dict = model(__A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __lowerCAmelCase ( self , __A , __A , __A , __A , __A , __A , __A , __A , __A , ) -> Tuple:
lowerCAmelCase_ :Optional[Any] = True
lowerCAmelCase_ :int = NezhaModel(__A )
model.to(__A )
model.eval()
lowerCAmelCase_ :str = model(
__A , attention_mask=__A , token_type_ids=__A , encoder_hidden_states=__A , encoder_attention_mask=__A , )
lowerCAmelCase_ :Tuple = model(
__A , attention_mask=__A , token_type_ids=__A , encoder_hidden_states=__A , )
lowerCAmelCase_ :str = model(__A , attention_mask=__A , token_type_ids=__A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __lowerCAmelCase ( self , __A , __A , __A , __A , __A , __A , __A ) -> int:
lowerCAmelCase_ :List[Any] = NezhaForMaskedLM(config=__A )
model.to(__A )
model.eval()
lowerCAmelCase_ :List[str] = model(__A , attention_mask=__A , token_type_ids=__A , labels=__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCAmelCase ( self , __A , __A , __A , __A , __A , __A , __A ) -> Optional[int]:
lowerCAmelCase_ :Any = NezhaForNextSentencePrediction(config=__A )
model.to(__A )
model.eval()
lowerCAmelCase_ :str = model(
__A , attention_mask=__A , token_type_ids=__A , labels=__A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def __lowerCAmelCase ( self , __A , __A , __A , __A , __A , __A , __A ) -> Any:
lowerCAmelCase_ :Dict = NezhaForPreTraining(config=__A )
model.to(__A )
model.eval()
lowerCAmelCase_ :str = model(
__A , attention_mask=__A , token_type_ids=__A , labels=__A , next_sentence_label=__A , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def __lowerCAmelCase ( self , __A , __A , __A , __A , __A , __A , __A ) -> List[Any]:
lowerCAmelCase_ :Tuple = NezhaForQuestionAnswering(config=__A )
model.to(__A )
model.eval()
lowerCAmelCase_ :Tuple = model(
__A , attention_mask=__A , token_type_ids=__A , start_positions=__A , end_positions=__A , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowerCAmelCase ( self , __A , __A , __A , __A , __A , __A , __A ) -> Any:
lowerCAmelCase_ :Union[str, Any] = self.num_labels
lowerCAmelCase_ :Tuple = NezhaForSequenceClassification(__A )
model.to(__A )
model.eval()
lowerCAmelCase_ :List[Any] = model(__A , attention_mask=__A , token_type_ids=__A , labels=__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowerCAmelCase ( self , __A , __A , __A , __A , __A , __A , __A ) -> List[Any]:
lowerCAmelCase_ :Tuple = self.num_labels
lowerCAmelCase_ :List[str] = NezhaForTokenClassification(config=__A )
model.to(__A )
model.eval()
lowerCAmelCase_ :int = model(__A , attention_mask=__A , token_type_ids=__A , labels=__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowerCAmelCase ( self , __A , __A , __A , __A , __A , __A , __A ) -> Dict:
lowerCAmelCase_ :Dict = self.num_choices
lowerCAmelCase_ :Dict = NezhaForMultipleChoice(config=__A )
model.to(__A )
model.eval()
lowerCAmelCase_ :Union[str, Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase_ :List[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase_ :Any = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase_ :Dict = model(
__A , attention_mask=__A , token_type_ids=__A , labels=__A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __lowerCAmelCase ( self ) -> str:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class _SCREAMING_SNAKE_CASE ( A__ , A__ , A__ , unittest.TestCase ):
UpperCAmelCase_ :Union[str, Any] = (
(
NezhaModel,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
)
if is_torch_available()
else ()
)
UpperCAmelCase_ :Optional[int] = (
{
"feature-extraction": NezhaModel,
"fill-mask": NezhaForMaskedLM,
"question-answering": NezhaForQuestionAnswering,
"text-classification": NezhaForSequenceClassification,
"token-classification": NezhaForTokenClassification,
"zero-shot": NezhaForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCAmelCase_ :Optional[Any] = True
def __lowerCAmelCase ( self , __A , __A , __A=False ) -> Optional[Any]:
lowerCAmelCase_ :str = super()._prepare_for_class(__A , __A , return_labels=__A )
if return_labels:
if model_class in get_values(__A ):
lowerCAmelCase_ :Dict = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__A )
lowerCAmelCase_ :Optional[int] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__A )
return inputs_dict
def __lowerCAmelCase ( self ) -> Optional[int]:
lowerCAmelCase_ :List[Any] = NezhaModelTester(self )
lowerCAmelCase_ :int = ConfigTester(self , config_class=__A , hidden_size=37 )
def __lowerCAmelCase ( self ) -> str:
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self ) -> Any:
lowerCAmelCase_ :Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def __lowerCAmelCase ( self ) -> Optional[Any]:
lowerCAmelCase_ :Optional[Any] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*__A )
def __lowerCAmelCase ( self ) -> str:
# This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(
            config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , )
def __lowerCAmelCase ( self ) -> Optional[int]:
lowerCAmelCase_ :List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__A )
def __lowerCAmelCase ( self ) -> Tuple:
lowerCAmelCase_ :int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__A )
def __lowerCAmelCase ( self ) -> Optional[Any]:
lowerCAmelCase_ :List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_next_sequence_prediction(*__A )
def __lowerCAmelCase ( self ) -> int:
lowerCAmelCase_ :Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__A )
def __lowerCAmelCase ( self ) -> List[str]:
lowerCAmelCase_ :Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__A )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
lowerCAmelCase_ :List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__A )
def __lowerCAmelCase ( self ) -> List[Any]:
lowerCAmelCase_ :Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__A )
@slow
def __lowerCAmelCase ( self ) -> str:
for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase_ :Tuple = NezhaModel.from_pretrained(__A )
self.assertIsNotNone(__A )
@slow
@require_torch_gpu
def __lowerCAmelCase ( self ) -> int:
lowerCAmelCase_ :int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# NezhaForMultipleChoice behaves incorrectly in JIT environments.
if model_class == NezhaForMultipleChoice:
return
lowerCAmelCase_ :int = True
lowerCAmelCase_ :Optional[int] = model_class(config=__A )
lowerCAmelCase_ :Any = self._prepare_for_class(__A , __A )
lowerCAmelCase_ :Optional[int] = torch.jit.trace(
__A , (inputs_dict["""input_ids"""].to("""cpu""" ), inputs_dict["""attention_mask"""].to("""cpu""" )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(__A , os.path.join(__A , """bert.pt""" ) )
lowerCAmelCase_ :Union[str, Any] = torch.jit.load(os.path.join(__A , """bert.pt""" ) , map_location=__A )
loaded(inputs_dict["""input_ids"""].to(__A ) , inputs_dict["""attention_mask"""].to(__A ) )
@require_torch
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@slow
def __lowerCAmelCase ( self ) -> Tuple:
lowerCAmelCase_ :Optional[int] = NezhaModel.from_pretrained("""sijunhe/nezha-cn-base""" )
lowerCAmelCase_ :List[str] = torch.tensor([[0, 1, 2, 3, 4, 5]] )
lowerCAmelCase_ :Union[str, Any] = torch.tensor([[0, 1, 1, 1, 1, 1]] )
with torch.no_grad():
lowerCAmelCase_ :Dict = model(__A , attention_mask=__A )[0]
lowerCAmelCase_ :int = torch.Size((1, 6, 768) )
self.assertEqual(output.shape , __A )
lowerCAmelCase_ :Optional[Any] = torch.tensor([[[0.0_6_8_5, 0.2_4_4_1, 0.1_1_0_2], [0.0_6_0_0, 0.1_9_0_6, 0.1_3_4_9], [0.0_2_2_1, 0.0_8_1_9, 0.0_5_8_6]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __A , atol=1E-4 ) )
@slow
def __lowerCAmelCase ( self ) -> Tuple:
lowerCAmelCase_ :Dict = NezhaForMaskedLM.from_pretrained("""sijunhe/nezha-cn-base""" )
lowerCAmelCase_ :Tuple = torch.tensor([[0, 1, 2, 3, 4, 5]] )
lowerCAmelCase_ :Optional[int] = torch.tensor([[1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
lowerCAmelCase_ :Tuple = model(__A , attention_mask=__A )[0]
lowerCAmelCase_ :Dict = torch.Size((1, 6, 2_1128) )
self.assertEqual(output.shape , __A )
lowerCAmelCase_ :str = torch.tensor(
[[-2.7_9_3_9, -1.7_9_0_2, -2.2_1_8_9], [-2.8_5_8_5, -1.8_9_0_8, -2.3_7_2_3], [-2.6_4_9_9, -1.7_7_5_0, -2.2_5_5_8]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __A , atol=1E-4 ) )
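# Editorial usage sketch (hedged, mirrors the integration tests above):
#   model = NezhaModel.from_pretrained("sijunhe/nezha-cn-base").eval()
#   out = model(torch.tensor([[0, 1, 2]]), attention_mask=torch.tensor([[1, 1, 1]]))
#   out.last_hidden_state.shape  # -> torch.Size([1, 3, 768])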
| 362 |
"""simple docstring"""
import os
from math import logaa
def _snake_case ( lowercase__ : str = "base_exp.txt" ) -> int:
'''simple docstring'''
    largest :float = 0
    result :Union[str, Any] = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__ ) , lowercase__ ) ) ):
        a , x = list(map(int , line.split(""",""" ) ) )
        if x * logaa(a ) > largest:
            largest = x * logaa(a )
            result = i + 1
    return result
if __name__ == "__main__":
    print(_snake_case())
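    # Worked micro-example (illustrative, not from base_exp.txt): comparing
    # 2**11 with 3**7 reduces to 11*log10(2) ≈ 3.311 vs 7*log10(3) ≈ 3.340,
    # so the line holding "3,7" would win.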
| 1 | 0 |
"""simple docstring"""
import copy
import tempfile
import unittest
from transformers import MaMaaaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer
from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder
def _snake_case ( config : Optional[int] , input_ids : str , decoder_input_ids : Optional[Any] , attention_mask : List[str]=None , decoder_attention_mask : str=None , head_mask : List[Any]=None , decoder_head_mask : Dict=None , cross_attn_head_mask : List[Any]=None , ) -> List[Any]:
'''simple docstring'''
if attention_mask is None:
lowerCAmelCase_ :str = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
lowerCAmelCase_ :List[str] = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
lowerCAmelCase_ :Dict = torch.ones(config.encoder_layers , config.encoder_attention_heads , device=lowercase__ )
if decoder_head_mask is None:
lowerCAmelCase_ :List[str] = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=lowercase__ )
if cross_attn_head_mask is None:
lowerCAmelCase_ :Union[str, Any] = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=lowercase__ )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
class _SCREAMING_SNAKE_CASE :
def __init__( self , __A , __A=13 , __A=7 , __A=True , __A=False , __A=99 , __A=16 , __A=2 , __A=4 , __A=4 , __A="relu" , __A=0.1 , __A=0.1 , __A=0.0 , __A=0.0 , __A=20 , __A=2 , __A=1 , __A=0 , ) -> int:
lowerCAmelCase_ :Union[str, Any] = parent
lowerCAmelCase_ :Tuple = batch_size
lowerCAmelCase_ :Optional[Any] = seq_length
lowerCAmelCase_ :str = is_training
lowerCAmelCase_ :str = use_labels
lowerCAmelCase_ :List[str] = vocab_size
lowerCAmelCase_ :Dict = hidden_size
lowerCAmelCase_ :Optional[Any] = num_hidden_layers
lowerCAmelCase_ :Any = num_attention_heads
lowerCAmelCase_ :Union[str, Any] = intermediate_size
lowerCAmelCase_ :Optional[int] = hidden_act
lowerCAmelCase_ :int = hidden_dropout_prob
lowerCAmelCase_ :Tuple = attention_probs_dropout_prob
lowerCAmelCase_ :Optional[Any] = encoder_layerdrop
lowerCAmelCase_ :Optional[int] = decoder_layerdrop
lowerCAmelCase_ :Dict = max_position_embeddings
lowerCAmelCase_ :Optional[int] = eos_token_id
lowerCAmelCase_ :Optional[int] = pad_token_id
lowerCAmelCase_ :List[str] = bos_token_id
def __lowerCAmelCase ( self ) -> Optional[Any]:
lowerCAmelCase_ :Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase_ :Tuple = self.eos_token_id # Eos Token
lowerCAmelCase_ :List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for M2M100 the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
# pad tokens in them, which results in incorrect seq_lenth and which in turn results in
# position_ids being off by num_pad_tokens in past input
lowerCAmelCase_ :List[str] = input_ids.clamp(self.pad_token_id + 1 )
lowerCAmelCase_ :int = decoder_input_ids.clamp(self.pad_token_id + 1 )
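        # Editorial illustration (hypothetical ids): with pad_token_id defaulting
        # to 1 here, clamp(self.pad_token_id + 1) rewrites any 0/1 ids to 2, e.g.
        # [0, 5, 1, 9] -> [2, 5, 2, 9], so no pad token appears mid-sequence.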
lowerCAmelCase_ :List[Any] = self.get_config()
lowerCAmelCase_ :int = prepare_mam_aaa_inputs_dict(__A , __A , __A )
return config, inputs_dict
def __lowerCAmelCase ( self ) -> int:
return MaMaaaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , encoder_layerdrop=self.encoder_layerdrop , decoder_layerdrop=self.decoder_layerdrop , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , )
def __lowerCAmelCase ( self ) -> Any:
lowerCAmelCase_ :Union[str, Any] = self.prepare_config_and_inputs()
return config, inputs_dict
def __lowerCAmelCase ( self , __A , __A ) -> Any:
lowerCAmelCase_ :str = MaMaaaModel(config=__A ).get_decoder().to(__A ).eval()
lowerCAmelCase_ :Union[str, Any] = inputs_dict["""input_ids"""]
lowerCAmelCase_ :Optional[Any] = inputs_dict["""attention_mask"""]
lowerCAmelCase_ :Any = inputs_dict["""head_mask"""]
# first forward pass
lowerCAmelCase_ :int = model(__A , attention_mask=__A , head_mask=__A , use_cache=__A )
lowerCAmelCase_ :List[Any] = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
lowerCAmelCase_ :str = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowerCAmelCase_ :List[Any] = ids_tensor((self.batch_size, 3) , 2 )
# append to next input_ids and
lowerCAmelCase_ :List[str] = torch.cat([input_ids, next_tokens] , dim=-1 )
lowerCAmelCase_ :str = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
lowerCAmelCase_ :Dict = model(__A , attention_mask=__A )["""last_hidden_state"""]
lowerCAmelCase_ :Dict = model(__A , attention_mask=__A , past_key_values=__A )[
"""last_hidden_state"""
]
# select random slice
lowerCAmelCase_ :Optional[int] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowerCAmelCase_ :int = output_from_no_past[:, -3:, random_slice_idx].detach()
lowerCAmelCase_ :Any = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__A , __A , atol=1E-2 ) )
def __lowerCAmelCase ( self , __A , __A ) -> int:
lowerCAmelCase_ :List[str] = MaMaaaModel(config=__A ).to(__A ).eval()
lowerCAmelCase_ :Dict = model(**__A )
lowerCAmelCase_ :Tuple = outputs.encoder_last_hidden_state
lowerCAmelCase_ :Union[str, Any] = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase_ :Optional[int] = model.get_encoder()
encoder.save_pretrained(__A )
lowerCAmelCase_ :Union[str, Any] = MaMaaaEncoder.from_pretrained(__A ).to(__A )
lowerCAmelCase_ :List[Any] = encoder(inputs_dict["""input_ids"""] , attention_mask=inputs_dict["""attention_mask"""] )[
0
]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3 )
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase_ :Union[str, Any] = model.get_decoder()
decoder.save_pretrained(__A )
lowerCAmelCase_ :Any = MaMaaaDecoder.from_pretrained(__A ).to(__A )
lowerCAmelCase_ :Tuple = decoder(
input_ids=inputs_dict["""decoder_input_ids"""] , attention_mask=inputs_dict["""decoder_attention_mask"""] , encoder_hidden_states=__A , encoder_attention_mask=inputs_dict["""attention_mask"""] , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3 )
@require_torch
class _SCREAMING_SNAKE_CASE ( A__ , A__ , A__ , unittest.TestCase ):
UpperCAmelCase_ :Union[str, Any] = (
(
MaMaaaModel,
MaMaaaForConditionalGeneration,
)
if is_torch_available()
else ()
)
UpperCAmelCase_ :str = (MaMaaaForConditionalGeneration,) if is_torch_available() else ()
UpperCAmelCase_ :Dict = (
{
"conversational": MaMaaaForConditionalGeneration,
"feature-extraction": MaMaaaModel,
"summarization": MaMaaaForConditionalGeneration,
"text2text-generation": MaMaaaForConditionalGeneration,
"translation": MaMaaaForConditionalGeneration,
}
if is_torch_available()
else {}
)
UpperCAmelCase_ :Optional[Any] = True
UpperCAmelCase_ :Optional[int] = True
UpperCAmelCase_ :List[str] = False
UpperCAmelCase_ :Optional[Any] = False
def __lowerCAmelCase ( self , __A , __A , __A , __A , __A ) -> Optional[int]:
if pipeline_test_casse_name == "TranslationPipelineTests":
# Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
# `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer.
return True
return False
def __lowerCAmelCase ( self ) -> List[Any]:
lowerCAmelCase_ :str = MaMaaaModelTester(self )
lowerCAmelCase_ :Optional[Any] = ConfigTester(self , config_class=__A )
def __lowerCAmelCase ( self ) -> List[Any]:
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self ) -> Optional[Any]:
lowerCAmelCase_ :str = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
lowerCAmelCase_ :Union[str, Any] = model_class(__A )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__A )
lowerCAmelCase_ :Optional[Any] = model_class.from_pretrained(__A , output_loading_info=__A )
self.assertEqual(info["""missing_keys"""] , [] )
def __lowerCAmelCase ( self ) -> List[str]:
lowerCAmelCase_ :int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*__A )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
lowerCAmelCase_ :Dict = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*__A )
def __lowerCAmelCase ( self ) -> Dict:
lowerCAmelCase_ :Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration):
lowerCAmelCase_ :Any = model_class(__A )
model.to(__A )
model.eval()
lowerCAmelCase_ :str = copy.deepcopy(self._prepare_for_class(__A , __A ) )
if not self.is_encoder_decoder:
lowerCAmelCase_ :List[str] = inputs["""input_ids"""]
del inputs["input_ids"]
else:
lowerCAmelCase_ :Tuple = inputs["""input_ids"""]
lowerCAmelCase_ :Tuple = inputs.get("""decoder_input_ids""" , __A )
del inputs["input_ids"]
inputs.pop("""decoder_input_ids""" , __A )
lowerCAmelCase_ :Any = model.get_input_embeddings()
if not self.is_encoder_decoder:
lowerCAmelCase_ :Optional[int] = wte(__A )
else:
lowerCAmelCase_ :Tuple = wte(__A )
lowerCAmelCase_ :List[str] = wte(__A )
with torch.no_grad():
model(**__A )[0]
def __lowerCAmelCase ( self ) -> int:
lowerCAmelCase_ :List[Any] = self.model_tester.prepare_config_and_inputs()
lowerCAmelCase_ :str = input_dict["""input_ids"""]
lowerCAmelCase_ :Optional[int] = input_ids.ne(1 ).to(__A )
lowerCAmelCase_ :int = MaMaaaForConditionalGeneration(__A ).eval().to(__A )
if torch_device == "cuda":
model.half()
model.generate(__A , attention_mask=__A )
model.generate(num_beams=4 , do_sample=__A , early_stopping=__A , num_return_sequences=3 )
def _snake_case ( lowercase__ : int ) -> List[str]:
'''simple docstring'''
return torch.tensor(lowercase__ , dtype=torch.long , device=lowercase__ )
__UpperCAmelCase = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@cached_property
def __lowerCAmelCase ( self ) -> Dict:
return MaMaaaTokenizer.from_pretrained("""facebook/m2m100_418M""" )
def __lowerCAmelCase ( self ) -> Dict:
lowerCAmelCase_ :Optional[int] = MaMaaaModel.from_pretrained("""facebook/m2m100_418M""" ).to(__A )
lowerCAmelCase_ :Dict = _long_tensor([[12_8028, 98, 12, 3_0527, 2732, 159, 7755, 6_1904, 3_9144, 38, 2]] )
lowerCAmelCase_ :Optional[int] = _long_tensor([[2, 12_8028, 98, 12, 3_0527, 2732, 159, 7755, 6_1904, 3_9144, 38]] )
lowerCAmelCase_ :List[str] = prepare_mam_aaa_inputs_dict(model.config , __A , __A )
with torch.no_grad():
lowerCAmelCase_ :Union[str, Any] = model(**__A )[0]
lowerCAmelCase_ :Union[str, Any] = torch.Size((1, 11, 1024) )
self.assertEqual(output.shape , __A )
# change to expected output here
lowerCAmelCase_ :List[Any] = torch.tensor(
[[-0.7_7_8_0, -0.1_6_7_6, 0.1_0_3_8], [-6.7_5_5_6, -1.3_9_9_2, 0.0_5_6_7], [-7.5_3_8_3, -0.5_9_2_0, -0.2_7_7_9]] , device=__A )
self.assertTrue(torch.allclose(output[:, :3, :3] , __A , atol=__A ) )
def __lowerCAmelCase ( self ) -> List[Any]:
lowerCAmelCase_ :Dict = MaMaaaForConditionalGeneration.from_pretrained("""facebook/m2m100_418M""" ).to(__A )
# change to intended input
lowerCAmelCase_ :Tuple = _long_tensor([[12_8028, 98, 12, 3_0527, 2732, 159, 7755, 6_1904, 3_9144, 38, 2]] )
lowerCAmelCase_ :Optional[Any] = _long_tensor([[2, 12_8028, 98, 12, 3_0527, 2732, 159, 7755, 6_1904, 3_9144, 38]] )
lowerCAmelCase_ :str = prepare_mam_aaa_inputs_dict(model.config , __A , __A )
with torch.no_grad():
lowerCAmelCase_ :str = model(**__A )[0]
lowerCAmelCase_ :str = torch.Size((1, 11, model.config.vocab_size) )
self.assertEqual(output.shape , __A )
# change to expected output here
lowerCAmelCase_ :Union[str, Any] = torch.tensor(
[[-1.0_4_4_8, -1.0_4_1_1, 3.7_9_9_2], [-3.2_1_9_1, -3.2_3_8_6, -1.3_4_5_1], [-3.6_2_1_0, -3.5_9_9_3, 0.4_9_2_5]] , device=__A )
self.assertTrue(torch.allclose(output[:, :3, :3] , __A , atol=__A ) )
def __lowerCAmelCase ( self ) -> Dict:
lowerCAmelCase_ :List[str] = MaMaaaForConditionalGeneration.from_pretrained("""facebook/m2m100_418M""" ).to(__A )
lowerCAmelCase_ :Tuple = MaMaaaTokenizer.from_pretrained("""facebook/m2m100_418M""" , src_lang="""fr""" , tgt_lang="""en""" )
lowerCAmelCase_ :Optional[Any] = [
"""L'affaire NSA souligne l'absence totale de débat sur le renseignement""",
"""Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.""",
"""Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent"""
""" Fabius convoque l'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de"""
""" l'ampleur de la surveillance américaine sur l'ensemble des communications en France.""",
]
        # The test below checks that we don't add any hypotheses outside of the top n_beams
lowerCAmelCase_ :List[str] = tokenizer(__A , padding=__A , return_tensors="""pt""" )
lowerCAmelCase_ :Dict = model.generate(
input_ids=dct["""input_ids"""].to(__A ) , attention_mask=dct["""attention_mask"""].to(__A ) , num_beams=5 , forced_bos_token_id=tokenizer.get_lang_id("""en""" ) , )
lowerCAmelCase_ :List[str] = [
"""The NSA case highlights the total absence of intelligence debate""",
"""I think there are two levels of response from the French government.""",
"""When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S."""
""" Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all"""
""" communications in France.""",
]
lowerCAmelCase_ :Dict = tokenizer.batch_decode(
hypotheses_batch.tolist() , clean_up_tokenization_spaces=__A , skip_special_tokens=__A )
assert generated == expected_en
"""simple docstring"""
import itertools
import math
def is_prime(number: int) -> bool:
    """Checks whether `number` is prime in O(sqrt(n)) time."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, even numbers, and multiples of 3 are not prime
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator():
    """Yields the primes in ascending order."""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(nth: int = 10_001) -> int:
    """Returns the `nth` prime number (Project Euler #7)."""
    return next(itertools.islice(prime_generator(), nth - 1, nth))
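

# A short, self-checking usage sketch for the helpers above (no outside
# assumptions: 13 is the sixth prime, so `solution(6)` must return it):
def _demo_primes() -> None:
    assert [n for n in range(15) if is_prime(n)] == [2, 3, 5, 7, 11, 13]
    gen = prime_generator()
    assert [next(gen) for _ in range(6)] == [2, 3, 5, 7, 11, 13]
    assert solution(6) == 13
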
if __name__ == "__main__":
print(F"""{solution() = }""")
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
__UpperCAmelCase = logging.get_logger(__name__)
class FlavaFeatureExtractor(FlavaImageProcessor ):
    def __init__( self , *args , **kwargs ) -> None:
        warnings.warn(
            """The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
            """ use FlavaImageProcessor instead.""" , FutureWarning , )
        super().__init__(*args , **kwargs )
"""simple docstring"""
def solution(length: int = 50) -> int:
    """
    Counts the ways a row measuring `length` units can be filled with red
    blocks of length at least three, any two blocks separated by at least one
    black square (Project Euler #114).
    """
    ways_number = [1] * (length + 1)

    for row_length in range(3, length + 1):
        for block_length in range(3, row_length + 1):
            for block_start in range(row_length - block_length):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]

            ways_number[row_length] += 1

    return ways_number[length]
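

# A quick sanity check against small cases: a row of length three admits only
# the empty row or a single block of three (2 ways), and the Project Euler
# #114 statement gives exactly 17 arrangements for a row of length seven.
def _demo_block_ways() -> None:
    assert solution(3) == 2
    assert solution(7) == 17
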
if __name__ == "__main__":
print(F"""{solution() = }""")
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor
def get_config(checkpoint_url ):
    """Builds a SwinaSRConfig matching the model variant encoded in the checkpoint URL."""
    config = SwinaSRConfig()
    if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        config.upscale = 4
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        config.upscale = 4
        config.image_size = 4_8
        config.upsampler = """pixelshuffle_aux"""
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        config.depths = [6, 6, 6, 6]
        config.embed_dim = 6_0
        config.num_heads = [6, 6, 6, 6]
        config.upsampler = """pixelshuffledirect"""
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        config.upscale = 4
        config.upsampler = """nearest+conv"""
    elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
        config.num_channels = 1
        config.upscale = 1
        config.image_size = 1_2_6
        config.window_size = 7
        config.img_range = 255.0
        config.upsampler = """"""
    return config
def rename_key(name , config ):
    """Maps an original Swin2SR state-dict key to its transformers-style name."""
    if "patch_embed.proj" in name and "layers" not in name:
        name = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" )
    if "patch_embed.norm" in name:
        name = name.replace("""patch_embed.norm""" , """embeddings.patch_embeddings.layernorm""" )
    if "layers" in name:
        name = name.replace("""layers""" , """encoder.stages""" )
    if "residual_group.blocks" in name:
        name = name.replace("""residual_group.blocks""" , """layers""" )
    if "attn.proj" in name:
        name = name.replace("""attn.proj""" , """attention.output.dense""" )
    if "attn" in name:
        name = name.replace("""attn""" , """attention.self""" )
    if "norm1" in name:
        name = name.replace("""norm1""" , """layernorm_before""" )
    if "norm2" in name:
        name = name.replace("""norm2""" , """layernorm_after""" )
    if "mlp.fc1" in name:
        name = name.replace("""mlp.fc1""" , """intermediate.dense""" )
    if "mlp.fc2" in name:
        name = name.replace("""mlp.fc2""" , """output.dense""" )
    if "q_bias" in name:
        name = name.replace("""q_bias""" , """query.bias""" )
    if "k_bias" in name:
        name = name.replace("""k_bias""" , """key.bias""" )
    if "v_bias" in name:
        name = name.replace("""v_bias""" , """value.bias""" )
    if "cpb_mlp" in name:
        name = name.replace("""cpb_mlp""" , """continuous_position_bias_mlp""" )
    if "patch_embed.proj" in name:
        name = name.replace("""patch_embed.proj""" , """patch_embed.projection""" )
    if name == "norm.weight":
        name = """layernorm.weight"""
    if name == "norm.bias":
        name = """layernorm.bias"""
    if "conv_first" in name:
        name = name.replace("""conv_first""" , """first_convolution""" )
    if (
        "upsample" in name
        or "conv_before_upsample" in name
        or "conv_bicubic" in name
        or "conv_up" in name
        or "conv_hr" in name
        or "conv_last" in name
        or "aux" in name
    ):
        # heads
        if "conv_last" in name:
            name = name.replace("""conv_last""" , """final_convolution""" )
        if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
            if "conv_before_upsample.0" in name:
                name = name.replace("""conv_before_upsample.0""" , """conv_before_upsample""" )
            if "upsample.0" in name:
                name = name.replace("""upsample.0""" , """upsample.convolution_0""" )
            if "upsample.2" in name:
                name = name.replace("""upsample.2""" , """upsample.convolution_1""" )
            name = """upsample.""" + name
        elif config.upsampler == "pixelshuffledirect":
            name = name.replace("""upsample.0.weight""" , """upsample.conv.weight""" )
            name = name.replace("""upsample.0.bias""" , """upsample.conv.bias""" )
        else:
            pass
    else:
        name = """swin2sr.""" + name
    return name
def convert_state_dict(orig_state_dict , config ):
    """Splits fused qkv projections into query/key/value and remaps all other keys."""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "qkv" in key:
            key_split = key.split(""".""" )
            stage_num = int(key_split[1] )
            block_num = int(key_split[4] )
            dim = config.embed_dim
            # the target key names below follow the same mapping rules as `rename_key` above
            if "weight" in key:
                orig_state_dict[
                    f"""swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.weight"""
                ] = val[:dim, :]
                orig_state_dict[
                    f"""swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.weight"""
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"""swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.weight"""
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"""swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.bias"""
                ] = val[:dim]
                orig_state_dict[
                    f"""swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.bias"""
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"""swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.bias"""
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key , config )] = val

    return orig_state_dict
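

# The "qkv" branch above slices one fused attention projection into separate
# query/key/value tensors. A self-contained sketch of that slicing with a
# hypothetical embedding dim of 4 (illustration only, not tied to any real
# checkpoint):
def _demo_qkv_split() -> None:
    dim = 4
    fused = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)
    query, key, value = fused[:dim, :], fused[dim : dim * 2, :], fused[-dim:, :]
    assert query.shape == key.shape == value.shape == (dim, dim)
    assert torch.equal(torch.cat([query, key, value], dim=0), fused)
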
def convert_swinasr_checkpoint(checkpoint_url , pytorch_dump_folder_path , push_to_hub ):
    """Converts an original Swin2SR checkpoint into a transformers model, verifies a
    slice of its output, and optionally saves or uploads the result."""
    config = get_config(checkpoint_url )
    model = SwinaSRForImageSuperResolution(config )
    model.eval()
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location="""cpu""" )
    new_state_dict = convert_state_dict(state_dict , config )
    missing_keys , unexpected_keys = model.load_state_dict(new_state_dict , strict=False )
    if len(missing_keys ) > 0:
        raise ValueError("""Missing keys when converting: {}""".format(missing_keys ) )
    for key in unexpected_keys:
        if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
            raise ValueError(f"""Unexpected key {key} in state_dict""" )
    # verify values
    url = """https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true"""
    image = Image.open(requests.get(url , stream=True ).raw ).convert("""RGB""" )
    processor = SwinaSRImageProcessor()
    # pixel_values = processor(image, return_tensors="pt").pixel_values
    image_size = 1_2_6 if """Jpeg""" in checkpoint_url else 2_5_6
    transforms = Compose(
        [
            Resize((image_size, image_size) ),
            ToTensor(),
            Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ),
        ] )
    pixel_values = transforms(image ).unsqueeze(0 )
    if config.num_channels == 1:
        pixel_values = pixel_values[:, 0, :, :].unsqueeze(1 )
    outputs = model(pixel_values )
    # assert values
    if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 5_1_2, 5_1_2] )
        expected_slice = torch.tensor(
            [[-0.7087, -0.7138, -0.6721], [-0.8340, -0.8095, -0.7298], [-0.9149, -0.8414, -0.7940]] )
    elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 1_0_2_4, 1_0_2_4] )
        expected_slice = torch.tensor(
            [[-0.7775, -0.8105, -0.8933], [-0.7764, -0.8356, -0.9225], [-0.7976, -0.8686, -0.9579]] )
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        # TODO values didn't match exactly here
        expected_shape = torch.Size([1, 3, 1_0_2_4, 1_0_2_4] )
        expected_slice = torch.tensor(
            [[-0.8035, -0.7504, -0.7491], [-0.8538, -0.8124, -0.7782], [-0.8804, -0.8651, -0.8493]] )
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 5_1_2, 5_1_2] )
        expected_slice = torch.tensor(
            [[-0.7669, -0.8662, -0.8767], [-0.8810, -0.9962, -0.9820], [-0.9340, -1.0322, -1.1149]] )
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 1_0_2_4, 1_0_2_4] )
        expected_slice = torch.tensor(
            [[-0.5238, -0.5557, -0.6321], [-0.6016, -0.5903, -0.6391], [-0.6244, -0.6334, -0.6889]] )
    assert (
        outputs.reconstruction.shape == expected_shape
    ), f"""Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}"""
    assert torch.allclose(outputs.reconstruction[0, 0, :3, :3] , expected_slice , atol=1E-3 )
    print("""Looks ok!""" )
    url_to_name = {
        """https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth""": (
            """swin2SR-classical-sr-x2-64"""
        ),
        """https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth""": (
            """swin2SR-classical-sr-x4-64"""
        ),
        """https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth""": (
            """swin2SR-compressed-sr-x4-48"""
        ),
        """https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth""": (
            """swin2SR-lightweight-x2-64"""
        ),
        """https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth""": (
            """swin2SR-realworld-sr-x4-64-bsrgan-psnr"""
        ),
    }
    model_name = url_to_name[checkpoint_url]
    if pytorch_dump_folder_path is not None:
        print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
        model.save_pretrained(pytorch_dump_folder_path )
        print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
        processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        model.push_to_hub(f"""caidas/{model_name}""" )
        processor.push_to_hub(f"""caidas/{model_name}""" )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth',
type=str,
help='URL of the original Swin2SR checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument('--push_to_hub', action='store_true', help='Whether to push the converted model to the hub.')
__UpperCAmelCase = parser.parse_args()
convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
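
# Example invocation (the script file name below is hypothetical; the URL is
# this script's default checkpoint):
#
#   python convert_swin2sr_original_to_pytorch.py \
#       --checkpoint_url https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth \
#       --pytorch_dump_folder_path ./swin2SR-classical-sr-x2-64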
"""simple docstring"""
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
StableDiffusionControlNetImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class _SCREAMING_SNAKE_CASE ( A__ , A__ , A__ , unittest.TestCase ):
UpperCAmelCase_ :int = StableDiffusionControlNetImgaImgPipeline
UpperCAmelCase_ :str = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
UpperCAmelCase_ :Tuple = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
UpperCAmelCase_ :List[str] = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"} )
UpperCAmelCase_ :Optional[Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS
def __lowerCAmelCase ( self ) -> List[str]:
torch.manual_seed(0 )
lowerCAmelCase_ :Tuple = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
torch.manual_seed(0 )
lowerCAmelCase_ :List[Any] = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
torch.manual_seed(0 )
lowerCAmelCase_ :Optional[Any] = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=__A , set_alpha_to_one=__A , )
torch.manual_seed(0 )
lowerCAmelCase_ :List[str] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
lowerCAmelCase_ :Union[str, Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
lowerCAmelCase_ :List[Any] = CLIPTextModel(__A )
lowerCAmelCase_ :int = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
lowerCAmelCase_ :Union[str, Any] = {
"""unet""": unet,
"""controlnet""": controlnet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def __lowerCAmelCase ( self , __A , __A=0 ) -> List[str]:
if str(__A ).startswith("""mps""" ):
lowerCAmelCase_ :Tuple = torch.manual_seed(__A )
else:
lowerCAmelCase_ :Optional[int] = torch.Generator(device=__A ).manual_seed(__A )
lowerCAmelCase_ :List[Any] = 2
lowerCAmelCase_ :int = randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=__A , device=torch.device(__A ) , )
lowerCAmelCase_ :Optional[int] = floats_tensor(control_image.shape , rng=random.Random(__A ) ).to(__A )
lowerCAmelCase_ :Tuple = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowerCAmelCase_ :List[Any] = Image.fromarray(np.uinta(__A ) ).convert("""RGB""" ).resize((64, 64) )
lowerCAmelCase_ :Union[str, Any] = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
"""image""": image,
"""control_image""": control_image,
}
return inputs
def __lowerCAmelCase ( self ) -> int:
return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 )
def __lowerCAmelCase ( self ) -> List[str]:
self._test_inference_batch_single_identical(expected_max_diff=2E-3 )
class _SCREAMING_SNAKE_CASE ( A__ , A__ , unittest.TestCase ):
UpperCAmelCase_ :List[str] = StableDiffusionControlNetImgaImgPipeline
UpperCAmelCase_ :int = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
UpperCAmelCase_ :str = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
UpperCAmelCase_ :int = frozenset([] ) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
def __lowerCAmelCase ( self ) -> Optional[int]:
torch.manual_seed(0 )
lowerCAmelCase_ :Dict = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
torch.manual_seed(0 )
        def init_weights(m ):
            if isinstance(m , torch.nn.Conv2d ):
                torch.nn.init.normal_(m.weight )
                m.bias.data.fill_(1.0 )
lowerCAmelCase_ :List[Any] = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
controlneta.controlnet_down_blocks.apply(__A )
torch.manual_seed(0 )
lowerCAmelCase_ :Optional[Any] = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
controlneta.controlnet_down_blocks.apply(__A )
torch.manual_seed(0 )
lowerCAmelCase_ :Optional[Any] = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=__A , set_alpha_to_one=__A , )
torch.manual_seed(0 )
lowerCAmelCase_ :Optional[int] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
lowerCAmelCase_ :Optional[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
lowerCAmelCase_ :str = CLIPTextModel(__A )
lowerCAmelCase_ :str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
lowerCAmelCase_ :Optional[Any] = MultiControlNetModel([controlneta, controlneta] )
lowerCAmelCase_ :List[Any] = {
"""unet""": unet,
"""controlnet""": controlnet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def __lowerCAmelCase ( self , __A , __A=0 ) -> str:
if str(__A ).startswith("""mps""" ):
lowerCAmelCase_ :Optional[Any] = torch.manual_seed(__A )
else:
lowerCAmelCase_ :List[Any] = torch.Generator(device=__A ).manual_seed(__A )
lowerCAmelCase_ :Optional[Any] = 2
lowerCAmelCase_ :Optional[int] = [
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=__A , device=torch.device(__A ) , ),
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=__A , device=torch.device(__A ) , ),
]
lowerCAmelCase_ :int = floats_tensor(control_image[0].shape , rng=random.Random(__A ) ).to(__A )
lowerCAmelCase_ :Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowerCAmelCase_ :List[Any] = Image.fromarray(np.uinta(__A ) ).convert("""RGB""" ).resize((64, 64) )
lowerCAmelCase_ :List[str] = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
"""image""": image,
"""control_image""": control_image,
}
return inputs
def __lowerCAmelCase ( self ) -> Optional[Any]:
lowerCAmelCase_ :List[str] = self.get_dummy_components()
lowerCAmelCase_ :Tuple = self.pipeline_class(**__A )
pipe.to(__A )
lowerCAmelCase_ :Union[str, Any] = 1_0.0
lowerCAmelCase_ :Union[str, Any] = 4
lowerCAmelCase_ :Tuple = self.get_dummy_inputs(__A )
lowerCAmelCase_ :List[str] = steps
lowerCAmelCase_ :int = scale
lowerCAmelCase_ :Union[str, Any] = pipe(**__A )[0]
lowerCAmelCase_ :Any = self.get_dummy_inputs(__A )
lowerCAmelCase_ :str = steps
lowerCAmelCase_ :str = scale
lowerCAmelCase_ :Tuple = pipe(**__A , control_guidance_start=0.1 , control_guidance_end=0.2 )[0]
lowerCAmelCase_ :Optional[Any] = self.get_dummy_inputs(__A )
lowerCAmelCase_ :Union[str, Any] = steps
lowerCAmelCase_ :Union[str, Any] = scale
lowerCAmelCase_ :str = pipe(**__A , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7] )[0]
lowerCAmelCase_ :List[str] = self.get_dummy_inputs(__A )
lowerCAmelCase_ :Optional[int] = steps
lowerCAmelCase_ :Tuple = scale
lowerCAmelCase_ :str = pipe(**__A , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8] )[0]
# make sure that all outputs are different
assert np.sum(np.abs(output_a - output_a ) ) > 1E-3
assert np.sum(np.abs(output_a - output_a ) ) > 1E-3
assert np.sum(np.abs(output_a - output_a ) ) > 1E-3
def __lowerCAmelCase ( self ) -> Dict:
return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def __lowerCAmelCase ( self ) -> Tuple:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 )
def __lowerCAmelCase ( self ) -> Optional[int]:
self._test_inference_batch_single_identical(expected_max_diff=2E-3 )
def __lowerCAmelCase ( self ) -> List[str]:
lowerCAmelCase_ :str = self.get_dummy_components()
lowerCAmelCase_ :Tuple = self.pipeline_class(**__A )
pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
with tempfile.TemporaryDirectory() as tmpdir:
try:
# save_pretrained is not implemented for Multi-ControlNet
pipe.save_pretrained(__A )
except NotImplementedError:
pass
@slow
@require_torch_gpu
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __lowerCAmelCase ( self ) -> int:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self ) -> str:
lowerCAmelCase_ :Any = ControlNetModel.from_pretrained("""lllyasviel/sd-controlnet-canny""" )
lowerCAmelCase_ :int = StableDiffusionControlNetImgaImgPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , safety_checker=__A , controlnet=__A )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=__A )
lowerCAmelCase_ :List[str] = torch.Generator(device="""cpu""" ).manual_seed(0 )
lowerCAmelCase_ :List[Any] = """evil space-punk bird"""
lowerCAmelCase_ :List[Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png""" ).resize((512, 512) )
lowerCAmelCase_ :int = load_image(
"""https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png""" ).resize((512, 512) )
lowerCAmelCase_ :Union[str, Any] = pipe(
__A , __A , control_image=__A , generator=__A , output_type="""np""" , num_inference_steps=50 , strength=0.6 , )
lowerCAmelCase_ :Tuple = output.images[0]
assert image.shape == (512, 512, 3)
lowerCAmelCase_ :Tuple = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy""" )
assert np.abs(expected_image - image ).max() < 9E-2
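

# A rough, self-contained illustration of the windowing exercised above:
# `control_guidance_start` / `control_guidance_end` bound the fraction of the
# denoising schedule during which a ControlNet contributes (a conceptual
# sketch only, not the diffusers internals):
def _controlnet_active(step: int, num_steps: int, start: float, end: float) -> bool:
    fraction = step / num_steps
    return start <= fraction <= end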
"""simple docstring"""
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class _SCREAMING_SNAKE_CASE :
def __init__( self , __A , __A=13 , __A=7 , __A=False , __A=True , __A=False , __A=True , __A=33 , __A=32 , __A=5 , __A=4 , __A=37 , __A="gelu" , __A=0.1 , __A=0.1 , __A=512 , __A=16 , __A=2 , __A=0.0_2 , __A=3 , __A=4 , __A=None , ) -> Dict:
lowerCAmelCase_ :Dict = parent
lowerCAmelCase_ :List[Any] = batch_size
lowerCAmelCase_ :Tuple = seq_length
lowerCAmelCase_ :Optional[Any] = is_training
lowerCAmelCase_ :str = use_input_mask
lowerCAmelCase_ :Optional[int] = use_token_type_ids
lowerCAmelCase_ :List[str] = use_labels
lowerCAmelCase_ :Union[str, Any] = vocab_size
lowerCAmelCase_ :List[Any] = hidden_size
lowerCAmelCase_ :Optional[Any] = num_hidden_layers
lowerCAmelCase_ :Dict = num_attention_heads
lowerCAmelCase_ :Dict = intermediate_size
lowerCAmelCase_ :List[Any] = hidden_act
lowerCAmelCase_ :str = hidden_dropout_prob
lowerCAmelCase_ :str = attention_probs_dropout_prob
lowerCAmelCase_ :Dict = max_position_embeddings
lowerCAmelCase_ :Optional[int] = type_vocab_size
lowerCAmelCase_ :Optional[int] = type_sequence_label_size
lowerCAmelCase_ :List[Any] = initializer_range
lowerCAmelCase_ :Union[str, Any] = num_labels
lowerCAmelCase_ :int = num_choices
lowerCAmelCase_ :Union[str, Any] = scope
def __lowerCAmelCase ( self ) -> List[Any]:
lowerCAmelCase_ :int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase_ :Union[str, Any] = None
if self.use_input_mask:
lowerCAmelCase_ :Dict = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase_ :Optional[Any] = None
lowerCAmelCase_ :List[Any] = None
lowerCAmelCase_ :Any = None
if self.use_labels:
lowerCAmelCase_ :Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase_ :int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase_ :List[Any] = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase_ :str = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCAmelCase ( self ) -> Tuple:
return EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def __lowerCAmelCase ( self , __A , __A , __A , __A , __A , __A ) -> str:
lowerCAmelCase_ :str = EsmModel(config=__A )
model.to(__A )
model.eval()
lowerCAmelCase_ :Dict = model(__A , attention_mask=__A )
lowerCAmelCase_ :Any = model(__A )
lowerCAmelCase_ :List[Any] = model(__A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __lowerCAmelCase ( self , __A , __A , __A , __A , __A , __A ) -> List[str]:
lowerCAmelCase_ :List[Any] = EsmForMaskedLM(config=__A )
model.to(__A )
model.eval()
lowerCAmelCase_ :Optional[int] = model(__A , attention_mask=__A , labels=__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCAmelCase ( self , __A , __A , __A , __A , __A , __A ) -> int:
lowerCAmelCase_ :List[Any] = self.num_labels
lowerCAmelCase_ :Optional[int] = EsmForTokenClassification(config=__A )
model.to(__A )
model.eval()
lowerCAmelCase_ :Dict = model(__A , attention_mask=__A , labels=__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowerCAmelCase ( self ) -> List[Any]:
lowerCAmelCase_ :Any = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
lowerCAmelCase_ :Optional[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class _SCREAMING_SNAKE_CASE ( A__ , A__ , unittest.TestCase ):
UpperCAmelCase_ :Dict = False
UpperCAmelCase_ :List[Any] = (
(
EsmForMaskedLM,
EsmModel,
EsmForSequenceClassification,
EsmForTokenClassification,
)
if is_torch_available()
else ()
)
UpperCAmelCase_ :Union[str, Any] = ()
UpperCAmelCase_ :List[str] = (
{
"feature-extraction": EsmModel,
"fill-mask": EsmForMaskedLM,
"text-classification": EsmForSequenceClassification,
"token-classification": EsmForTokenClassification,
"zero-shot": EsmForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCAmelCase_ :Any = True
def __lowerCAmelCase ( self ) -> str:
lowerCAmelCase_ :Any = EsmModelTester(self )
lowerCAmelCase_ :List[str] = ConfigTester(self , config_class=__A , hidden_size=37 )
def __lowerCAmelCase ( self ) -> List[Any]:
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self ) -> str:
lowerCAmelCase_ :Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def __lowerCAmelCase ( self ) -> Dict:
lowerCAmelCase_ :Optional[int] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowerCAmelCase_ :List[Any] = type
self.model_tester.create_and_check_model(*__A )
def __lowerCAmelCase ( self ) -> Tuple:
lowerCAmelCase_ :Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__A )
def __lowerCAmelCase ( self ) -> Any:
lowerCAmelCase_ :Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__A )
@slow
def __lowerCAmelCase ( self ) -> Union[str, Any]:
for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase_ :Any = EsmModel.from_pretrained(__A )
self.assertIsNotNone(__A )
def __lowerCAmelCase ( self ) -> str:
lowerCAmelCase_ :Union[str, Any] = self.model_tester.prepare_config_and_inputs()[0]
lowerCAmelCase_ :Dict = EsmEmbeddings(config=__A )
lowerCAmelCase_ :Optional[Any] = torch.as_tensor([[12, 31, 13, model.padding_idx]] )
lowerCAmelCase_ :str = torch.as_tensor(
[
[
0 + model.padding_idx + 1,
1 + model.padding_idx + 1,
2 + model.padding_idx + 1,
model.padding_idx,
]
] )
lowerCAmelCase_ :List[Any] = create_position_ids_from_input_ids(__A , model.padding_idx )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(__A , __A ) ) )
def __lowerCAmelCase ( self ) -> List[str]:
lowerCAmelCase_ :Tuple = self.model_tester.prepare_config_and_inputs()[0]
lowerCAmelCase_ :List[str] = EsmEmbeddings(config=__A )
lowerCAmelCase_ :int = torch.empty(2 , 4 , 30 )
lowerCAmelCase_ :Tuple = [
0 + embeddings.padding_idx + 1,
1 + embeddings.padding_idx + 1,
2 + embeddings.padding_idx + 1,
3 + embeddings.padding_idx + 1,
]
lowerCAmelCase_ :Dict = torch.as_tensor([expected_single_positions, expected_single_positions] )
lowerCAmelCase_ :List[str] = embeddings.create_position_ids_from_inputs_embeds(__A )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(__A , __A ) ) )
@unittest.skip("""Esm does not support embedding resizing""" )
def __lowerCAmelCase ( self ) -> Dict:
pass
@unittest.skip("""Esm does not support embedding resizing""" )
def __lowerCAmelCase ( self ) -> Dict:
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def __lowerCAmelCase ( self ) -> str:
pass
@require_torch
class _SCREAMING_SNAKE_CASE ( A__ ):
@slow
def __lowerCAmelCase ( self ) -> List[str]:
with torch.no_grad():
lowerCAmelCase_ :int = EsmForMaskedLM.from_pretrained("""facebook/esm2_t6_8M_UR50D""" )
model.eval()
lowerCAmelCase_ :List[Any] = torch.tensor([[0, 1, 2, 3, 4, 5]] )
lowerCAmelCase_ :Optional[int] = model(__A )[0]
lowerCAmelCase_ :Optional[Any] = 33
lowerCAmelCase_ :Union[str, Any] = torch.Size((1, 6, vocab_size) )
self.assertEqual(output.shape , __A )
lowerCAmelCase_ :List[str] = torch.tensor(
[[[8.9_2_1_5, -10.5898, -6.4_6_7_1], [-6.3_9_6_7, -13.9114, -1.1_2_1_2], [-7.7_8_1_2, -13.9516, -3.7_4_0_6]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __A , atol=1E-4 ) )
@slow
def __lowerCAmelCase ( self ) -> List[str]:
with torch.no_grad():
lowerCAmelCase_ :Any = EsmModel.from_pretrained("""facebook/esm2_t6_8M_UR50D""" )
model.eval()
lowerCAmelCase_ :Dict = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
lowerCAmelCase_ :Union[str, Any] = model(__A )[0]
# compare the actual values for a slice.
lowerCAmelCase_ :List[str] = torch.tensor(
[[[0.1_4_4_4, 0.5_4_1_3, 0.3_2_4_8], [0.3_0_3_4, 0.0_0_5_3, 0.3_1_0_8], [0.3_2_2_8, -0.2_4_9_9, 0.3_4_1_5]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __A , atol=1E-4 ) )
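

# A self-contained sketch (assuming torch is installed, as the tests above do)
# of the position-id convention the embeddings tests exercise: padding
# positions keep `padding_idx`, real tokens count up from `padding_idx + 1`.
# This mirrors `create_position_ids_from_input_ids` imported at the top.
def _demo_position_ids() -> None:
    padding_idx = 1
    input_ids = torch.as_tensor([[12, 31, 13, padding_idx]])
    mask = input_ids.ne(padding_idx).int()
    position_ids = torch.cumsum(mask, dim=1) * mask + padding_idx
    assert position_ids.tolist() == [[2, 3, 4, 1]]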
"""simple docstring"""
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class _SCREAMING_SNAKE_CASE ( A__ , A__ , A__ ):
UpperCAmelCase_ :List[str] = [r"h\.\d+\.attn\.bias", r"h\.\d+\.attn\.masked_bias"]
@register_to_config
def __init__( self , __A , __A , __A = None , __A = 5_0257 , __A = 1024 , __A = 768 , __A = 12 , __A = 12 , __A = None , __A = "gelu_new" , __A = 0.1 , __A = 0.1 , __A = 0.1 , __A = 1E-5 , __A = 0.0_2 , __A = True , __A = True , __A = False , __A = False , ) -> Optional[Any]:
super().__init__()
lowerCAmelCase_ :List[str] = prefix_length
if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
raise ValueError(
f"""`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and"""
f""" `n_embd`: {n_embd} are not equal.""" )
lowerCAmelCase_ :Union[str, Any] = prefix_inner_dim
lowerCAmelCase_ :str = prefix_hidden_dim
lowerCAmelCase_ :str = (
nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim )
if self.prefix_hidden_dim is not None
else nn.Identity()
)
lowerCAmelCase_ :List[Any] = (
nn.Linear(self.prefix_hidden_dim , __A ) if self.prefix_hidden_dim is not None else nn.Identity()
)
lowerCAmelCase_ :Any = GPTaConfig(
vocab_size=__A , n_positions=__A , n_embd=__A , n_layer=__A , n_head=__A , n_inner=__A , activation_function=__A , resid_pdrop=__A , embd_pdrop=__A , attn_pdrop=__A , layer_norm_epsilon=__A , initializer_range=__A , scale_attn_weights=__A , use_cache=__A , scale_attn_by_inverse_layer_idx=__A , reorder_and_upcast_attn=__A , )
lowerCAmelCase_ :Any = GPTaLMHeadModel(__A )
def __lowerCAmelCase ( self , __A , __A , __A = None , __A = None , ) -> List[str]:
lowerCAmelCase_ :str = self.transformer.transformer.wte(__A )
lowerCAmelCase_ :Any = self.encode_prefix(__A )
lowerCAmelCase_ :Optional[Any] = self.decode_prefix(__A )
lowerCAmelCase_ :Optional[int] = torch.cat((prefix_embeds, embedding_text) , dim=1 )
if labels is not None:
lowerCAmelCase_ :int = self.get_dummy_token(input_ids.shape[0] , input_ids.device )
lowerCAmelCase_ :Optional[Any] = torch.cat((dummy_token, input_ids) , dim=1 )
lowerCAmelCase_ :Tuple = self.transformer(inputs_embeds=__A , labels=__A , attention_mask=__A )
if self.prefix_hidden_dim is not None:
return out, hidden
else:
return out
def __lowerCAmelCase ( self , __A , __A ) -> torch.Tensor:
return torch.zeros(__A , self.prefix_length , dtype=torch.intaa , device=__A )
def __lowerCAmelCase ( self , __A ) -> Optional[int]:
return self.encode_prefix(__A )
@torch.no_grad()
def __lowerCAmelCase ( self , __A , __A , __A ) -> Optional[int]:
lowerCAmelCase_ :Tuple = torch.split(__A , 1 , dim=0 )
lowerCAmelCase_ :Optional[int] = []
lowerCAmelCase_ :List[str] = []
for feature in features:
lowerCAmelCase_ :Tuple = self.decode_prefix(feature.to(__A ) ) # back to the clip feature
# Only support beam search for now
lowerCAmelCase_ , lowerCAmelCase_ :Optional[Any] = self.generate_beam(
input_embeds=__A , device=__A , eos_token_id=__A )
generated_tokens.append(output_tokens[0] )
generated_seq_lengths.append(seq_lengths[0] )
lowerCAmelCase_ :Tuple = torch.stack(__A )
lowerCAmelCase_ :int = torch.stack(__A )
return generated_tokens, generated_seq_lengths
@torch.no_grad()
def __lowerCAmelCase ( self , __A=None , __A=None , __A=None , __A = 5 , __A = 67 , __A = 1.0 , __A = None , ) -> Union[str, Any]:
lowerCAmelCase_ :Optional[int] = eos_token_id
lowerCAmelCase_ :Optional[int] = None
lowerCAmelCase_ :Any = None
lowerCAmelCase_ :int = torch.ones(__A , device=__A , dtype=torch.int )
lowerCAmelCase_ :Optional[int] = torch.zeros(__A , device=__A , dtype=torch.bool )
if input_embeds is not None:
lowerCAmelCase_ :List[str] = input_embeds
else:
lowerCAmelCase_ :Union[str, Any] = self.transformer.transformer.wte(__A )
for i in range(__A ):
lowerCAmelCase_ :Optional[int] = self.transformer(inputs_embeds=__A )
lowerCAmelCase_ :str = outputs.logits
lowerCAmelCase_ :str = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
lowerCAmelCase_ :Dict = logits.softmax(-1 ).log()
if scores is None:
lowerCAmelCase_ , lowerCAmelCase_ :Any = logits.topk(__A , -1 )
lowerCAmelCase_ :Union[str, Any] = generated.expand(__A , *generated.shape[1:] )
lowerCAmelCase_ , lowerCAmelCase_ :List[str] = next_tokens.permute(1 , 0 ), scores.squeeze(0 )
if tokens is None:
lowerCAmelCase_ :List[str] = next_tokens
else:
lowerCAmelCase_ :List[Any] = tokens.expand(__A , *tokens.shape[1:] )
lowerCAmelCase_ :Any = torch.cat((tokens, next_tokens) , dim=1 )
else:
lowerCAmelCase_ :List[Any] = -float(np.inf )
lowerCAmelCase_ :int = 0
lowerCAmelCase_ :Optional[int] = scores[:, None] + logits
seq_lengths[~is_stopped] += 1
lowerCAmelCase_ :List[Any] = scores_sum / seq_lengths[:, None]
lowerCAmelCase_ , lowerCAmelCase_ :Tuple = scores_sum_average.view(-1 ).topk(__A , -1 )
lowerCAmelCase_ :Optional[Any] = next_tokens // scores_sum.shape[1]
lowerCAmelCase_ :Dict = seq_lengths[next_tokens_source]
lowerCAmelCase_ :Tuple = next_tokens % scores_sum.shape[1]
lowerCAmelCase_ :Optional[Any] = next_tokens.unsqueeze(1 )
lowerCAmelCase_ :str = tokens[next_tokens_source]
lowerCAmelCase_ :List[Any] = torch.cat((tokens, next_tokens) , dim=1 )
lowerCAmelCase_ :Dict = generated[next_tokens_source]
lowerCAmelCase_ :Dict = scores_sum_average * seq_lengths
lowerCAmelCase_ :Tuple = is_stopped[next_tokens_source]
lowerCAmelCase_ :str = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 )
lowerCAmelCase_ :List[Any] = torch.cat((generated, next_token_embed) , dim=1 )
lowerCAmelCase_ :Optional[int] = is_stopped + next_tokens.eq(__A ).squeeze()
if is_stopped.all():
break
lowerCAmelCase_ :str = scores / seq_lengths
lowerCAmelCase_ :Optional[int] = scores.argsort(descending=__A )
# tokens tensors are already padded to max_seq_length
lowerCAmelCase_ :Optional[Any] = [tokens[i] for i in order]
lowerCAmelCase_ :Dict = torch.stack(__A , dim=0 )
lowerCAmelCase_ :Tuple = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype )
return output_texts, seq_lengths
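

# A minimal sketch of the prefix-conditioning idea implemented above: a
# feature vector is projected to `prefix_length` pseudo-token embeddings that
# are prepended to the text embeddings before the language model runs. The
# sizes and the single nn.Linear are hypothetical simplifications of the
# encode/decode pair used by the class above.
class _TinyPrefixMapper(nn.Module):
    def __init__(self, feature_dim: int = 8, prefix_length: int = 4, n_embd: int = 16) -> None:
        super().__init__()
        self.prefix_length = prefix_length
        self.n_embd = n_embd
        self.proj = nn.Linear(feature_dim, prefix_length * n_embd)

    def forward(self, features: torch.Tensor) -> torch.Tensor:
        # (batch, feature_dim) -> (batch, prefix_length, n_embd)
        return self.proj(features).view(features.shape[0], self.prefix_length, self.n_embd)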
"""simple docstring"""
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class _SCREAMING_SNAKE_CASE ( A__ ):
UpperCAmelCase_ :List[str] = (UnCLIPScheduler,)
def __lowerCAmelCase ( self , **__A ) -> Optional[int]:
lowerCAmelCase_ :Optional[int] = {
"""num_train_timesteps""": 1000,
"""variance_type""": """fixed_small_log""",
"""clip_sample""": True,
"""clip_sample_range""": 1.0,
"""prediction_type""": """epsilon""",
}
config.update(**__A )
return config
def __lowerCAmelCase ( self ) -> Dict:
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=__A )
def __lowerCAmelCase ( self ) -> str:
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=__A )
def __lowerCAmelCase ( self ) -> Tuple:
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=__A )
def __lowerCAmelCase ( self ) -> int:
for clip_sample_range in [1, 5, 10, 20]:
self.check_over_configs(clip_sample_range=__A )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=__A )
def __lowerCAmelCase ( self ) -> Any:
for time_step in [0, 500, 999]:
for prev_timestep in [None, 5, 100, 250, 500, 750]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=__A , prev_timestep=__A )
def __lowerCAmelCase ( self ) -> Optional[Any]:
lowerCAmelCase_ :Optional[int] = self.scheduler_classes[0]
lowerCAmelCase_ :List[str] = self.get_scheduler_config(variance_type="""fixed_small_log""" )
lowerCAmelCase_ :Optional[Any] = scheduler_class(**__A )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.00_00E-10 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_5_4_9_6_2_5 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.9_9_9_4_9_8_7 ) ) < 1E-5
def __lowerCAmelCase ( self ) -> Any:
lowerCAmelCase_ :Dict = self.scheduler_classes[0]
lowerCAmelCase_ :List[Any] = self.get_scheduler_config(variance_type="""learned_range""" )
lowerCAmelCase_ :Union[str, Any] = scheduler_class(**__A )
lowerCAmelCase_ :Union[str, Any] = 0.5
assert scheduler._get_variance(1 , predicted_variance=__A ) - -10.171_2790 < 1E-5
assert scheduler._get_variance(487 , predicted_variance=__A ) - -5.7_9_9_8_0_5_2 < 1E-5
assert scheduler._get_variance(999 , predicted_variance=__A ) - -0.0_0_1_0_0_1_1 < 1E-5
def __lowerCAmelCase ( self ) -> str:
lowerCAmelCase_ :Any = self.scheduler_classes[0]
lowerCAmelCase_ :int = self.get_scheduler_config()
lowerCAmelCase_ :int = scheduler_class(**__A )
lowerCAmelCase_ :Tuple = scheduler.timesteps
lowerCAmelCase_ :List[str] = self.dummy_model()
lowerCAmelCase_ :Optional[Any] = self.dummy_sample_deter
lowerCAmelCase_ :str = torch.manual_seed(0 )
for i, t in enumerate(__A ):
# 1. predict noise residual
lowerCAmelCase_ :List[str] = model(__A , __A )
# 2. predict previous mean of sample x_t-1
lowerCAmelCase_ :Optional[int] = scheduler.step(__A , __A , __A , generator=__A ).prev_sample
lowerCAmelCase_ :int = pred_prev_sample
lowerCAmelCase_ :Union[str, Any] = torch.sum(torch.abs(__A ) )
lowerCAmelCase_ :Union[str, Any] = torch.mean(torch.abs(__A ) )
assert abs(result_sum.item() - 252.268_2495 ) < 1E-2
assert abs(result_mean.item() - 0.3_2_8_4_7_4_3 ) < 1E-3
def __lowerCAmelCase ( self ) -> Optional[int]:
lowerCAmelCase_ :str = self.scheduler_classes[0]
lowerCAmelCase_ :Union[str, Any] = self.get_scheduler_config()
lowerCAmelCase_ :Dict = scheduler_class(**__A )
scheduler.set_timesteps(25 )
lowerCAmelCase_ :Optional[Any] = scheduler.timesteps
lowerCAmelCase_ :Any = self.dummy_model()
lowerCAmelCase_ :Tuple = self.dummy_sample_deter
lowerCAmelCase_ :Tuple = torch.manual_seed(0 )
for i, t in enumerate(__A ):
# 1. predict noise residual
lowerCAmelCase_ :Optional[int] = model(__A , __A )
if i + 1 == timesteps.shape[0]:
lowerCAmelCase_ :int = None
else:
lowerCAmelCase_ :str = timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
lowerCAmelCase_ :Optional[Any] = scheduler.step(
__A , __A , __A , prev_timestep=__A , generator=__A ).prev_sample
lowerCAmelCase_ :List[str] = pred_prev_sample
lowerCAmelCase_ :Tuple = torch.sum(torch.abs(__A ) )
lowerCAmelCase_ :Dict = torch.mean(torch.abs(__A ) )
assert abs(result_sum.item() - 258.204_4983 ) < 1E-2
assert abs(result_mean.item() - 0.3_3_6_2_0_3_8 ) < 1E-3
def __lowerCAmelCase ( self ) -> Tuple:
pass
def __lowerCAmelCase ( self ) -> Union[str, Any]:
pass
"""simple docstring"""
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
__UpperCAmelCase = {
'facebook/detr-resnet-50': 'https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json',
# See all DETR models at https://huggingface.co/models?filter=detr
}
class _SCREAMING_SNAKE_CASE ( A__ ):
UpperCAmelCase_ :str = "detr"
UpperCAmelCase_ :str = ["past_key_values"]
UpperCAmelCase_ :Tuple = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
def __init__( self , __A=True , __A=None , __A=3 , __A=100 , __A=6 , __A=2048 , __A=8 , __A=6 , __A=2048 , __A=8 , __A=0.0 , __A=0.0 , __A=True , __A="relu" , __A=256 , __A=0.1 , __A=0.0 , __A=0.0 , __A=0.0_2 , __A=1.0 , __A=False , __A="sine" , __A="resnet50" , __A=True , __A=False , __A=1 , __A=5 , __A=2 , __A=1 , __A=1 , __A=5 , __A=2 , __A=0.1 , **__A , ) -> List[Any]:
if backbone_config is not None and use_timm_backbone:
raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" )
if not use_timm_backbone:
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
lowerCAmelCase_ :int = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] )
elif isinstance(__A , __A ):
lowerCAmelCase_ :str = backbone_config.get("""model_type""" )
lowerCAmelCase_ :List[Any] = CONFIG_MAPPING[backbone_model_type]
lowerCAmelCase_ :Optional[Any] = config_class.from_dict(__A )
# set timm attributes to None
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :Optional[Any] = None, None, None
lowerCAmelCase_ :Tuple = use_timm_backbone
lowerCAmelCase_ :Optional[int] = backbone_config
lowerCAmelCase_ :Optional[int] = num_channels
lowerCAmelCase_ :int = num_queries
lowerCAmelCase_ :List[Any] = d_model
lowerCAmelCase_ :Optional[int] = encoder_ffn_dim
lowerCAmelCase_ :Tuple = encoder_layers
lowerCAmelCase_ :int = encoder_attention_heads
lowerCAmelCase_ :Optional[Any] = decoder_ffn_dim
lowerCAmelCase_ :List[str] = decoder_layers
lowerCAmelCase_ :Dict = decoder_attention_heads
lowerCAmelCase_ :Dict = dropout
lowerCAmelCase_ :Tuple = attention_dropout
lowerCAmelCase_ :Union[str, Any] = activation_dropout
lowerCAmelCase_ :Any = activation_function
lowerCAmelCase_ :List[str] = init_std
lowerCAmelCase_ :Optional[int] = init_xavier_std
lowerCAmelCase_ :int = encoder_layerdrop
lowerCAmelCase_ :Union[str, Any] = decoder_layerdrop
lowerCAmelCase_ :List[str] = encoder_layers
lowerCAmelCase_ :Union[str, Any] = auxiliary_loss
lowerCAmelCase_ :str = position_embedding_type
lowerCAmelCase_ :List[Any] = backbone
lowerCAmelCase_ :str = use_pretrained_backbone
lowerCAmelCase_ :str = dilation
# Hungarian matcher
lowerCAmelCase_ :List[Any] = class_cost
lowerCAmelCase_ :Union[str, Any] = bbox_cost
lowerCAmelCase_ :Tuple = giou_cost
# Loss coefficients
lowerCAmelCase_ :Optional[int] = mask_loss_coefficient
lowerCAmelCase_ :Union[str, Any] = dice_loss_coefficient
lowerCAmelCase_ :Tuple = bbox_loss_coefficient
lowerCAmelCase_ :Tuple = giou_loss_coefficient
lowerCAmelCase_ :Dict = eos_coefficient
super().__init__(is_encoder_decoder=__A , **__A )
@property
def __lowerCAmelCase ( self ) -> int:
return self.encoder_attention_heads
@property
def __lowerCAmelCase ( self ) -> int:
return self.d_model
@classmethod
def __lowerCAmelCase ( cls , __A , **__A ) -> Any:
return cls(backbone_config=__A , **__A )
def __lowerCAmelCase ( self ) -> Dict[str, any]:
lowerCAmelCase_ :List[str] = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
lowerCAmelCase_ :Dict = self.backbone_config.to_dict()
lowerCAmelCase_ :str = self.__class__.model_type
return output
class _SCREAMING_SNAKE_CASE ( A__ ):
UpperCAmelCase_ :List[Any] = version.parse("1.11" )
@property
def __lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
("""pixel_mask""", {0: """batch"""}),
] )
@property
def __lowerCAmelCase ( self ) -> float:
return 1E-5
@property
def __lowerCAmelCase ( self ) -> int:
return 12
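

# The `attribute_map` at the top of the config class above aliases generic
# names (`hidden_size`, `num_attention_heads`) onto DETR-specific attributes.
# A simplified, self-contained re-implementation of that mechanism (not the
# transformers one):
class _AliasedConfig:
    attribute_map = {"hidden_size": "d_model"}

    def __init__(self, d_model: int = 256) -> None:
        self.d_model = d_model

    def __getattr__(self, name: str):
        mapped = type(self).attribute_map.get(name)
        if mapped is not None:
            return getattr(self, mapped)
        raise AttributeError(name)


# e.g. _AliasedConfig(d_model=128).hidden_size == 128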
"""simple docstring"""
import math
def main() -> None:
    message = input("""Enter message: """ )
    key = int(input(f"""Enter key [2-{len(message ) - 1}]: """ ) )
    mode = input("""Encryption/Decryption [e/d]: """ )
    if mode.lower().startswith("""e""" ):
        text = encrypt_message(key , message )
    elif mode.lower().startswith("""d""" ):
        text = decrypt_message(key , message )
    # Append pipe symbol (vertical bar) to identify spaces at the end.
    print(f"""Output:\n{text + "|"}""" )


def encrypt_message(key: int , message: str ) -> str:
    """Encrypts `message` with a columnar transposition cipher: column `col`
    collects every `key`-th character of the message, starting at offset `col`."""
    cipher_text = [""""""] * key
    for col in range(key ):
        pointer = col
        while pointer < len(message ):
            cipher_text[col] += message[pointer]
            pointer += key
    return "".join(cipher_text )


def decrypt_message(key: int , message: str ) -> str:
    """Reverses `encrypt_message` by writing the ciphertext back into the
    transposition grid row by row and reading the plaintext out column by column."""
    num_cols = math.ceil(len(message ) / key )
    num_rows = key
    num_shaded_boxes = (num_cols * num_rows) - len(message )
    plain_text = [""""""] * num_cols
    col = 0
    row = 0
    for symbol in message:
        plain_text[col] += symbol
        col += 1
        if (
            (col == num_cols)
            or (col == num_cols - 1)
            and (row >= num_rows - num_shaded_boxes)
        ):
            col = 0
            row += 1
    return "".join(plain_text )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {'configuration_deit': ['DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DeiTConfig', 'DeiTOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_deit'] = ['DeiTFeatureExtractor']
    _import_structure['image_processing_deit'] = ['DeiTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_deit'] = [
'DEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'DeiTForImageClassification',
'DeiTForImageClassificationWithTeacher',
'DeiTForMaskedImageModeling',
'DeiTModel',
'DeiTPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_deit'] = [
'TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDeiTForImageClassification',
'TFDeiTForImageClassificationWithTeacher',
'TFDeiTForMaskedImageModeling',
'TFDeiTModel',
'TFDeiTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
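

# The structure above defers heavy imports until first attribute access. A
# minimal, self-contained sketch of the same idea (simplified; the real
# transformers `_LazyModule` also handles caching, failed imports and extra
# objects):
import importlib
import types


class _TinyLazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict) -> None:
        super().__init__(name)
        self._class_to_module = {
            cls: mod for mod, classes in import_structure.items() for cls in classes
        }

    def __getattr__(self, attr: str):
        if attr not in self._class_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module("." + self._class_to_module[attr], self.__name__)
        return getattr(submodule, attr)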
"""simple docstring"""
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
__UpperCAmelCase = logging.get_logger(__name__)
# General docstring
__UpperCAmelCase = 'PoolFormerConfig'
# Base docstring
__UpperCAmelCase = 'sail/poolformer_s12'
__UpperCAmelCase = [1, 5_12, 7, 7]
# Image classification docstring
__UpperCAmelCase = 'sail/poolformer_s12'
__UpperCAmelCase = 'tabby, tabby cat'
__UpperCAmelCase = [
'sail/poolformer_s12',
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def drop_path(input: torch.Tensor , drop_prob: float = 0.0 , training: bool = False ) -> torch.Tensor:
    """Drop paths (stochastic depth) per sample, applied in the main path of residual blocks."""
    if drop_prob == 0.0 or not training:
        return input
    keep_prob = 1 - drop_prob
    shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
    random_tensor = keep_prob + torch.rand(shape , dtype=input.dtype , device=input.device )
    random_tensor.floor_() # binarize
    output = input.div(keep_prob ) * random_tensor
    return output
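

# The division by `keep_prob` in `drop_path` keeps the expected activation
# unchanged: E[output] = keep_prob * (input / keep_prob) = input. A quick,
# self-contained check of the eval- and train-mode behaviour:
def _demo_drop_path() -> None:
    x = torch.ones(4, 3)
    assert torch.equal(drop_path(x, drop_prob=0.5, training=False), x)
    out = drop_path(x, drop_prob=0.5, training=True)
    # each sample's row is either all zeros or all 1 / keep_prob == 2.0
    assert set(out.unique().tolist()) <= {0.0, 2.0}
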
class _SCREAMING_SNAKE_CASE ( nn.Module ):
def __init__( self , __A = None ) -> None:
super().__init__()
lowerCAmelCase_ :int = drop_prob
def __lowerCAmelCase ( self , __A ) -> torch.Tensor:
return drop_path(__A , self.drop_prob , self.training )
def __lowerCAmelCase ( self ) -> str:
return "p={}".format(self.drop_prob )
class _SCREAMING_SNAKE_CASE ( nn.Module ):
def __init__( self , __A , __A , __A , __A , __A , __A=None ) -> Dict:
super().__init__()
lowerCAmelCase_ :List[str] = patch_size if isinstance(__A , collections.abc.Iterable ) else (patch_size, patch_size)
lowerCAmelCase_ :Any = stride if isinstance(__A , collections.abc.Iterable ) else (stride, stride)
lowerCAmelCase_ :Optional[int] = padding if isinstance(__A , collections.abc.Iterable ) else (padding, padding)
lowerCAmelCase_ :Dict = nn.Convad(__A , __A , kernel_size=__A , stride=__A , padding=__A )
lowerCAmelCase_ :Any = norm_layer(__A ) if norm_layer else nn.Identity()
def __lowerCAmelCase ( self , __A ) -> Dict:
lowerCAmelCase_ :Any = self.projection(__A )
lowerCAmelCase_ :Union[str, Any] = self.norm(__A )
return embeddings
class _SCREAMING_SNAKE_CASE ( nn.GroupNorm ):
def __init__( self , __A , **__A ) -> List[str]:
super().__init__(1 , __A , **__A )
class _SCREAMING_SNAKE_CASE ( nn.Module ):
def __init__( self , __A ) -> Any:
super().__init__()
lowerCAmelCase_ :Union[str, Any] = nn.AvgPoolad(__A , stride=1 , padding=pool_size // 2 , count_include_pad=__A )
def __lowerCAmelCase ( self , __A ) -> Union[str, Any]:
return self.pool(__A ) - hidden_states
class _SCREAMING_SNAKE_CASE ( nn.Module ):
def __init__( self , __A , __A , __A , __A ) -> Union[str, Any]:
super().__init__()
lowerCAmelCase_ :str = nn.Convad(__A , __A , 1 )
lowerCAmelCase_ :List[str] = nn.Convad(__A , __A , 1 )
lowerCAmelCase_ :Tuple = PoolFormerDropPath(__A )
if isinstance(config.hidden_act , __A ):
lowerCAmelCase_ :Optional[int] = ACTaFN[config.hidden_act]
else:
lowerCAmelCase_ :Tuple = config.hidden_act
def __lowerCAmelCase ( self , __A ) -> List[str]:
lowerCAmelCase_ :Tuple = self.conva(__A )
lowerCAmelCase_ :Union[str, Any] = self.act_fn(__A )
lowerCAmelCase_ :Union[str, Any] = self.drop(__A )
lowerCAmelCase_ :Dict = self.conva(__A )
lowerCAmelCase_ :Any = self.drop(__A )
return hidden_states
class _SCREAMING_SNAKE_CASE ( nn.Module ):
def __init__( self , __A , __A , __A , __A , __A , __A ) -> Any:
super().__init__()
lowerCAmelCase_ :Union[str, Any] = PoolFormerPooling(__A )
lowerCAmelCase_ :List[Any] = PoolFormerOutput(__A , __A , __A , __A )
lowerCAmelCase_ :Union[str, Any] = PoolFormerGroupNorm(__A )
lowerCAmelCase_ :Optional[int] = PoolFormerGroupNorm(__A )
# Useful for training neural nets
lowerCAmelCase_ :Union[str, Any] = PoolFormerDropPath(__A ) if drop_path > 0.0 else nn.Identity()
lowerCAmelCase_ :Tuple = config.use_layer_scale
if config.use_layer_scale:
lowerCAmelCase_ :str = nn.Parameter(
config.layer_scale_init_value * torch.ones((__A) ) , requires_grad=__A )
lowerCAmelCase_ :List[Any] = nn.Parameter(
config.layer_scale_init_value * torch.ones((__A) ) , requires_grad=__A )
def __lowerCAmelCase ( self , __A ) -> List[str]:
if self.use_layer_scale:
lowerCAmelCase_ :Tuple = self.pooling(self.before_norm(__A ) )
lowerCAmelCase_ :Union[str, Any] = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * pooling_output
# First residual connection
lowerCAmelCase_ :List[str] = hidden_states + self.drop_path(__A )
lowerCAmelCase_ :Any = ()
lowerCAmelCase_ :Optional[Any] = self.output(self.after_norm(__A ) )
lowerCAmelCase_ :int = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * layer_output
# Second residual connection
lowerCAmelCase_ :Tuple = hidden_states + self.drop_path(__A )
lowerCAmelCase_ :Tuple = (output,) + outputs
return outputs
else:
lowerCAmelCase_ :Any = self.drop_path(self.pooling(self.before_norm(__A ) ) )
# First residual connection
lowerCAmelCase_ :Any = pooling_output + hidden_states
lowerCAmelCase_ :Optional[int] = ()
# Second residual connection inside the PoolFormerOutput block
lowerCAmelCase_ :Optional[int] = self.drop_path(self.output(self.after_norm(__A ) ) )
lowerCAmelCase_ :Optional[int] = hidden_states + layer_output
lowerCAmelCase_ :List[Any] = (output,) + outputs
return outputs
class _SCREAMING_SNAKE_CASE ( nn.Module ):
def __init__( self , __A ) -> Tuple:
super().__init__()
lowerCAmelCase_ :Optional[Any] = config
# stochastic depth decay rule
lowerCAmelCase_ :Any = [x.item() for x in torch.linspace(0 , config.drop_path_rate , sum(config.depths ) )]
# patch embeddings
lowerCAmelCase_ :int = []
for i in range(config.num_encoder_blocks ):
embeddings.append(
PoolFormerEmbeddings(
patch_size=config.patch_sizes[i] , stride=config.strides[i] , padding=config.padding[i] , num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1] , hidden_size=config.hidden_sizes[i] , ) )
lowerCAmelCase_ :List[Any] = nn.ModuleList(__A )
# Transformer blocks
lowerCAmelCase_ :Union[str, Any] = []
lowerCAmelCase_ :int = 0
for i in range(config.num_encoder_blocks ):
# each block consists of layers
lowerCAmelCase_ :Dict = []
if i != 0:
cur += config.depths[i - 1]
for j in range(config.depths[i] ):
layers.append(
PoolFormerLayer(
__A , num_channels=config.hidden_sizes[i] , pool_size=config.pool_size , hidden_size=config.hidden_sizes[i] , intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio ) , drop_path=dpr[cur + j] , ) )
blocks.append(nn.ModuleList(__A ) )
lowerCAmelCase_ :Tuple = nn.ModuleList(__A )
def __lowerCAmelCase ( self , __A , __A=False , __A=True ) -> List[Any]:
lowerCAmelCase_ :Union[str, Any] = () if output_hidden_states else None
lowerCAmelCase_ :Dict = pixel_values
for idx, layers in enumerate(zip(self.patch_embeddings , self.block ) ):
lowerCAmelCase_ :int = layers
# Get patch embeddings from hidden_states
lowerCAmelCase_ :List[str] = embedding_layer(__A )
# Send the embeddings through the blocks
for _, blk in enumerate(__A ):
lowerCAmelCase_ :int = blk(__A )
lowerCAmelCase_ :int = layer_outputs[0]
if output_hidden_states:
lowerCAmelCase_ :Dict = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=__A , hidden_states=__A )
class _SCREAMING_SNAKE_CASE ( A__ ):
UpperCAmelCase_ :Optional[Any] = PoolFormerConfig
UpperCAmelCase_ :Dict = "poolformer"
UpperCAmelCase_ :Union[str, Any] = "pixel_values"
UpperCAmelCase_ :Tuple = True
def __lowerCAmelCase ( self , __A ) -> Optional[Any]:
if isinstance(__A , (nn.Linear, nn.Convad) ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(__A , nn.LayerNorm ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
def __lowerCAmelCase ( self , __A , __A=False ) -> Tuple:
if isinstance(__A , __A ):
lowerCAmelCase_ :List[Any] = value
__UpperCAmelCase = R'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
__UpperCAmelCase = R'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`PoolFormerImageProcessor.__call__`] for details.\n'
@add_start_docstrings(
"The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top." , A__ , )
class _SCREAMING_SNAKE_CASE ( A__ ):
def __init__( self , __A ) -> List[Any]:
super().__init__(__A )
lowerCAmelCase_ :Dict = config
lowerCAmelCase_ :str = PoolFormerEncoder(__A )
# Initialize weights and apply final processing
self.post_init()
def __lowerCAmelCase ( self ) -> Optional[int]:
return self.embeddings.patch_embeddings
@add_start_docstrings_to_model_forward(__A )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=__A , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def __lowerCAmelCase ( self , __A = None , __A = None , __A = None , ) -> Union[Tuple, BaseModelOutputWithNoAttention]:
lowerCAmelCase_ :List[Any] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowerCAmelCase_ :Optional[int] = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError("""You have to specify pixel_values""" )
lowerCAmelCase_ :Optional[Any] = self.encoder(
__A , output_hidden_states=__A , return_dict=__A , )
lowerCAmelCase_ :Optional[Any] = encoder_outputs[0]
if not return_dict:
return (sequence_output, None) + encoder_outputs[1:]
return BaseModelOutputWithNoAttention(
last_hidden_state=__A , hidden_states=encoder_outputs.hidden_states , )
class _SCREAMING_SNAKE_CASE ( nn.Module ):
def __init__( self , __A ) -> Optional[int]:
super().__init__()
lowerCAmelCase_ :str = nn.Linear(config.hidden_size , config.hidden_size )
def __lowerCAmelCase ( self , __A ) -> Optional[Any]:
lowerCAmelCase_ :List[Any] = self.dense(__A )
return output
@add_start_docstrings(
"\n PoolFormer Model transformer with an image classification head on top\n " , A__ , )
class _SCREAMING_SNAKE_CASE ( A__ ):
def __init__( self , __A ) -> int:
super().__init__(__A )
lowerCAmelCase_ :List[str] = config.num_labels
lowerCAmelCase_ :Tuple = PoolFormerModel(__A )
# Final norm
lowerCAmelCase_ :Optional[int] = PoolFormerGroupNorm(config.hidden_sizes[-1] )
# Classifier head
lowerCAmelCase_ :Optional[int] = (
nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity()
)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(__A )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__A , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def __lowerCAmelCase ( self , __A = None , __A = None , __A = None , __A = None , ) -> Union[Tuple, ImageClassifierOutputWithNoAttention]:
lowerCAmelCase_ :Any = return_dict if return_dict is not None else self.config.use_return_dict
lowerCAmelCase_ :List[str] = self.poolformer(
__A , output_hidden_states=__A , return_dict=__A , )
lowerCAmelCase_ :Optional[int] = outputs[0]
lowerCAmelCase_ :Dict = self.classifier(self.norm(__A ).mean([-2, -1] ) )
lowerCAmelCase_ :Tuple = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
lowerCAmelCase_ :str = """regression"""
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
lowerCAmelCase_ :List[Any] = """single_label_classification"""
else:
lowerCAmelCase_ :Union[str, Any] = """multi_label_classification"""
if self.config.problem_type == "regression":
lowerCAmelCase_ :Optional[Any] = MSELoss()
if self.num_labels == 1:
lowerCAmelCase_ :Optional[Any] = loss_fct(logits.squeeze() , labels.squeeze() )
else:
lowerCAmelCase_ :Optional[int] = loss_fct(__A , __A )
elif self.config.problem_type == "single_label_classification":
lowerCAmelCase_ :Union[str, Any] = CrossEntropyLoss()
lowerCAmelCase_ :Union[str, Any] = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
lowerCAmelCase_ :Optional[Any] = BCEWithLogitsLoss()
lowerCAmelCase_ :Optional[Any] = loss_fct(__A , __A )
if not return_dict:
lowerCAmelCase_ :List[Any] = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=__A , logits=__A , hidden_states=outputs.hidden_states )
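# --- Illustrative sketch (not part of the original file): a minimal, self-contained
# demo of the stochastic-depth trick implemented by the drop_path function above.
# All names here are hypothetical; the point is that dividing by keep_prob preserves
# the expected activation while whole residual branches are randomly zeroed per sample.
import torch

def drop_path_demo(x: torch.Tensor, drop_prob: float = 0.2) -> torch.Tensor:
    keep_prob = 1 - drop_prob
    # one Bernoulli(keep_prob) sample per batch element, broadcast over all other dims
    mask = torch.rand((x.shape[0],) + (1,) * (x.ndim - 1), dtype=x.dtype).add_(keep_prob).floor_()
    return x.div(keep_prob) * mask  # scaling keeps E[output] == E[input]

if __name__ == "__main__":
    demo_input = torch.ones(10_000, 4)
    print(drop_path_demo(demo_input).mean())  # ~1.0 despite ~20% of rows being zeroed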
| 369 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__UpperCAmelCase = {
'configuration_squeezebert': [
'SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SqueezeBertConfig',
'SqueezeBertOnnxConfig',
],
'tokenization_squeezebert': ['SqueezeBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ['SqueezeBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'SqueezeBertForMaskedLM',
'SqueezeBertForMultipleChoice',
'SqueezeBertForQuestionAnswering',
'SqueezeBertForSequenceClassification',
'SqueezeBertForTokenClassification',
'SqueezeBertModel',
'SqueezeBertModule',
'SqueezeBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
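# --- Illustrative sketch (assumption, not the transformers implementation): the
# lazy-import pattern used by the file above, reduced to its essentials -- names
# resolve to real modules only on first attribute access. The class and mapping
# below are hypothetical stand-ins for _LazyModule and _import_structure.
import importlib
import types

class LazyModuleDemo(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr: str):
        # only called when normal lookup fails, i.e. on first use of `attr`
        for submodule, names in self._import_structure.items():
            if attr in names:
                return getattr(importlib.import_module(submodule), attr)
        raise AttributeError(attr)

lazy = LazyModuleDemo("demo", {"json": ["dumps", "loads"]})
print(lazy.dumps({"ok": True}))  # the json module is imported only at this point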
| 1 | 0 |
"""simple docstring"""
from __future__ import annotations
from numpy import array, cos, cross, floataa, radians, sin
from numpy.typing import NDArray
def _snake_case ( lowercase__ : float , lowercase__ : float , lowercase__ : bool = False ) -> list[float]:
'''simple docstring'''
if radian_mode:
return [magnitude * cos(lowercase__ ), magnitude * sin(lowercase__ )]
return [magnitude * cos(radians(lowercase__ ) ), magnitude * sin(radians(lowercase__ ) )]
def _snake_case ( lowercase__ : NDArray[floataa] , lowercase__ : NDArray[floataa] , lowercase__ : float = 1_0**-1 ) -> bool:
'''simple docstring'''
lowerCAmelCase_ :NDArray[floataa] = cross(lowercase__ , lowercase__ )
lowerCAmelCase_ :float = sum(lowercase__ )
return abs(lowercase__ ) < eps
if __name__ == "__main__":
# Test to check if it works
__UpperCAmelCase = array(
[
polar_force(718.4, 1_80 - 30),
polar_force(879.54, 45),
polar_force(1_00, -90),
]
)
__UpperCAmelCase = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem 1 in image_data/2D_problems.jpg
__UpperCAmelCase = array(
[
polar_force(30 * 9.81, 15),
polar_force(2_15, 1_80 - 45),
polar_force(2_64, 90 - 30),
]
)
__UpperCAmelCase = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem in image_data/2D_problems_1.jpg
__UpperCAmelCase = array([[0, -20_00], [0, -12_00], [0, 1_56_00], [0, -1_24_00]])
__UpperCAmelCase = array([[0, 0], [6, 0], [10, 0], [12, 0]])
assert in_static_equilibrium(forces, location)
import doctest
doctest.testmod()
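# --- Illustrative sketch (not part of the original file): in_static_equilibrium
# above sums the z-moments r x F over all forces. The same computation written out
# by hand for a single force, with hypothetical numbers (2D cross product:
# rx*fy - ry*fx).
r = [2.0, 0.0]   # lever arm from the pivot, metres
f = [0.0, 10.0]  # upward force, newtons
moment = r[0] * f[1] - r[1] * f[0]
print(moment)    # 20.0 -> a non-zero net moment means the body is not in equilibrium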
| 370 |
"""simple docstring"""
__UpperCAmelCase = 2_56
# Modulus to hash a string
__UpperCAmelCase = 1_00_00_03
def _snake_case ( lowercase__ : str , lowercase__ : str ) -> bool:
'''simple docstring'''
lowerCAmelCase_ :Tuple = len(lowercase__ )
lowerCAmelCase_ :List[str] = len(lowercase__ )
if p_len > t_len:
return False
lowerCAmelCase_ :List[str] = 0
lowerCAmelCase_ :Optional[int] = 0
lowerCAmelCase_ :Any = 1
# Calculating the hash of pattern and substring of text
for i in range(lowercase__ ):
lowerCAmelCase_ :int = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus
lowerCAmelCase_ :Any = (ord(text[i] ) + text_hash * alphabet_size) % modulus
if i == p_len - 1:
continue
lowerCAmelCase_ :Optional[Any] = (modulus_power * alphabet_size) % modulus
for i in range(0 , t_len - p_len + 1 ):
if text_hash == p_hash and text[i : i + p_len] == pattern:
return True
if i == t_len - p_len:
continue
# Calculate the https://en.wikipedia.org/wiki/Rolling_hash
lowerCAmelCase_ :Any = (
(text_hash - ord(text[i] ) * modulus_power) * alphabet_size
+ ord(text[i + p_len] )
) % modulus
return False
def _snake_case ( ) -> None:
'''simple docstring'''
lowerCAmelCase_ :int = """abc1abc12"""
lowerCAmelCase_ :Dict = """alskfjaldsabc1abc1abc12k23adsfabcabc"""
lowerCAmelCase_ :int = """alskfjaldsk23adsfabcabc"""
assert rabin_karp(lowercase__ , lowercase__ ) and not rabin_karp(lowercase__ , lowercase__ )
# Test 2)
lowerCAmelCase_ :Dict = """ABABX"""
lowerCAmelCase_ :int = """ABABZABABYABABX"""
assert rabin_karp(lowercase__ , lowercase__ )
# Test 3)
lowerCAmelCase_ :Union[str, Any] = """AAAB"""
lowerCAmelCase_ :List[str] = """ABAAAAAB"""
assert rabin_karp(lowercase__ , lowercase__ )
# Test 4)
lowerCAmelCase_ :Dict = """abcdabcy"""
lowerCAmelCase_ :Union[str, Any] = """abcxabcdabxabcdabcdabcy"""
assert rabin_karp(lowercase__ , lowercase__ )
# Test 5)
lowerCAmelCase_ :Optional[int] = """Lü"""
lowerCAmelCase_ :Optional[int] = """Lüsai"""
assert rabin_karp(lowercase__ , lowercase__ )
lowerCAmelCase_ :Optional[int] = """Lue"""
assert not rabin_karp(lowercase__ , lowercase__ )
print("""Success.""" )
if __name__ == "__main__":
test_rabin_karp()
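# --- Illustrative sketch (not part of the original file): the rolling-hash update
# used above, checked directly on a tiny example. Constants mirror the ones in the
# file; the helper name is hypothetical.
alphabet_size = 256
modulus = 1_000_003

def full_hash(s: str) -> int:
    h = 0
    for ch in s:
        h = (ord(ch) + h * alphabet_size) % modulus
    return h

text = "abcd"
h = full_hash(text[0:3])                # hash of the window "abc"
power = pow(alphabet_size, 2, modulus)  # alphabet_size**(window_len - 1)
# drop the leading char, shift, append the next char -- an O(1) update
rolled = ((h - ord(text[0]) * power) * alphabet_size + ord(text[3])) % modulus
assert rolled == full_hash(text[1:4])   # equals the hash of "bcd"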
| 1 | 0 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
PNDMScheduler,
StableDiffusionLDMaDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import nightly, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
enable_full_determinism()
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
UpperCAmelCase_ :List[Any] = StableDiffusionLDMaDPipeline
UpperCAmelCase_ :Dict = TEXT_TO_IMAGE_PARAMS
UpperCAmelCase_ :Optional[Any] = TEXT_TO_IMAGE_BATCH_PARAMS
UpperCAmelCase_ :List[Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
def __lowerCAmelCase ( self ) -> int:
torch.manual_seed(0 )
lowerCAmelCase_ :Union[str, Any] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
lowerCAmelCase_ :Optional[int] = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=__A , set_alpha_to_one=__A , )
torch.manual_seed(0 )
lowerCAmelCase_ :Union[str, Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=6 , out_channels=6 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
lowerCAmelCase_ :Union[str, Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
lowerCAmelCase_ :Tuple = CLIPTextModel(__A )
lowerCAmelCase_ :Tuple = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
lowerCAmelCase_ :Any = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def __lowerCAmelCase ( self , __A , __A=0 ) -> int:
if str(__A ).startswith("""mps""" ):
lowerCAmelCase_ :List[str] = torch.manual_seed(__A )
else:
lowerCAmelCase_ :str = torch.Generator(device=__A ).manual_seed(__A )
lowerCAmelCase_ :Dict = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def __lowerCAmelCase ( self ) -> Optional[Any]:
lowerCAmelCase_ :List[str] = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase_ :int = self.get_dummy_components()
lowerCAmelCase_ :Tuple = StableDiffusionLDMaDPipeline(**__A )
lowerCAmelCase_ :Tuple = ldmad_pipe.to(__A )
ldmad_pipe.set_progress_bar_config(disable=__A )
lowerCAmelCase_ :Union[str, Any] = self.get_dummy_inputs(__A )
lowerCAmelCase_ :List[str] = ldmad_pipe(**__A )
lowerCAmelCase_ , lowerCAmelCase_ :Dict = output.rgb, output.depth
lowerCAmelCase_ :List[str] = rgb[0, -3:, -3:, -1]
lowerCAmelCase_ :int = depth[0, -3:, -1]
assert rgb.shape == (1, 64, 64, 3)
assert depth.shape == (1, 64, 64)
lowerCAmelCase_ :Any = np.array(
[0.3_7_3_3_8_1_7_6, 0.7_0_2_4_7, 0.7_4_2_0_3_1_9_3, 0.5_1_6_4_3_6_0_4, 0.5_8_2_5_6_7_9_3, 0.6_0_9_3_2_1_3_6, 0.4_1_8_1_0_9_5, 0.4_8_3_5_5_8_7_7, 0.4_6_5_3_5_2_6_2] )
lowerCAmelCase_ :Union[str, Any] = np.array([103.4_6727, 85.81_2004, 87.84_9236] )
assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb ).max() < 1E-2
assert np.abs(image_slice_depth.flatten() - expected_slice_depth ).max() < 1E-2
def __lowerCAmelCase ( self ) -> Optional[Any]:
lowerCAmelCase_ :Any = self.get_dummy_components()
lowerCAmelCase_ :List[Any] = StableDiffusionLDMaDPipeline(**__A )
lowerCAmelCase_ :int = ldmad_pipe.to(__A )
ldmad_pipe.set_progress_bar_config(disable=__A )
lowerCAmelCase_ :Union[str, Any] = self.get_dummy_inputs(__A )
lowerCAmelCase_ :str = 3 * [inputs["""prompt"""]]
# forward
lowerCAmelCase_ :Any = ldmad_pipe(**__A )
lowerCAmelCase_ , lowerCAmelCase_ :Optional[int] = output.rgb, output.depth
lowerCAmelCase_ :int = rgb_slice_a[0, -3:, -3:, -1]
lowerCAmelCase_ :Optional[Any] = depth_slice_a[0, -3:, -1]
lowerCAmelCase_ :Dict = self.get_dummy_inputs(__A )
lowerCAmelCase_ :Optional[Any] = 3 * [inputs.pop("""prompt""" )]
lowerCAmelCase_ :Dict = ldmad_pipe.tokenizer(
__A , padding="""max_length""" , max_length=ldmad_pipe.tokenizer.model_max_length , truncation=__A , return_tensors="""pt""" , )
lowerCAmelCase_ :Optional[int] = text_inputs["""input_ids"""].to(__A )
lowerCAmelCase_ :List[Any] = ldmad_pipe.text_encoder(__A )[0]
lowerCAmelCase_ :Any = prompt_embeds
# forward
lowerCAmelCase_ :Optional[int] = ldmad_pipe(**__A )
lowerCAmelCase_ , lowerCAmelCase_ :Dict = output.rgb, output.depth
lowerCAmelCase_ :Tuple = rgb_slice_b[0, -3:, -3:, -1]
lowerCAmelCase_ :List[Any] = depth_slice_b[0, -3:, -1]
assert np.abs(rgb_slice_a.flatten() - rgb_slice_b.flatten() ).max() < 1E-4
assert np.abs(depth_slice_a.flatten() - depth_slice_b.flatten() ).max() < 1E-4
def __lowerCAmelCase ( self ) -> Optional[Any]:
lowerCAmelCase_ :Union[str, Any] = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase_ :Tuple = self.get_dummy_components()
lowerCAmelCase_ :List[Any] = PNDMScheduler(skip_prk_steps=__A )
lowerCAmelCase_ :Optional[Any] = StableDiffusionLDMaDPipeline(**__A )
lowerCAmelCase_ :str = ldmad_pipe.to(__A )
ldmad_pipe.set_progress_bar_config(disable=__A )
lowerCAmelCase_ :str = self.get_dummy_inputs(__A )
lowerCAmelCase_ :Optional[Any] = """french fries"""
lowerCAmelCase_ :int = ldmad_pipe(**__A , negative_prompt=__A )
lowerCAmelCase_ , lowerCAmelCase_ :Dict = output.rgb, output.depth
lowerCAmelCase_ :int = rgb[0, -3:, -3:, -1]
lowerCAmelCase_ :Dict = depth[0, -3:, -1]
assert rgb.shape == (1, 64, 64, 3)
assert depth.shape == (1, 64, 64)
lowerCAmelCase_ :Tuple = np.array(
[0.3_7_0_4_4, 0.7_1_8_1_1_5_0_3, 0.7_2_2_3_2_5_1, 0.4_8_6_0_3_6_7_5, 0.5_6_3_8_3_9_1, 0.6_3_6_4_9_4_8, 0.4_2_8_3_3_7_0_4, 0.4_9_0_1_3_1_5, 0.4_7_9_2_6_2_1_7] )
lowerCAmelCase_ :Union[str, Any] = np.array([107.8_4738, 84.6_2802, 89.96_2135] )
assert np.abs(rgb_slice.flatten() - expected_slice_rgb ).max() < 1E-2
assert np.abs(depth_slice.flatten() - expected_slice_depth ).max() < 1E-2
@slow
@require_torch_gpu
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __lowerCAmelCase ( self ) -> Optional[int]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self , __A , __A="cpu" , __A=torch.floataa , __A=0 ) -> Dict:
lowerCAmelCase_ :List[str] = torch.Generator(device=__A ).manual_seed(__A )
lowerCAmelCase_ :List[str] = np.random.RandomState(__A ).standard_normal((1, 4, 64, 64) )
lowerCAmelCase_ :Any = torch.from_numpy(__A ).to(device=__A , dtype=__A )
lowerCAmelCase_ :Any = {
"""prompt""": """a photograph of an astronaut riding a horse""",
"""latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def __lowerCAmelCase ( self ) -> Optional[int]:
lowerCAmelCase_ :Union[str, Any] = StableDiffusionLDMaDPipeline.from_pretrained("""Intel/ldm3d""" )
lowerCAmelCase_ :Union[str, Any] = ldmad_pipe.to(__A )
ldmad_pipe.set_progress_bar_config(disable=__A )
lowerCAmelCase_ :Any = self.get_inputs(__A )
lowerCAmelCase_ :Union[str, Any] = ldmad_pipe(**__A )
lowerCAmelCase_ , lowerCAmelCase_ :Tuple = output.rgb, output.depth
lowerCAmelCase_ :List[str] = rgb[0, -3:, -3:, -1].flatten()
lowerCAmelCase_ :Any = depth[0, -3:, -1].flatten()
assert rgb.shape == (1, 512, 512, 3)
assert depth.shape == (1, 512, 512)
lowerCAmelCase_ :int = np.array(
[0.5_3_8_0_5_4_6_5, 0.5_6_7_0_7_3_0_5, 0.5_4_8_6_5_1_5, 0.5_7_0_1_2_2_3_6, 0.5_8_1_4_5_1_1, 0.5_6_2_5_3_4_8_7, 0.5_4_8_4_3_0_1_4, 0.5_5_0_9_2_2_6_3, 0.6_4_5_9_7_0_6] )
lowerCAmelCase_ :str = np.array(
[0.9_2_6_3_7_8_1, 0.6_6_7_8_6_7_2, 0.5_4_8_6_5_1_5, 0.9_2_2_0_2_1_4_5, 0.6_7_8_3_1_1_3_5, 0.5_6_2_5_3_4_8_7, 0.9_2_4_1_6_9_4, 0.7_5_5_1_4_7_8, 0.6_4_5_9_7_0_6] )
assert np.abs(rgb_slice - expected_slice_rgb ).max() < 3E-3
assert np.abs(depth_slice - expected_slice_depth ).max() < 3E-3
@nightly
@require_torch_gpu
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __lowerCAmelCase ( self ) -> Any:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self , __A , __A="cpu" , __A=torch.floataa , __A=0 ) -> Optional[Any]:
lowerCAmelCase_ :Tuple = torch.Generator(device=__A ).manual_seed(__A )
lowerCAmelCase_ :int = np.random.RandomState(__A ).standard_normal((1, 4, 64, 64) )
lowerCAmelCase_ :List[Any] = torch.from_numpy(__A ).to(device=__A , dtype=__A )
lowerCAmelCase_ :Optional[Any] = {
"""prompt""": """a photograph of an astronaut riding a horse""",
"""latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 50,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def __lowerCAmelCase ( self ) -> int:
lowerCAmelCase_ :str = StableDiffusionLDMaDPipeline.from_pretrained("""Intel/ldm3d""" ).to(__A )
ldmad_pipe.set_progress_bar_config(disable=__A )
lowerCAmelCase_ :Dict = self.get_inputs(__A )
lowerCAmelCase_ :Any = ldmad_pipe(**__A )
lowerCAmelCase_ , lowerCAmelCase_ :List[str] = output.rgb, output.depth
lowerCAmelCase_ :int = 0.4_9_5_5_8_6
lowerCAmelCase_ :Optional[Any] = 0.3_3_7_9_5_5_1_5
lowerCAmelCase_ :int = 112.4_8518
lowerCAmelCase_ :Any = 98.48_9746
assert np.abs(expected_rgb_mean - rgb.mean() ) < 1E-3
assert np.abs(expected_rgb_std - rgb.std() ) < 1E-3
assert np.abs(expected_depth_mean - depth.mean() ) < 1E-3
assert np.abs(expected_depth_std - depth.std() ) < 1E-3
def __lowerCAmelCase ( self ) -> int:
lowerCAmelCase_ :int = StableDiffusionLDMaDPipeline.from_pretrained("""Intel/ldm3d-4c""" ).to(__A )
ldmad_pipe.set_progress_bar_config(disable=__A )
lowerCAmelCase_ :Dict = self.get_inputs(__A )
lowerCAmelCase_ :List[str] = ldmad_pipe(**__A )
lowerCAmelCase_ , lowerCAmelCase_ :Any = output.rgb, output.depth
lowerCAmelCase_ :str = 0.4_1_9_4_1_2_7
lowerCAmelCase_ :List[Any] = 0.3_5_3_7_5_5_8_6
lowerCAmelCase_ :List[Any] = 0.5_6_3_8_5_0_2
lowerCAmelCase_ :List[Any] = 0.3_4_6_8_6_1_0_3
assert rgb.shape == (1, 512, 512, 3)
assert depth.shape == (1, 512, 512, 1)
assert np.abs(expected_rgb_mean - rgb.mean() ) < 1E-3
assert np.abs(expected_rgb_std - rgb.std() ) < 1E-3
assert np.abs(expected_depth_mean - depth.mean() ) < 1E-3
assert np.abs(expected_depth_std - depth.std() ) < 1E-3
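# --- Illustrative sketch (assumption, not from the test file): the regression
# pattern the tests above rely on -- compare a tiny deterministic slice of the
# output against a frozen reference with an absolute tolerance, so a full image
# never has to be stored. Values here are hypothetical.
import numpy as np

rgb_out = np.full((1, 64, 64, 3), 0.5)          # stand-in for pipeline output
rgb_slice = rgb_out[0, -3:, -3:, -1].flatten()  # 9 pixels from one corner/channel
expected = np.full(9, 0.5)
assert np.abs(rgb_slice - expected).max() < 1e-2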
| 371 |
"""simple docstring"""
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
__UpperCAmelCase = 16
__UpperCAmelCase = 32
def _snake_case ( lowercase__ : Accelerator , lowercase__ : int = 1_6 ) -> str:
'''simple docstring'''
lowerCAmelCase_ :List[str] = AutoTokenizer.from_pretrained("""bert-base-cased""" )
lowerCAmelCase_ :Optional[Any] = load_dataset("""glue""" , """mrpc""" )
def tokenize_function(lowercase__ : int ):
# max_length=None => use the model max length (it's actually the default)
lowerCAmelCase_ :Optional[int] = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowercase__ , max_length=lowercase__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
lowerCAmelCase_ :Optional[Any] = datasets.map(
lowercase__ , batched=lowercase__ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowerCAmelCase_ :str = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(lowercase__ : Dict ):
# On TPU it's best to pad everything to the same length or training will be very slow.
lowerCAmelCase_ :int = 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
lowerCAmelCase_ :List[Any] = 1_6
elif accelerator.mixed_precision != "no":
lowerCAmelCase_ :List[str] = 8
else:
lowerCAmelCase_ :Optional[int] = None
return tokenizer.pad(
lowercase__ , padding="""longest""" , max_length=lowercase__ , pad_to_multiple_of=lowercase__ , return_tensors="""pt""" , )
# Instantiate dataloaders.
lowerCAmelCase_ :Optional[Any] = DataLoader(
tokenized_datasets["""train"""] , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=lowercase__ )
lowerCAmelCase_ :List[Any] = DataLoader(
tokenized_datasets["""validation"""] , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=lowercase__ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
__UpperCAmelCase = mocked_dataloaders # noqa: F811
def _snake_case ( lowercase__ : List[Any] , lowercase__ : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , lowercase__ ) == "1":
lowerCAmelCase_ :Optional[Any] = 2
# New Code #
lowerCAmelCase_ :List[str] = int(args.gradient_accumulation_steps )
lowerCAmelCase_ :int = int(args.local_sgd_steps )
# Initialize accelerator
lowerCAmelCase_ :str = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=lowercase__ )
if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
raise NotImplementedError("""LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)""" )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowerCAmelCase_ :int = config["""lr"""]
lowerCAmelCase_ :Union[str, Any] = int(config["""num_epochs"""] )
lowerCAmelCase_ :int = int(config["""seed"""] )
lowerCAmelCase_ :Union[str, Any] = int(config["""batch_size"""] )
lowerCAmelCase_ :Union[str, Any] = evaluate.load("""glue""" , """mrpc""" )
set_seed(lowercase__ )
lowerCAmelCase_ , lowerCAmelCase_ :Optional[int] = get_dataloaders(lowercase__ , lowercase__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowerCAmelCase_ :Optional[int] = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=lowercase__ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
lowerCAmelCase_ :Union[str, Any] = model.to(accelerator.device )
# Instantiate optimizer
lowerCAmelCase_ :Optional[Any] = AdamW(params=model.parameters() , lr=lowercase__ )
# Instantiate scheduler
lowerCAmelCase_ :Union[str, Any] = get_linear_schedule_with_warmup(
optimizer=lowercase__ , num_warmup_steps=1_0_0 , num_training_steps=(len(lowercase__ ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :Union[str, Any] = accelerator.prepare(
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
# Now we train the model
for epoch in range(lowercase__ ):
model.train()
with LocalSGD(
accelerator=lowercase__ , model=lowercase__ , local_sgd_steps=lowercase__ , enabled=local_sgd_steps is not None ) as local_sgd:
for step, batch in enumerate(lowercase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
# We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
with accelerator.accumulate(lowercase__ ):
lowerCAmelCase_ :str = model(**lowercase__ )
lowerCAmelCase_ :Optional[int] = output.loss
accelerator.backward(lowercase__ )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
# LocalSGD-specific line
local_sgd.step()
model.eval()
for step, batch in enumerate(lowercase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowerCAmelCase_ :Optional[int] = model(**lowercase__ )
lowerCAmelCase_ :Optional[int] = outputs.logits.argmax(dim=-1 )
lowerCAmelCase_ , lowerCAmelCase_ :Union[str, Any] = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
metric.add_batch(
predictions=lowercase__ , references=lowercase__ , )
lowerCAmelCase_ :Any = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"""epoch {epoch}:""" , lowercase__ )
def _snake_case ( ) -> Tuple:
'''simple docstring'''
lowerCAmelCase_ :str = argparse.ArgumentParser(description="""Simple example of training script.""" )
parser.add_argument(
"""--mixed_precision""" , type=lowercase__ , default=lowercase__ , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" , )
# New Code #
parser.add_argument(
"""--gradient_accumulation_steps""" , type=lowercase__ , default=1 , help="""The number of minibatches to be ran before gradients are accumulated.""" , )
parser.add_argument(
"""--local_sgd_steps""" , type=lowercase__ , default=8 , help="""Number of local SGD steps or None to disable local SGD""" )
parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
lowerCAmelCase_ :Optional[Any] = parser.parse_args()
lowerCAmelCase_ :Tuple = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 4_2, """batch_size""": 1_6}
training_function(lowercase__ , lowercase__ )
if __name__ == "__main__":
main()
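# --- Illustrative sketch (assumption): the core idea behind LocalSGD used above --
# each worker takes K local optimizer steps, then parameters are averaged across
# workers. Simulated here with plain tensors and a fake update rule; there is no
# distributed backend involved.
import torch

worker_params = [torch.tensor([0.0]), torch.tensor([4.0])]  # per-worker parameter copies
local_sgd_steps = 8
for step in range(1, 17):
    for param in worker_params:
        param -= 0.1 * param                     # stand-in for a local SGD step
    if step % local_sgd_steps == 0:              # synchronize every K steps
        mean = torch.stack(worker_params).mean(dim=0)
        worker_params = [mean.clone() for _ in worker_params]
print(worker_params[0].item(), worker_params[1].item())  # equal after the final sync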
| 1 | 0 |
"""simple docstring"""
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
__UpperCAmelCase = {
'vocab_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json',
},
'merges_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt',
},
'tokenizer_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json',
},
}
__UpperCAmelCase = {
'allenai/led-base-16384': 1_63_84,
}
class _SCREAMING_SNAKE_CASE ( A__ ):
UpperCAmelCase_ :Any = VOCAB_FILES_NAMES
UpperCAmelCase_ :Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase_ :Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase_ :Tuple = LEDTokenizer
UpperCAmelCase_ :Union[str, Any] = ["input_ids", "attention_mask"]
def __init__( self , __A=None , __A=None , __A=None , __A="replace" , __A="<s>" , __A="</s>" , __A="</s>" , __A="<s>" , __A="<unk>" , __A="<pad>" , __A="<mask>" , __A=False , __A=True , **__A , ) -> int:
super().__init__(
__A , __A , tokenizer_file=__A , errors=__A , bos_token=__A , eos_token=__A , sep_token=__A , cls_token=__A , unk_token=__A , pad_token=__A , mask_token=__A , add_prefix_space=__A , trim_offsets=__A , **__A , )
lowerCAmelCase_ :str = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" , __A ) != add_prefix_space:
lowerCAmelCase_ :Union[str, Any] = getattr(__A , pre_tok_state.pop("""type""" ) )
lowerCAmelCase_ :Any = add_prefix_space
lowerCAmelCase_ :Dict = pre_tok_class(**__A )
lowerCAmelCase_ :Optional[Any] = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
lowerCAmelCase_ :int = """post_processor"""
lowerCAmelCase_ :Dict = getattr(self.backend_tokenizer , __A , __A )
if tokenizer_component_instance:
lowerCAmelCase_ :Tuple = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
lowerCAmelCase_ :Tuple = tuple(state["""sep"""] )
if "cls" in state:
lowerCAmelCase_ :Dict = tuple(state["""cls"""] )
lowerCAmelCase_ :Dict = False
if state.get("""add_prefix_space""" , __A ) != add_prefix_space:
lowerCAmelCase_ :str = add_prefix_space
lowerCAmelCase_ :str = True
if state.get("""trim_offsets""" , __A ) != trim_offsets:
lowerCAmelCase_ :Tuple = trim_offsets
lowerCAmelCase_ :Tuple = True
if changes_to_apply:
lowerCAmelCase_ :Tuple = getattr(__A , state.pop("""type""" ) )
lowerCAmelCase_ :List[str] = component_class(**__A )
setattr(self.backend_tokenizer , __A , __A )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def __lowerCAmelCase ( self ) -> str:
if self._mask_token is None:
if self.verbose:
logger.error("""Using mask_token, but it is not set yet.""" )
return None
return str(self._mask_token )
@mask_token.setter
def __lowerCAmelCase ( self , __A ) -> Any:
lowerCAmelCase_ :Dict = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else value
lowerCAmelCase_ :int = value
def __lowerCAmelCase ( self , *__A , **__A ) -> BatchEncoding:
lowerCAmelCase_ :Optional[Any] = kwargs.get("""is_split_into_words""" , __A )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"""to use it with pretokenized inputs.""" )
return super()._batch_encode_plus(*__A , **__A )
def __lowerCAmelCase ( self , *__A , **__A ) -> BatchEncoding:
lowerCAmelCase_ :str = kwargs.get("""is_split_into_words""" , __A )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"""to use it with pretokenized inputs.""" )
return super()._encode_plus(*__A , **__A )
def __lowerCAmelCase ( self , __A , __A = None ) -> Tuple[str]:
lowerCAmelCase_ :Tuple = self._tokenizer.model.save(__A , name=__A )
return tuple(__A )
def __lowerCAmelCase ( self , __A , __A=None ) -> Tuple:
lowerCAmelCase_ :List[Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def __lowerCAmelCase ( self , __A , __A = None ) -> List[int]:
lowerCAmelCase_ :Any = [self.sep_token_id]
lowerCAmelCase_ :Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __lowerCAmelCase ( self , __A , __A = None , __A = PaddingStrategy.DO_NOT_PAD , __A = None , __A = None , ) -> dict:
lowerCAmelCase_ :Tuple = super()._pad(
encoded_inputs=__A , max_length=__A , padding_strategy=__A , pad_to_multiple_of=__A , return_attention_mask=__A , )
# Load from model defaults
if return_attention_mask is None:
lowerCAmelCase_ :List[str] = """attention_mask""" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
lowerCAmelCase_ :List[str] = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` needs to have the same length as other (sequential) inputs.
lowerCAmelCase_ :Dict = len(encoded_inputs["""global_attention_mask"""] ) != len(__A )
if needs_to_be_padded:
lowerCAmelCase_ :Any = len(__A ) - len(encoded_inputs["""global_attention_mask"""] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
lowerCAmelCase_ :Tuple = (
encoded_inputs["""global_attention_mask"""] + [-1] * difference
)
elif self.padding_side == "left":
lowerCAmelCase_ :Optional[int] = [-1] * difference + encoded_inputs[
"""global_attention_mask"""
]
else:
raise ValueError("""Invalid padding strategy:""" + str(self.padding_side ) )
return encoded_inputs
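# --- Illustrative sketch (assumption, not the library API): why the _pad override
# above pads global_attention_mask with -1. In LED, 0 already means "local
# attention" and 1 means "global attention", so padding needs a third sentinel
# value. The helper name below is hypothetical.
def pad_global_attention_mask(mask, target_len, padding_side="right"):
    difference = target_len - len(mask)
    if padding_side == "right":
        return mask + [-1] * difference
    return [-1] * difference + mask

print(pad_global_attention_mask([1, 0, 0], 5))  # [1, 0, 0, -1, -1]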
| 350 |
"""simple docstring"""
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
__UpperCAmelCase = 16
__UpperCAmelCase = 32
def _snake_case ( lowercase__ : Accelerator , lowercase__ : int = 1_6 , lowercase__ : str = "bert-base-cased" ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase_ :List[str] = AutoTokenizer.from_pretrained(lowercase__ )
lowerCAmelCase_ :Optional[Any] = load_dataset("""glue""" , """mrpc""" )
def tokenize_function(lowercase__ : List[str] ):
# max_length=None => use the model max length (it's actually the default)
lowerCAmelCase_ :str = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowercase__ , max_length=lowercase__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
lowerCAmelCase_ :str = datasets.map(
lowercase__ , batched=lowercase__ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , load_from_cache_file=lowercase__ )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowerCAmelCase_ :List[str] = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(lowercase__ : Union[str, Any] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(lowercase__ , padding="""max_length""" , max_length=1_2_8 , return_tensors="""pt""" )
return tokenizer.pad(lowercase__ , padding="""longest""" , return_tensors="""pt""" )
# Instantiate dataloaders.
lowerCAmelCase_ :Optional[int] = DataLoader(
tokenized_datasets["""train"""] , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=lowercase__ )
lowerCAmelCase_ :Any = DataLoader(
tokenized_datasets["""validation"""] , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=lowercase__ )
return train_dataloader, eval_dataloader
def _snake_case ( lowercase__ : Optional[Any] , lowercase__ : Union[str, Any] , lowercase__ : Tuple , lowercase__ : int ) -> List[str]:
'''simple docstring'''
model.eval()
lowerCAmelCase_ :Dict = 0
for step, batch in enumerate(lowercase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowerCAmelCase_ :Optional[int] = model(**lowercase__ )
lowerCAmelCase_ :Optional[int] = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once than multiple times
lowerCAmelCase_ , lowerCAmelCase_ :List[Any] = accelerator.gather(
(predictions, batch["""labels"""]) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(lowercase__ ) - 1:
lowerCAmelCase_ :Optional[Any] = predictions[: len(eval_dataloader.dataset ) - samples_seen]
lowerCAmelCase_ :Any = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=lowercase__ , references=lowercase__ , )
lowerCAmelCase_ :Tuple = metric.compute()
return eval_metric["accuracy"]
def _snake_case ( lowercase__ : str , lowercase__ : List[str] ) -> Any:
'''simple docstring'''
lowerCAmelCase_ :Optional[int] = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowerCAmelCase_ :int = config["""lr"""]
lowerCAmelCase_ :Union[str, Any] = int(config["""num_epochs"""] )
lowerCAmelCase_ :Optional[int] = int(config["""seed"""] )
lowerCAmelCase_ :Union[str, Any] = int(config["""batch_size"""] )
lowerCAmelCase_ :Optional[Any] = args.model_name_or_path
set_seed(lowercase__ )
lowerCAmelCase_ , lowerCAmelCase_ :Dict = get_dataloaders(lowercase__ , lowercase__ , lowercase__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowerCAmelCase_ :str = AutoModelForSequenceClassification.from_pretrained(lowercase__ , return_dict=lowercase__ )
# Instantiate optimizer
lowerCAmelCase_ :List[str] = (
AdamW
if accelerator.state.deepspeed_plugin is None
or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
lowerCAmelCase_ :str = optimizer_cls(params=model.parameters() , lr=lowercase__ )
if accelerator.state.deepspeed_plugin is not None:
lowerCAmelCase_ :Union[str, Any] = accelerator.state.deepspeed_plugin.deepspeed_config[
"""gradient_accumulation_steps"""
]
else:
lowerCAmelCase_ :Any = 1
lowerCAmelCase_ :str = (len(lowercase__ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
lowerCAmelCase_ :List[str] = get_linear_schedule_with_warmup(
optimizer=lowercase__ , num_warmup_steps=0 , num_training_steps=lowercase__ , )
else:
lowerCAmelCase_ :int = DummyScheduler(lowercase__ , total_num_steps=lowercase__ , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :List[Any] = accelerator.prepare(
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
# We need to keep track of how many total steps we have iterated over
lowerCAmelCase_ :List[str] = 0
# We also need to keep track of the stating epoch so files are named properly
lowerCAmelCase_ :List[Any] = 0
lowerCAmelCase_ :str = evaluate.load("""glue""" , """mrpc""" )
lowerCAmelCase_ :Optional[Any] = num_epochs
if args.partial_train_epoch is not None:
lowerCAmelCase_ :Dict = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
lowerCAmelCase_ :Optional[Any] = args.resume_from_checkpoint.split("""epoch_""" )[1]
lowerCAmelCase_ :int = """"""
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
lowerCAmelCase_ :Union[str, Any] = int(lowercase__ ) + 1
lowerCAmelCase_ :Optional[int] = evaluation_loop(lowercase__ , lowercase__ , lowercase__ , lowercase__ )
accelerator.print("""resumed checkpoint performance:""" , lowercase__ )
accelerator.print("""resumed checkpoint's scheduler's lr:""" , lr_scheduler.get_lr()[0] )
accelerator.print("""resumed optimizers's lr:""" , optimizer.param_groups[0]["""lr"""] )
with open(os.path.join(args.output_dir , f"""state_{starting_epoch-1}.json""" ) , """r""" ) as f:
lowerCAmelCase_ :List[str] = json.load(lowercase__ )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
lowerCAmelCase_ :List[Any] = {}
for epoch in range(lowercase__ , lowercase__ ):
model.train()
for step, batch in enumerate(lowercase__ ):
lowerCAmelCase_ :Optional[int] = model(**lowercase__ )
lowerCAmelCase_ :Dict = outputs.loss
lowerCAmelCase_ :int = loss / gradient_accumulation_steps
accelerator.backward(lowercase__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
lowerCAmelCase_ :List[str] = f"""epoch_{epoch}"""
lowerCAmelCase_ :Any = os.path.join(args.output_dir , lowercase__ )
accelerator.save_state(lowercase__ )
lowerCAmelCase_ :List[Any] = evaluation_loop(lowercase__ , lowercase__ , lowercase__ , lowercase__ )
lowerCAmelCase_ :Union[str, Any] = accuracy
lowerCAmelCase_ :Any = lr_scheduler.get_lr()[0]
lowerCAmelCase_ :str = optimizer.param_groups[0]["""lr"""]
lowerCAmelCase_ :List[Any] = epoch
lowerCAmelCase_ :Tuple = overall_step
accelerator.print(f"""epoch {epoch}:""" , lowercase__ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , f"""state_{epoch}.json""" ) , """w""" ) as f:
json.dump(lowercase__ , lowercase__ )
def _snake_case ( ) -> int:
'''simple docstring'''
lowerCAmelCase_ :List[Any] = argparse.ArgumentParser(description="""Simple example of training script tracking peak GPU memory usage.""" )
parser.add_argument(
"""--model_name_or_path""" , type=lowercase__ , default="""bert-base-cased""" , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=lowercase__ , )
parser.add_argument(
"""--output_dir""" , type=lowercase__ , default=""".""" , help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""" , )
parser.add_argument(
"""--resume_from_checkpoint""" , type=lowercase__ , default=lowercase__ , help="""If the training should continue from a checkpoint folder.""" , )
parser.add_argument(
"""--partial_train_epoch""" , type=lowercase__ , default=lowercase__ , help="""If passed, the training will stop after this number of epochs.""" , )
parser.add_argument(
"""--num_epochs""" , type=lowercase__ , default=2 , help="""Number of train epochs.""" , )
lowerCAmelCase_ :Optional[int] = parser.parse_args()
lowerCAmelCase_ :List[Any] = {"""lr""": 2E-5, """num_epochs""": args.num_epochs, """seed""": 4_2, """batch_size""": 1_6}
training_function(lowercase__ , lowercase__ )
if __name__ == "__main__":
main()
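# --- Illustrative sketch (assumption): the resume check used above in miniature --
# write per-epoch state to state_{epoch}.json, then verify it on reload. The
# values are hypothetical placeholders.
import json
import os
import tempfile

state = {"accuracy": 0.84, "lr": 1.5e-5, "epoch": 1}
with tempfile.TemporaryDirectory() as output_dir:
    path = os.path.join(output_dir, f"state_{state['epoch']}.json")
    with open(path, "w") as f:
        json.dump(state, f)
    with open(path) as f:
        resumed = json.load(f)
    assert resumed["accuracy"] == state["accuracy"], "loading from checkpoint failed"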
| 1 | 0 |
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = [
('bert.bert', 'visual_bert'),
('bert.cls', 'cls'),
('bert.classifier', 'cls'),
('token_type_embeddings_visual', 'visual_token_type_embeddings'),
('position_embeddings_visual', 'visual_position_embeddings'),
('projection', 'visual_projection'),
]
__UpperCAmelCase = [
'nlvr2_coco_pre_trained.th',
'nlvr2_fine_tuned.th',
'nlvr2_pre_trained.th',
'vcr_coco_pre_train.th',
'vcr_fine_tune.th',
'vcr_pre_train.th',
'vqa_coco_pre_trained.th',
'vqa_fine_tuned.th',
'vqa_pre_trained.th',
]
def _snake_case ( lowercase__ : Any ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ :List[str] = torch.load(lowercase__ , map_location="""cpu""" )
return sd
def _snake_case ( lowercase__ : int , lowercase__ : Optional[Any] , lowercase__ : List[str]=rename_keys_prefix ) -> Any:
'''simple docstring'''
lowerCAmelCase_ :Optional[int] = OrderedDict()
lowerCAmelCase_ :str = torch.arange(config.max_position_embeddings ).expand((1, -1) )
# detector_d = OrderedDict()
for key in d:
if "detector" in key:
# detector_d[key.replace('detector.','')] = d[key]
continue
lowerCAmelCase_ :List[str] = key
for name_pair in rename_keys_prefix:
lowerCAmelCase_ :Optional[Any] = new_key.replace(name_pair[0] , name_pair[1] )
lowerCAmelCase_ :int = d[key]
if key == "bert.cls.predictions.decoder.weight":
# Old bert code didn't have `decoder.bias`; it was added separately
lowerCAmelCase_ :str = new_d["""cls.predictions.bias"""]
return new_d
@torch.no_grad()
def _snake_case ( lowercase__ : Optional[Any] , lowercase__ : Dict ) -> int:
'''simple docstring'''
assert (
checkpoint_path.split("""/""" )[-1] in ACCEPTABLE_CHECKPOINTS
), f"""The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."""
# Get Config
if "pre" in checkpoint_path:
lowerCAmelCase_ :Optional[Any] = """pretraining"""
if "vcr" in checkpoint_path:
lowerCAmelCase_ :List[Any] = {"""visual_embedding_dim""": 5_1_2}
elif "vqa_advanced" in checkpoint_path:
lowerCAmelCase_ :Optional[Any] = {"""visual_embedding_dim""": 2_0_4_8}
elif "vqa" in checkpoint_path:
lowerCAmelCase_ :int = {"""visual_embedding_dim""": 2_0_4_8}
elif "nlvr" in checkpoint_path:
lowerCAmelCase_ :List[str] = {"""visual_embedding_dim""": 1_0_2_4}
else:
raise NotImplementedError(f"""No implementation found for `{checkpoint_path}`.""" )
else:
if "vcr" in checkpoint_path:
lowerCAmelCase_ :Tuple = {"""visual_embedding_dim""": 5_1_2}
lowerCAmelCase_ :List[Any] = """multichoice"""
elif "vqa_advanced" in checkpoint_path:
lowerCAmelCase_ :Optional[int] = {"""visual_embedding_dim""": 2_0_4_8}
lowerCAmelCase_ :Tuple = """vqa_advanced"""
elif "vqa" in checkpoint_path:
lowerCAmelCase_ :List[Any] = {"""visual_embedding_dim""": 2_0_4_8, """num_labels""": 3_1_2_9}
lowerCAmelCase_ :Dict = """vqa"""
elif "nlvr" in checkpoint_path:
lowerCAmelCase_ :Optional[Any] = {
"""visual_embedding_dim""": 1_0_2_4,
"""num_labels""": 2,
}
lowerCAmelCase_ :Any = """nlvr"""
lowerCAmelCase_ :List[str] = VisualBertConfig(**lowercase__ )
# Load State Dict
lowerCAmelCase_ :Optional[int] = load_state_dict(lowercase__ )
lowerCAmelCase_ :int = get_new_dict(lowercase__ , lowercase__ )
if model_type == "pretraining":
lowerCAmelCase_ :List[Any] = VisualBertForPreTraining(lowercase__ )
elif model_type == "vqa":
lowerCAmelCase_ :Any = VisualBertForQuestionAnswering(lowercase__ )
elif model_type == "nlvr":
lowerCAmelCase_ :Any = VisualBertForVisualReasoning(lowercase__ )
elif model_type == "multichoice":
lowerCAmelCase_ :Optional[int] = VisualBertForMultipleChoice(lowercase__ )
model.load_state_dict(lowercase__ )
# Save Checkpoints
Path(lowercase__ ).mkdir(exist_ok=lowercase__ )
model.save_pretrained(lowercase__ )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument('orig_checkpoint_path', type=str, help='A path to .th on local filesystem.')
parser.add_argument('pytorch_dump_folder_path', type=str, help='Path to the output PyTorch model.')
__UpperCAmelCase = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
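# Example invocation (script and path names are illustrative):
#   python convert_visual_bert_original_pytorch_checkpoint_to_pytorch.py \
#       vcr_pre_train.th ./visualbert-vcr-pretraining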
| 351 |
"""simple docstring"""
import base64
import io
import json
import os
from copy import deepcopy
from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler
class _SCREAMING_SNAKE_CASE :
def __init__( self , __A ) -> Union[str, Any]:
if isinstance(__A , __A ):
            # Don't modify the user's data in case they want to reuse it (e.g. in tests): once it has been
            # modified it would not be accepted here again, since the `auto` values would already have been overridden
lowerCAmelCase_ :Tuple = deepcopy(__A )
elif os.path.exists(__A ):
with io.open(__A , """r""" , encoding="""utf-8""" ) as f:
lowerCAmelCase_ :str = json.load(__A )
else:
try:
                lowerCAmelCase_ :Dict = base64.urlsafe_b64decode(__A ).decode("""utf-8""" )
lowerCAmelCase_ :int = json.loads(__A )
except (UnicodeDecodeError, AttributeError, ValueError):
raise ValueError(
f"""Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}""" )
lowerCAmelCase_ :Optional[Any] = config
self.set_stage_and_offload()
def __lowerCAmelCase ( self ) -> Tuple:
        # ZeRO stage - resolved as early as possible, before the model is created, so that the
        # ``is_deepspeed_zero3_enabled`` query works and the early DeepSpeed config object is available
        # during ``zero.Init()``, which needs to know the dtype and some other hparams.
lowerCAmelCase_ :Tuple = self.get_value("""zero_optimization.stage""" , -1 )
# offload
lowerCAmelCase_ :Dict = False
        if self.is_zero2() or self.is_zero3():
lowerCAmelCase_ :Optional[int] = set(["""cpu""", """nvme"""] )
lowerCAmelCase_ :Union[str, Any] = set(
[
self.get_value("""zero_optimization.offload_optimizer.device""" ),
self.get_value("""zero_optimization.offload_param.device""" ),
] )
if len(offload_devices & offload_devices_valid ) > 0:
lowerCAmelCase_ :Optional[int] = True
def __lowerCAmelCase ( self , __A ) -> Optional[Any]:
lowerCAmelCase_ :str = self.config
# find the config node of interest if it exists
lowerCAmelCase_ :Tuple = ds_key_long.split(""".""" )
lowerCAmelCase_ :List[str] = nodes.pop()
for node in nodes:
lowerCAmelCase_ :Tuple = config.get(__A )
if config is None:
return None, ds_key
return config, ds_key
def __lowerCAmelCase ( self , __A , __A=None ) -> Optional[Any]:
lowerCAmelCase_ , lowerCAmelCase_ :Optional[Any] = self.find_config_node(__A )
if config is None:
return default
return config.get(__A , __A )
def __lowerCAmelCase ( self , __A , __A=False ) -> Optional[Any]:
lowerCAmelCase_ :Tuple = self.config
# find the config node of interest if it exists
lowerCAmelCase_ :Union[str, Any] = ds_key_long.split(""".""" )
for node in nodes:
lowerCAmelCase_ :int = config
lowerCAmelCase_ :Any = config.get(__A )
if config is None:
if must_exist:
raise ValueError(f"""Can't find {ds_key_long} entry in the config: {self.config}""" )
else:
return
# if found remove it
if parent_config is not None:
parent_config.pop(__A )
def __lowerCAmelCase ( self , __A ) -> Union[str, Any]:
lowerCAmelCase_ :Optional[int] = self.get_value(__A )
return False if value is None else bool(__A )
def __lowerCAmelCase ( self , __A ) -> Optional[int]:
lowerCAmelCase_ :List[str] = self.get_value(__A )
return False if value is None else not bool(__A )
def __lowerCAmelCase ( self ) -> str:
return self._stage == 2
def __lowerCAmelCase ( self ) -> Union[str, Any]:
return self._stage == 3
def __lowerCAmelCase ( self ) -> Union[str, Any]:
return self._offload
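# A minimal standalone sketch of the dotted-key lookup that the find/get methods above
# implement (illustrative helper, not part of the class API):
def _lookup_ds_key(config, ds_key_long, default=None):
    # walk the nested dict along "a.b.c"-style keys
    nodes = ds_key_long.split(".")
    leaf = nodes.pop()
    for node in nodes:
        config = config.get(node)
        if config is None:
            return default
    return config.get(leaf, default)
# _lookup_ds_key({"zero_optimization": {"stage": 3}}, "zero_optimization.stage") -> 3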
class _SCREAMING_SNAKE_CASE :
def __init__( self , __A ) -> Optional[int]:
lowerCAmelCase_ :Dict = engine
def __lowerCAmelCase ( self , __A , **__A ) -> str:
# runs backpropagation and handles mixed precision
self.engine.backward(__A , **__A )
        # DeepSpeed's `engine.step` performs the following operations:
# - gradient accumulation check
# - gradient clipping
# - optimizer step
# - zero grad
# - checking overflow
# - lr_scheduler step (only if engine.lr_scheduler is not None)
self.engine.step()
    # and this plugin overrides the above calls with no-ops when Accelerate runs under
    # DeepSpeed, but allows normal functionality for non-DeepSpeed cases, thus enabling a simple
    # training loop that works transparently under many training regimes.
class _SCREAMING_SNAKE_CASE ( A__ ):
def __init__( self , __A ) -> List[str]:
super().__init__(__A , device_placement=__A , scaler=__A )
lowerCAmelCase_ :List[str] = hasattr(self.optimizer , """overflow""" )
def __lowerCAmelCase ( self , __A=None ) -> Optional[Any]:
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
def __lowerCAmelCase ( self ) -> List[Any]:
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
@property
def __lowerCAmelCase ( self ) -> int:
if self.__has_overflow__:
return self.optimizer.overflow
return False
class _SCREAMING_SNAKE_CASE ( A__ ):
def __init__( self , __A , __A ) -> Optional[int]:
super().__init__(__A , __A )
def __lowerCAmelCase ( self ) -> Any:
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
class _SCREAMING_SNAKE_CASE :
def __init__( self , __A , __A=0.0_0_1 , __A=0 , **__A ) -> List[Any]:
lowerCAmelCase_ :str = params
lowerCAmelCase_ :Any = lr
lowerCAmelCase_ :List[Any] = weight_decay
lowerCAmelCase_ :Any = kwargs
class _SCREAMING_SNAKE_CASE :
def __init__( self , __A , __A=None , __A=0 , **__A ) -> List[str]:
lowerCAmelCase_ :Optional[int] = optimizer
lowerCAmelCase_ :int = total_num_steps
lowerCAmelCase_ :List[Any] = warmup_num_steps
lowerCAmelCase_ :int = kwargs
| 1 | 0 |
"""simple docstring"""
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
__UpperCAmelCase = {
'sample_size': 32,
'in_channels': 3,
'out_channels': 3,
'layers_per_block': 2,
'num_class_embeds': 10_00,
'block_out_channels': [32, 64],
'attention_head_dim': 8,
'down_block_types': [
'ResnetDownsampleBlock2D',
'AttnDownBlock2D',
],
'up_block_types': [
'AttnUpBlock2D',
'ResnetUpsampleBlock2D',
],
'resnet_time_scale_shift': 'scale_shift',
'upsample_type': 'resnet',
'downsample_type': 'resnet',
}
__UpperCAmelCase = {
'sample_size': 64,
'in_channels': 3,
'out_channels': 3,
'layers_per_block': 3,
'num_class_embeds': 10_00,
'block_out_channels': [1_92, 1_92 * 2, 1_92 * 3, 1_92 * 4],
'attention_head_dim': 64,
'down_block_types': [
'ResnetDownsampleBlock2D',
'AttnDownBlock2D',
'AttnDownBlock2D',
'AttnDownBlock2D',
],
'up_block_types': [
'AttnUpBlock2D',
'AttnUpBlock2D',
'AttnUpBlock2D',
'ResnetUpsampleBlock2D',
],
'resnet_time_scale_shift': 'scale_shift',
'upsample_type': 'resnet',
'downsample_type': 'resnet',
}
__UpperCAmelCase = {
'sample_size': 2_56,
'in_channels': 3,
'out_channels': 3,
'layers_per_block': 2,
'num_class_embeds': None,
'block_out_channels': [2_56, 2_56, 2_56 * 2, 2_56 * 2, 2_56 * 4, 2_56 * 4],
'attention_head_dim': 64,
'down_block_types': [
'ResnetDownsampleBlock2D',
'ResnetDownsampleBlock2D',
'ResnetDownsampleBlock2D',
'AttnDownBlock2D',
'AttnDownBlock2D',
'AttnDownBlock2D',
],
'up_block_types': [
'AttnUpBlock2D',
'AttnUpBlock2D',
'AttnUpBlock2D',
'ResnetUpsampleBlock2D',
'ResnetUpsampleBlock2D',
'ResnetUpsampleBlock2D',
],
'resnet_time_scale_shift': 'default',
'upsample_type': 'resnet',
'downsample_type': 'resnet',
}
__UpperCAmelCase = {
'num_train_timesteps': 40,
'sigma_min': 0.002,
'sigma_max': 80.0,
}
__UpperCAmelCase = {
'num_train_timesteps': 2_01,
'sigma_min': 0.002,
'sigma_max': 80.0,
}
__UpperCAmelCase = {
'num_train_timesteps': 1_51,
'sigma_min': 0.002,
'sigma_max': 80.0,
}
def _snake_case ( lowercase__ : Tuple ) -> Tuple:
'''simple docstring'''
if isinstance(lowercase__ , lowercase__ ):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError("""boolean value expected""" )
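# Example (illustrative): this strtobool-style helper parses the --class_cond flag below;
#   "yes"/"true"/"t"/"y"/"1" -> True, "no"/"false"/"f"/"n"/"0" -> False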
def _snake_case ( lowercase__ : List[str] , lowercase__ : Tuple , lowercase__ : str , lowercase__ : Any , lowercase__ : List[str]=False ) -> str:
'''simple docstring'''
lowerCAmelCase_ :List[Any] = checkpoint[f"""{old_prefix}.in_layers.0.weight"""]
lowerCAmelCase_ :Dict = checkpoint[f"""{old_prefix}.in_layers.0.bias"""]
lowerCAmelCase_ :Dict = checkpoint[f"""{old_prefix}.in_layers.2.weight"""]
lowerCAmelCase_ :int = checkpoint[f"""{old_prefix}.in_layers.2.bias"""]
lowerCAmelCase_ :Optional[int] = checkpoint[f"""{old_prefix}.emb_layers.1.weight"""]
lowerCAmelCase_ :List[str] = checkpoint[f"""{old_prefix}.emb_layers.1.bias"""]
lowerCAmelCase_ :List[str] = checkpoint[f"""{old_prefix}.out_layers.0.weight"""]
lowerCAmelCase_ :List[Any] = checkpoint[f"""{old_prefix}.out_layers.0.bias"""]
lowerCAmelCase_ :List[str] = checkpoint[f"""{old_prefix}.out_layers.3.weight"""]
lowerCAmelCase_ :int = checkpoint[f"""{old_prefix}.out_layers.3.bias"""]
if has_skip:
lowerCAmelCase_ :Optional[int] = checkpoint[f"""{old_prefix}.skip_connection.weight"""]
lowerCAmelCase_ :str = checkpoint[f"""{old_prefix}.skip_connection.bias"""]
return new_checkpoint
def _snake_case ( lowercase__ : List[Any] , lowercase__ : str , lowercase__ : Optional[Any] , lowercase__ : Optional[int] , lowercase__ : Dict=None ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ :Optional[Any] = checkpoint[f"""{old_prefix}.qkv.weight"""].chunk(3 , dim=0 )
lowerCAmelCase_ :Union[str, Any] = checkpoint[f"""{old_prefix}.qkv.bias"""].chunk(3 , dim=0 )
lowerCAmelCase_ :Optional[int] = checkpoint[f"""{old_prefix}.norm.weight"""]
lowerCAmelCase_ :str = checkpoint[f"""{old_prefix}.norm.bias"""]
lowerCAmelCase_ :Any = weight_q.squeeze(-1 ).squeeze(-1 )
lowerCAmelCase_ :Optional[int] = bias_q.squeeze(-1 ).squeeze(-1 )
lowerCAmelCase_ :List[Any] = weight_k.squeeze(-1 ).squeeze(-1 )
lowerCAmelCase_ :Optional[int] = bias_k.squeeze(-1 ).squeeze(-1 )
lowerCAmelCase_ :Tuple = weight_v.squeeze(-1 ).squeeze(-1 )
lowerCAmelCase_ :List[Any] = bias_v.squeeze(-1 ).squeeze(-1 )
lowerCAmelCase_ :Optional[Any] = (
checkpoint[f"""{old_prefix}.proj_out.weight"""].squeeze(-1 ).squeeze(-1 )
)
lowerCAmelCase_ :Union[str, Any] = checkpoint[f"""{old_prefix}.proj_out.bias"""].squeeze(-1 ).squeeze(-1 )
return new_checkpoint
def _snake_case ( lowercase__ : str , lowercase__ : List[Any] ) -> int:
'''simple docstring'''
lowerCAmelCase_ :str = torch.load(lowercase__ , map_location="""cpu""" )
lowerCAmelCase_ :Optional[Any] = {}
lowerCAmelCase_ :List[Any] = checkpoint["""time_embed.0.weight"""]
lowerCAmelCase_ :List[str] = checkpoint["""time_embed.0.bias"""]
lowerCAmelCase_ :List[str] = checkpoint["""time_embed.2.weight"""]
lowerCAmelCase_ :Any = checkpoint["""time_embed.2.bias"""]
if unet_config["num_class_embeds"] is not None:
lowerCAmelCase_ :Union[str, Any] = checkpoint["""label_emb.weight"""]
lowerCAmelCase_ :Dict = checkpoint["""input_blocks.0.0.weight"""]
lowerCAmelCase_ :Optional[int] = checkpoint["""input_blocks.0.0.bias"""]
lowerCAmelCase_ :Any = unet_config["""down_block_types"""]
lowerCAmelCase_ :Any = unet_config["""layers_per_block"""]
lowerCAmelCase_ :Optional[Any] = unet_config["""attention_head_dim"""]
lowerCAmelCase_ :str = unet_config["""block_out_channels"""]
lowerCAmelCase_ :Union[str, Any] = 1
lowerCAmelCase_ :Tuple = channels_list[0]
for i, layer_type in enumerate(lowercase__ ):
lowerCAmelCase_ :List[str] = channels_list[i]
lowerCAmelCase_ :Optional[int] = current_channels != prev_channels
if layer_type == "ResnetDownsampleBlock2D":
for j in range(lowercase__ ):
lowerCAmelCase_ :List[Any] = f"""down_blocks.{i}.resnets.{j}"""
lowerCAmelCase_ :int = f"""input_blocks.{current_layer}.0"""
lowerCAmelCase_ :Any = True if j == 0 and downsample_block_has_skip else False
lowerCAmelCase_ :Optional[int] = convert_resnet(lowercase__ , lowercase__ , lowercase__ , lowercase__ , has_skip=lowercase__ )
current_layer += 1
elif layer_type == "AttnDownBlock2D":
for j in range(lowercase__ ):
lowerCAmelCase_ :List[Any] = f"""down_blocks.{i}.resnets.{j}"""
lowerCAmelCase_ :List[str] = f"""input_blocks.{current_layer}.0"""
lowerCAmelCase_ :Dict = True if j == 0 and downsample_block_has_skip else False
lowerCAmelCase_ :Any = convert_resnet(lowercase__ , lowercase__ , lowercase__ , lowercase__ , has_skip=lowercase__ )
lowerCAmelCase_ :List[Any] = f"""down_blocks.{i}.attentions.{j}"""
lowerCAmelCase_ :Tuple = f"""input_blocks.{current_layer}.1"""
lowerCAmelCase_ :Dict = convert_attention(
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
current_layer += 1
if i != len(lowercase__ ) - 1:
lowerCAmelCase_ :Tuple = f"""down_blocks.{i}.downsamplers.0"""
lowerCAmelCase_ :List[Any] = f"""input_blocks.{current_layer}.0"""
lowerCAmelCase_ :List[str] = convert_resnet(lowercase__ , lowercase__ , lowercase__ , lowercase__ )
current_layer += 1
lowerCAmelCase_ :Any = current_channels
# hardcoded the mid-block for now
lowerCAmelCase_ :int = """mid_block.resnets.0"""
lowerCAmelCase_ :int = """middle_block.0"""
lowerCAmelCase_ :Union[str, Any] = convert_resnet(lowercase__ , lowercase__ , lowercase__ , lowercase__ )
lowerCAmelCase_ :List[str] = """mid_block.attentions.0"""
lowerCAmelCase_ :Tuple = """middle_block.1"""
lowerCAmelCase_ :List[Any] = convert_attention(lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
lowerCAmelCase_ :Optional[int] = """mid_block.resnets.1"""
lowerCAmelCase_ :str = """middle_block.2"""
lowerCAmelCase_ :List[str] = convert_resnet(lowercase__ , lowercase__ , lowercase__ , lowercase__ )
lowerCAmelCase_ :Optional[Any] = 0
lowerCAmelCase_ :Union[str, Any] = unet_config["""up_block_types"""]
for i, layer_type in enumerate(lowercase__ ):
if layer_type == "ResnetUpsampleBlock2D":
for j in range(layers_per_block + 1 ):
lowerCAmelCase_ :Any = f"""up_blocks.{i}.resnets.{j}"""
lowerCAmelCase_ :Optional[Any] = f"""output_blocks.{current_layer}.0"""
lowerCAmelCase_ :Union[str, Any] = convert_resnet(lowercase__ , lowercase__ , lowercase__ , lowercase__ , has_skip=lowercase__ )
current_layer += 1
if i != len(lowercase__ ) - 1:
lowerCAmelCase_ :Any = f"""up_blocks.{i}.upsamplers.0"""
lowerCAmelCase_ :Optional[int] = f"""output_blocks.{current_layer-1}.1"""
lowerCAmelCase_ :Tuple = convert_resnet(lowercase__ , lowercase__ , lowercase__ , lowercase__ )
elif layer_type == "AttnUpBlock2D":
for j in range(layers_per_block + 1 ):
lowerCAmelCase_ :Optional[int] = f"""up_blocks.{i}.resnets.{j}"""
lowerCAmelCase_ :Optional[int] = f"""output_blocks.{current_layer}.0"""
lowerCAmelCase_ :Any = convert_resnet(lowercase__ , lowercase__ , lowercase__ , lowercase__ , has_skip=lowercase__ )
lowerCAmelCase_ :int = f"""up_blocks.{i}.attentions.{j}"""
lowerCAmelCase_ :Optional[Any] = f"""output_blocks.{current_layer}.1"""
lowerCAmelCase_ :int = convert_attention(
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
current_layer += 1
if i != len(lowercase__ ) - 1:
lowerCAmelCase_ :List[str] = f"""up_blocks.{i}.upsamplers.0"""
lowerCAmelCase_ :Tuple = f"""output_blocks.{current_layer-1}.2"""
lowerCAmelCase_ :List[str] = convert_resnet(lowercase__ , lowercase__ , lowercase__ , lowercase__ )
lowerCAmelCase_ :Optional[Any] = checkpoint["""out.0.weight"""]
lowerCAmelCase_ :Optional[Any] = checkpoint["""out.0.bias"""]
lowerCAmelCase_ :Optional[int] = checkpoint["""out.2.weight"""]
lowerCAmelCase_ :Optional[Any] = checkpoint["""out.2.bias"""]
return new_checkpoint
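# Example invocation (checkpoint name is illustrative; it must contain one of the substrings
# matched in the __main__ block below, e.g. "imagenet64" and "cd"):
#   python convert_consistency_to_diffusers.py --unet_path cd_imagenet64_l2.pt \
#       --dump_path ./consistency-imagenet64 --class_cond True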
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument('--unet_path', default=None, type=str, required=True, help='Path to the unet.pt to convert.')
parser.add_argument(
'--dump_path', default=None, type=str, required=True, help='Path to output the converted UNet model.'
)
parser.add_argument('--class_cond', default=True, type=str, help='Whether the model is class-conditional.')
__UpperCAmelCase = parser.parse_args()
__UpperCAmelCase = strabool(args.class_cond)
__UpperCAmelCase = os.path.basename(args.unet_path)
print(F"""Checkpoint: {ckpt_name}""")
# Get U-Net config
if "imagenet64" in ckpt_name:
__UpperCAmelCase = IMAGENET_64_UNET_CONFIG
elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
__UpperCAmelCase = LSUN_256_UNET_CONFIG
elif "test" in ckpt_name:
__UpperCAmelCase = TEST_UNET_CONFIG
else:
raise ValueError(F"""Checkpoint type {ckpt_name} is not currently supported.""")
if not args.class_cond:
__UpperCAmelCase = None
__UpperCAmelCase = con_pt_to_diffuser(args.unet_path, unet_config)
__UpperCAmelCase = UNetaDModel(**unet_config)
image_unet.load_state_dict(converted_unet_ckpt)
# Get scheduler config
if "cd" in ckpt_name or "test" in ckpt_name:
__UpperCAmelCase = CD_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
__UpperCAmelCase = CT_IMAGENET_64_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
__UpperCAmelCase = CT_LSUN_256_SCHEDULER_CONFIG
else:
raise ValueError(F"""Checkpoint type {ckpt_name} is not currently supported.""")
__UpperCAmelCase = CMStochasticIterativeScheduler(**scheduler_config)
__UpperCAmelCase = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
consistency_model.save_pretrained(args.dump_path)
| 352 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class _SCREAMING_SNAKE_CASE ( A__ ):
UpperCAmelCase_ :Dict = (
"This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."
"It takes two arguments named `image` which should be the original image, and `label` which should be a text "
"describing the elements what should be identified in the segmentation mask. The tool returns the mask."
)
UpperCAmelCase_ :List[str] = "CIDAS/clipseg-rd64-refined"
UpperCAmelCase_ :List[Any] = "image_segmenter"
UpperCAmelCase_ :Optional[int] = CLIPSegForImageSegmentation
UpperCAmelCase_ :Tuple = ["image", "text"]
UpperCAmelCase_ :Dict = ["image"]
def __init__( self , *__A , **__A ) -> Optional[Any]:
requires_backends(self , ["""vision"""] )
super().__init__(*__A , **__A )
def __lowerCAmelCase ( self , __A , __A ) -> Any:
return self.pre_processor(text=[label] , images=[image] , padding=__A , return_tensors="""pt""" )
def __lowerCAmelCase ( self , __A ) -> Tuple:
with torch.no_grad():
lowerCAmelCase_ :Dict = self.model(**__A ).logits
return logits
def __lowerCAmelCase ( self , __A ) -> Tuple:
lowerCAmelCase_ :Optional[int] = outputs.cpu().detach().numpy()
lowerCAmelCase_ :List[str] = 0
lowerCAmelCase_ :str = 1
        return Image.fromarray((array * 255).astype(np.uint8 ) )
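# Hypothetical usage sketch (tool alias and image path are illustrative):
#   segmenter = ImageSegmentationTool()  # i.e. the class defined above
#   mask = segmenter(image=Image.open("cat.png"), label="cat")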
| 1 | 0 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class _SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase ):
UpperCAmelCase_ :List[str] = RoCBertTokenizer
UpperCAmelCase_ :Optional[Any] = None
UpperCAmelCase_ :Optional[int] = False
UpperCAmelCase_ :int = True
UpperCAmelCase_ :Tuple = filter_non_english
def __lowerCAmelCase ( self ) -> Tuple:
super().setUp()
lowerCAmelCase_ :Union[str, Any] = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """你""", """好""", """是""", """谁""", """a""", """b""", """c""", """d"""]
lowerCAmelCase_ :Dict = {}
lowerCAmelCase_ :Union[str, Any] = {}
for i, value in enumerate(__A ):
lowerCAmelCase_ :List[Any] = i
lowerCAmelCase_ :List[str] = i
lowerCAmelCase_ :List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
lowerCAmelCase_ :str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""word_shape_file"""] )
lowerCAmelCase_ :Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""word_pronunciation_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
with open(self.word_shape_file , """w""" , encoding="""utf-8""" ) as word_shape_writer:
json.dump(__A , __A , ensure_ascii=__A )
with open(self.word_pronunciation_file , """w""" , encoding="""utf-8""" ) as word_pronunciation_writer:
json.dump(__A , __A , ensure_ascii=__A )
def __lowerCAmelCase ( self ) -> Dict:
lowerCAmelCase_ :Optional[Any] = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
lowerCAmelCase_ :Dict = tokenizer.tokenize("""你好[SEP]你是谁""" )
self.assertListEqual(__A , ["""你""", """好""", """[SEP]""", """你""", """是""", """谁"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(__A ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(__A ) , [5, 6, 2, 5, 7, 8] )
def __lowerCAmelCase ( self ) -> int:
lowerCAmelCase_ :List[Any] = RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize("""ah\u535A\u63A8zz""" ) , ["""ah""", """\u535A""", """\u63A8""", """zz"""] )
def __lowerCAmelCase ( self ) -> Dict:
lowerCAmelCase_ :str = RoCBertBasicTokenizer(do_lower_case=__A )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""hello""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def __lowerCAmelCase ( self ) -> str:
lowerCAmelCase_ :Union[str, Any] = RoCBertBasicTokenizer(do_lower_case=__A , strip_accents=__A )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hällo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""h\u00E9llo"""] )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
lowerCAmelCase_ :Dict = RoCBertBasicTokenizer(do_lower_case=__A , strip_accents=__A )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def __lowerCAmelCase ( self ) -> int:
lowerCAmelCase_ :str = RoCBertBasicTokenizer(do_lower_case=__A )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
lowerCAmelCase_ :int = RoCBertBasicTokenizer(do_lower_case=__A )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def __lowerCAmelCase ( self ) -> List[Any]:
lowerCAmelCase_ :Union[str, Any] = RoCBertBasicTokenizer(do_lower_case=__A , strip_accents=__A )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HäLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def __lowerCAmelCase ( self ) -> Any:
lowerCAmelCase_ :Any = RoCBertBasicTokenizer(do_lower_case=__A , strip_accents=__A )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HaLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def __lowerCAmelCase ( self ) -> Tuple:
lowerCAmelCase_ :int = RoCBertBasicTokenizer(do_lower_case=__A , never_split=["""[UNK]"""] )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? [UNK]""" ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?""", """[UNK]"""] )
def __lowerCAmelCase ( self ) -> str:
lowerCAmelCase_ :List[Any] = ["""[UNK]""", """[CLS]""", """[SEP]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing"""]
lowerCAmelCase_ :Any = {}
for i, token in enumerate(__A ):
lowerCAmelCase_ :int = i
lowerCAmelCase_ :Union[str, Any] = RoCBertWordpieceTokenizer(vocab=__A , unk_token="""[UNK]""" )
self.assertListEqual(tokenizer.tokenize("""""" ) , [] )
self.assertListEqual(tokenizer.tokenize("""unwanted running""" ) , ["""un""", """##want""", """##ed""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.tokenize("""unwantedX running""" ) , ["""[UNK]""", """runn""", """##ing"""] )
def __lowerCAmelCase ( self ) -> Tuple:
self.assertTrue(_is_whitespace(""" """ ) )
self.assertTrue(_is_whitespace("""\t""" ) )
self.assertTrue(_is_whitespace("""\r""" ) )
self.assertTrue(_is_whitespace("""\n""" ) )
self.assertTrue(_is_whitespace("""\u00A0""" ) )
self.assertFalse(_is_whitespace("""A""" ) )
self.assertFalse(_is_whitespace("""-""" ) )
def __lowerCAmelCase ( self ) -> Optional[Any]:
self.assertTrue(_is_control("""\u0005""" ) )
self.assertFalse(_is_control("""A""" ) )
self.assertFalse(_is_control(""" """ ) )
self.assertFalse(_is_control("""\t""" ) )
self.assertFalse(_is_control("""\r""" ) )
def __lowerCAmelCase ( self ) -> Tuple:
self.assertTrue(_is_punctuation("""-""" ) )
self.assertTrue(_is_punctuation("""$""" ) )
self.assertTrue(_is_punctuation("""`""" ) )
self.assertTrue(_is_punctuation(""".""" ) )
self.assertFalse(_is_punctuation("""A""" ) )
self.assertFalse(_is_punctuation(""" """ ) )
def __lowerCAmelCase ( self ) -> List[str]:
lowerCAmelCase_ :List[str] = self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(__A ) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]] )
if self.test_rust_tokenizer:
lowerCAmelCase_ :List[str] = self.get_rust_tokenizer()
self.assertListEqual(
[rust_tokenizer.tokenize(__A ) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]] )
def __lowerCAmelCase ( self ) -> int:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
lowerCAmelCase_ :Union[str, Any] = self.rust_tokenizer_class.from_pretrained(__A , **__A )
lowerCAmelCase_ :Union[str, Any] = f"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence."""
lowerCAmelCase_ :List[Any] = tokenizer_r.encode_plus(
__A , return_attention_mask=__A , return_token_type_ids=__A , return_offsets_mapping=__A , add_special_tokens=__A , )
lowerCAmelCase_ :List[str] = tokenizer_r.do_lower_case if hasattr(__A , """do_lower_case""" ) else False
lowerCAmelCase_ :List[str] = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), """A"""),
((1, 2), ""","""),
((3, 5), """na"""),
((5, 6), """##ï"""),
((6, 8), """##ve"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """Allen"""),
((21, 23), """##NL"""),
((23, 24), """##P"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), """a"""),
((1, 2), ""","""),
((3, 8), """naive"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """allen"""),
((21, 23), """##nl"""),
((23, 24), """##p"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["""input_ids"""] ) )
self.assertEqual([e[0] for e in expected_results] , tokens["""offset_mapping"""] )
def __lowerCAmelCase ( self ) -> List[Any]:
lowerCAmelCase_ :Tuple = ["""的""", """人""", """有"""]
lowerCAmelCase_ :Optional[Any] = """""".join(__A )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
lowerCAmelCase_ :str = True
lowerCAmelCase_ :Tuple = self.tokenizer_class.from_pretrained(__A , **__A )
lowerCAmelCase_ :str = self.rust_tokenizer_class.from_pretrained(__A , **__A )
lowerCAmelCase_ :Optional[Any] = tokenizer_p.encode(__A , add_special_tokens=__A )
lowerCAmelCase_ :str = tokenizer_r.encode(__A , add_special_tokens=__A )
lowerCAmelCase_ :List[Any] = tokenizer_r.convert_ids_to_tokens(__A )
lowerCAmelCase_ :List[Any] = tokenizer_p.convert_ids_to_tokens(__A )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(__A , __A )
self.assertListEqual(__A , __A )
lowerCAmelCase_ :Union[str, Any] = False
lowerCAmelCase_ :Tuple = self.rust_tokenizer_class.from_pretrained(__A , **__A )
lowerCAmelCase_ :str = self.tokenizer_class.from_pretrained(__A , **__A )
lowerCAmelCase_ :Dict = tokenizer_r.encode(__A , add_special_tokens=__A )
lowerCAmelCase_ :Tuple = tokenizer_p.encode(__A , add_special_tokens=__A )
lowerCAmelCase_ :Dict = tokenizer_r.convert_ids_to_tokens(__A )
lowerCAmelCase_ :Optional[Any] = tokenizer_p.convert_ids_to_tokens(__A )
# it is expected that only the first Chinese character is not preceded by "##".
lowerCAmelCase_ :int = [
f"""##{token}""" if idx != 0 else token for idx, token in enumerate(__A )
]
self.assertListEqual(__A , __A )
self.assertListEqual(__A , __A )
@slow
def __lowerCAmelCase ( self ) -> Any:
lowerCAmelCase_ :List[Any] = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
lowerCAmelCase_ :Tuple = tokenizer.encode("""你好""" , add_special_tokens=__A )
lowerCAmelCase_ :Union[str, Any] = tokenizer.encode("""你是谁""" , add_special_tokens=__A )
lowerCAmelCase_ :Dict = tokenizer.build_inputs_with_special_tokens(__A )
lowerCAmelCase_ :List[str] = tokenizer.build_inputs_with_special_tokens(__A , __A )
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
def __lowerCAmelCase ( self ) -> str:
lowerCAmelCase_ :Union[str, Any] = self.get_tokenizers(do_lower_case=__A )
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
lowerCAmelCase_ :str = """你好,你是谁"""
lowerCAmelCase_ :List[str] = tokenizer.tokenize(__A )
lowerCAmelCase_ :Any = tokenizer.convert_tokens_to_ids(__A )
lowerCAmelCase_ :Tuple = tokenizer.convert_tokens_to_shape_ids(__A )
lowerCAmelCase_ :Dict = tokenizer.convert_tokens_to_pronunciation_ids(__A )
lowerCAmelCase_ :List[Any] = tokenizer.prepare_for_model(
__A , __A , __A , add_special_tokens=__A )
lowerCAmelCase_ :Union[str, Any] = tokenizer.encode_plus(__A , add_special_tokens=__A )
self.assertEqual(__A , __A )
| 353 |
"""simple docstring"""
def _snake_case ( lowercase__ : list , lowercase__ : list , lowercase__ : int , lowercase__ : int , lowercase__ : int ) -> int:
'''simple docstring'''
if index == number_of_items:
return 0
lowerCAmelCase_ :Any = 0
lowerCAmelCase_ :str = 0
lowerCAmelCase_ :Dict = knapsack(lowercase__ , lowercase__ , lowercase__ , lowercase__ , index + 1 )
if weights[index] <= max_weight:
lowerCAmelCase_ :str = values[index] + knapsack(
lowercase__ , lowercase__ , lowercase__ , max_weight - weights[index] , index + 1 )
return max(lowercase__ , lowercase__ )
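# Example (illustrative): with weights [3, 4, 5], values [30, 50, 60] and max_weight 10,
# the recursion returns 110, taking the last two items (weight 4 + 5 <= 10):
#   knapsack([3, 4, 5], [30, 50, 60], 3, 10, 0)  # -> 110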
if __name__ == "__main__":
import doctest
doctest.testmod()
| 1 | 0 |
"""simple docstring"""
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def _snake_case ( lowercase__ : List[str] , lowercase__ : Any , lowercase__ : int ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ :Optional[int] = AlbertConfig.from_json_file(lowercase__ )
print(f"""Building PyTorch model from configuration: {config}""" )
lowerCAmelCase_ :str = AlbertForPreTraining(lowercase__ )
# Load weights from tf checkpoint
load_tf_weights_in_albert(lowercase__ , lowercase__ , lowercase__ )
# Save pytorch-model
print(f"""Save PyTorch model to {pytorch_dump_path}""" )
torch.save(model.state_dict() , lowercase__ )
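# Example invocation (paths are illustrative):
#   python convert_albert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./albert_base/model.ckpt-best \
#       --albert_config_file ./albert_base/albert_config.json \
#       --pytorch_dump_path ./albert_base/pytorch_model.bin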
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--albert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained ALBERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
__UpperCAmelCase = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
| 354 |
"""simple docstring"""
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def _snake_case ( lowercase__ : bool = True , *lowercase__ : Optional[int] , **lowercase__ : str ) -> Optional[Any]:
'''simple docstring'''
if not is_tqdm_available():
raise ImportError("""Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.""" )
lowerCAmelCase_ :Tuple = False
if main_process_only:
lowerCAmelCase_ :Dict = PartialState().local_process_index == 0
return _tqdm(*lowercase__ , **lowercase__ , disable=lowercase__ )
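# Usage sketch inside an Accelerate script (dataloader is illustrative): the bar is
# rendered only on the local main process when main_process_only=True.
#   for batch in tqdm(dataloader, desc="train"):
#       ...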
| 1 | 0 |
"""simple docstring"""
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
__UpperCAmelCase = [
{'dataset': 'wikipedia', 'config_name': '20220301.de'},
{'dataset': 'wikipedia', 'config_name': '20220301.en'},
{'dataset': 'wikipedia', 'config_name': '20220301.fr'},
{'dataset': 'wikipedia', 'config_name': '20220301.frr'},
{'dataset': 'wikipedia', 'config_name': '20220301.it'},
{'dataset': 'wikipedia', 'config_name': '20220301.simple'},
{'dataset': 'snli', 'config_name': 'plain_text'},
{'dataset': 'eli5', 'config_name': 'LFQA_reddit'},
{'dataset': 'wiki40b', 'config_name': 'en'},
{'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.nq.compressed'},
{'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.nq.no_index'},
{'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.multiset.no_index'},
{'dataset': 'natural_questions', 'config_name': 'default'},
]
def _snake_case ( lowercase__ : List[str]=True ) -> List[str]:
'''simple docstring'''
if with_config:
return [
{
"testcase_name": d["dataset"] + "/" + d["config_name"],
"dataset": d["dataset"],
"config_name": d["config_name"],
}
for d in DATASETS_ON_HF_GCP
]
else:
return [
{"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
]
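# e.g. list_datasets_on_hf_gcp_parameters()[0] yields (illustrative of the mapping above):
#   {"testcase_name": "wikipedia/20220301.de", "dataset": "wikipedia", "config_name": "20220301.de"}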
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=A__ ) )
class _SCREAMING_SNAKE_CASE ( A__ ):
UpperCAmelCase_ :Union[str, Any] = None
UpperCAmelCase_ :Optional[Any] = None
def __lowerCAmelCase ( self , __A , __A ) -> Dict:
with TemporaryDirectory() as tmp_dir:
lowerCAmelCase_ :Dict = dataset_module_factory(__A , cache_dir=__A )
lowerCAmelCase_ :List[str] = import_main_class(dataset_module.module_path , dataset=__A )
lowerCAmelCase_ :DatasetBuilder = builder_cls(
cache_dir=__A , config_name=__A , hash=dataset_module.hash , )
lowerCAmelCase_ :Any = """/""".join(
[
HF_GCP_BASE_URL,
builder_instance._relative_data_dir(with_hash=__A ).replace(os.sep , """/""" ),
config.DATASET_INFO_FILENAME,
] )
lowerCAmelCase_ :Dict = cached_path(__A , cache_dir=__A )
self.assertTrue(os.path.exists(__A ) )
@pytest.mark.integration
def _snake_case ( lowercase__ : str ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase_ :Dict = tmp_path_factory.mktemp("""test_hf_gcp""" ) / """test_wikipedia_simple"""
lowerCAmelCase_ :List[Any] = dataset_module_factory("""wikipedia""" , cache_dir=lowercase__ )
lowerCAmelCase_ :Optional[int] = import_main_class(dataset_module.module_path )
lowerCAmelCase_ :DatasetBuilder = builder_cls(
cache_dir=lowercase__ , config_name="""20220301.frr""" , hash=dataset_module.hash , )
# use the HF cloud storage, not the original download_and_prepare that uses apache-beam
lowerCAmelCase_ :Optional[int] = None
builder_instance.download_and_prepare()
lowerCAmelCase_ :Tuple = builder_instance.as_dataset()
assert ds
@pytest.mark.integration
def _snake_case ( lowercase__ : Dict ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ :Dict = dataset_module_factory("""wikipedia""" , cache_dir=lowercase__ )
lowerCAmelCase_ :Any = import_main_class(dataset_module.module_path , dataset=lowercase__ )
lowerCAmelCase_ :DatasetBuilder = builder_cls(
cache_dir=lowercase__ , config_name="""20220301.frr""" , hash=dataset_module.hash , )
lowerCAmelCase_ :Dict = builder_instance.as_streaming_dataset()
assert ds
assert isinstance(lowercase__ , lowercase__ )
assert "train" in ds
assert isinstance(ds["""train"""] , lowercase__ )
assert next(iter(ds["""train"""] ) )
| 355 |
"""simple docstring"""
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
__UpperCAmelCase = get_tests_dir('fixtures/dummy-config.json')
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __lowerCAmelCase ( self ) -> Dict:
lowerCAmelCase_ :int = 0
def __lowerCAmelCase ( self ) -> List[str]:
self.assertIsNotNone(transformers.models.auto.__spec__ )
self.assertIsNotNone(importlib.util.find_spec("""transformers.models.auto""" ) )
def __lowerCAmelCase ( self ) -> Tuple:
lowerCAmelCase_ :Tuple = AutoConfig.from_pretrained("""bert-base-uncased""" )
self.assertIsInstance(__A , __A )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
lowerCAmelCase_ :int = AutoConfig.from_pretrained(__A )
self.assertIsInstance(__A , __A )
def __lowerCAmelCase ( self ) -> Any:
lowerCAmelCase_ :Any = AutoConfig.from_pretrained(__A )
self.assertIsInstance(__A , __A )
def __lowerCAmelCase ( self ) -> Dict:
lowerCAmelCase_ :int = AutoConfig.for_model("""roberta""" )
self.assertIsInstance(__A , __A )
def __lowerCAmelCase ( self ) -> Tuple:
with tempfile.TemporaryDirectory() as tmp_dir:
# This model name contains bert and roberta, but roberta ends up being picked.
lowerCAmelCase_ :int = os.path.join(__A , """fake-roberta""" )
os.makedirs(__A , exist_ok=__A )
with open(os.path.join(__A , """config.json""" ) , """w""" ) as f:
f.write(json.dumps({} ) )
lowerCAmelCase_ :Any = AutoConfig.from_pretrained(__A )
self.assertEqual(type(__A ) , __A )
def __lowerCAmelCase ( self ) -> Optional[int]:
try:
AutoConfig.register("""custom""" , __A )
# Wrong model type will raise an error
with self.assertRaises(__A ):
AutoConfig.register("""model""" , __A )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__A ):
AutoConfig.register("""bert""" , __A )
# Now that the config is registered, it can be used as any other config with the auto-API
lowerCAmelCase_ :Union[str, Any] = CustomConfig()
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__A )
lowerCAmelCase_ :Optional[int] = AutoConfig.from_pretrained(__A )
self.assertIsInstance(__A , __A )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
def __lowerCAmelCase ( self ) -> Tuple:
with self.assertRaisesRegex(
__A , """bert-base is not a local folder and is not a valid model identifier""" ):
lowerCAmelCase_ :List[str] = AutoConfig.from_pretrained("""bert-base""" )
def __lowerCAmelCase ( self ) -> Any:
with self.assertRaisesRegex(
__A , r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
lowerCAmelCase_ :Dict = AutoConfig.from_pretrained(__A , revision="""aaaaaa""" )
def __lowerCAmelCase ( self ) -> int:
with self.assertRaisesRegex(
__A , """hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.""" , ):
lowerCAmelCase_ :Union[str, Any] = AutoConfig.from_pretrained("""hf-internal-testing/no-config-test-repo""" )
def __lowerCAmelCase ( self ) -> Tuple:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(__A ):
lowerCAmelCase_ :Tuple = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__A ):
lowerCAmelCase_ :List[str] = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" , trust_remote_code=__A )
lowerCAmelCase_ :str = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" , trust_remote_code=__A )
self.assertEqual(config.__class__.__name__ , """NewModelConfig""" )
# Test config can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__A )
lowerCAmelCase_ :Dict = AutoConfig.from_pretrained(__A , trust_remote_code=__A )
self.assertEqual(reloaded_config.__class__.__name__ , """NewModelConfig""" )
def __lowerCAmelCase ( self ) -> int:
class _SCREAMING_SNAKE_CASE ( A__ ):
UpperCAmelCase_ :int = "new-model"
try:
AutoConfig.register("""new-model""" , __A )
# If remote code is not set, the default is to use local
lowerCAmelCase_ :Any = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" )
self.assertEqual(config.__class__.__name__ , """NewModelConfigLocal""" )
# If remote code is disabled, we load the local one.
lowerCAmelCase_ :Union[str, Any] = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" , trust_remote_code=__A )
self.assertEqual(config.__class__.__name__ , """NewModelConfigLocal""" )
# If remote is enabled, we load from the Hub
lowerCAmelCase_ :Optional[Any] = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" , trust_remote_code=__A )
self.assertEqual(config.__class__.__name__ , """NewModelConfig""" )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
| 1 | 0 |
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {'vocab_file': 'spiece.model'}
__UpperCAmelCase = {
'vocab_file': {
'AI-Sweden/gpt-sw3-126m': 'https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-350m': 'https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-1.6b': 'https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-6.7b': 'https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-20b': 'https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model',
}
}
__UpperCAmelCase = {
'AI-Sweden/gpt-sw3-126m': 20_48,
'AI-Sweden/gpt-sw3-350m': 20_48,
'AI-Sweden/gpt-sw3-1.6b': 20_48,
'AI-Sweden/gpt-sw3-6.7b': 20_48,
'AI-Sweden/gpt-sw3-20b': 20_48,
}
class _SCREAMING_SNAKE_CASE ( A__ ):
UpperCAmelCase_ :str = VOCAB_FILES_NAMES
UpperCAmelCase_ :List[str] = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase_ :int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase_ :str = ["input_ids", "attention_mask"]
def __init__( self , __A , __A=False , __A=False , __A=False , __A=None , __A=None , __A=None , __A=None , __A = None , **__A , ) -> None:
lowerCAmelCase_ :Optional[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
lowerCAmelCase_ :Any = kwargs.get("""name_or_path""" )
if name_or_path is None:
logger.warning(
"""name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"""
""" you are testing the model, this can safely be ignored""" )
lowerCAmelCase_ :str = """None"""
# Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
lowerCAmelCase_ :Dict = """<|endoftext|>""" if eos_token is None else eos_token
lowerCAmelCase_ :Any = """<unk>""" if unk_token is None else unk_token
if "gpt-sw3-7b" in name_or_path:
lowerCAmelCase_ :Dict = unk_token if pad_token is None else pad_token
lowerCAmelCase_ :List[Any] = eos_token if bos_token is None else bos_token
else:
lowerCAmelCase_ :Any = """<pad>""" if pad_token is None else pad_token
lowerCAmelCase_ :Any = """<s>""" if bos_token is None else bos_token
super().__init__(
do_lower_case=__A , remove_space=__A , keep_accents=__A , bos_token=__A , eos_token=__A , unk_token=__A , pad_token=__A , sp_model_kwargs=self.sp_model_kwargs , **__A , )
lowerCAmelCase_ :List[Any] = do_lower_case
lowerCAmelCase_ :Dict = remove_space
lowerCAmelCase_ :Optional[int] = keep_accents
lowerCAmelCase_ :int = vocab_file
lowerCAmelCase_ :int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__A )
# Used for whitespace normalization in input texts
        # fmt: off
lowerCAmelCase_ :List[str] = {""" """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """""", """"""}
        # fmt: on
# Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
        lowerCAmelCase_ :int = re.compile(
            f"""[{''.join(map(chr , list(range(0 , 9 ) ) + list(range(11 , 32 ) ) + list(range(127 , 160 ) ) + [160, 173, 8203] ) )}]""" )
def __getstate__( self ) -> Optional[Any]:
lowerCAmelCase_ :Dict = self.__dict__.copy()
lowerCAmelCase_ :str = None
return state
def __setstate__( self , __A ) -> List[Any]:
lowerCAmelCase_ :int = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
lowerCAmelCase_ :Optional[Any] = {}
lowerCAmelCase_ :int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
def __lowerCAmelCase ( self ) -> int:
return len(self.sp_model )
def __lowerCAmelCase ( self , __A ) -> str:
lowerCAmelCase_ :Any = self.non_printing_characters_re.sub("""""" , __A )
# Normalize whitespaces
lowerCAmelCase_ :str = """""".join([char if char not in self.whitespaces else """ """ for char in text] )
# NFC Unicode normalization
lowerCAmelCase_ :Union[str, Any] = unicodedata.normalize("""NFC""" , __A )
return text
def __lowerCAmelCase ( self , __A , **__A ) -> List[str]:
lowerCAmelCase_ :Dict = self.preprocess_text(__A )
return self.sp_model.encode(__A , out_type=__A )
def __lowerCAmelCase ( self , __A ) -> int:
return self.sp_model.PieceToId(__A )
def __lowerCAmelCase ( self , __A ) -> str:
return self.sp_model.IdToPiece(__A )
@staticmethod
def __lowerCAmelCase ( __A ) -> str:
return out_string
def __lowerCAmelCase ( self , __A ) -> str:
lowerCAmelCase_ :int = []
lowerCAmelCase_ :int = """"""
lowerCAmelCase_ :List[Any] = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
# TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__A ) + token
lowerCAmelCase_ :Tuple = True
lowerCAmelCase_ :Optional[int] = []
else:
current_sub_tokens.append(__A )
lowerCAmelCase_ :str = False
out_string += self.sp_model.decode(__A )
return out_string
def __lowerCAmelCase ( self ) -> Dict[str, int]:
lowerCAmelCase_ :int = {self.convert_ids_to_tokens(__A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __lowerCAmelCase ( self , __A , __A = None ) -> Tuple[str]:
if not os.path.isdir(__A ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowerCAmelCase_ :Optional[int] = os.path.join(
__A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __A )
elif not os.path.isfile(self.vocab_file ):
with open(__A , """wb""" ) as fi:
lowerCAmelCase_ :str = self.sp_model.serialized_model_proto()
fi.write(__A )
return (out_vocab_file,)
def __lowerCAmelCase ( self , __A , __A = False ) -> Union[List[int], List[List[int]], "torch.Tensor"]:
if isinstance(__A , __A ):
lowerCAmelCase_ :Dict = self.preprocess_text(__A )
lowerCAmelCase_ :int = self.sp_model.encode(__A )
else:
lowerCAmelCase_ :List[str] = [self.preprocess_text(__A ) for t in text]
lowerCAmelCase_ :List[Any] = self.sp_model.encode(__A )
if return_tensors is True or return_tensors == "pt":
lowerCAmelCase_ :Dict = torch.tensor(__A )
return token_ids
def __lowerCAmelCase ( self , __A ) -> str:
return self.sp_model.decode(__A )
def __lowerCAmelCase ( self , __A ) -> List[int]:
lowerCAmelCase_ :str = [f"""User: {text}""" if is_user else f"""Bot: {text}""" for is_user, text in conversation.iter_texts()]
lowerCAmelCase_ :Optional[Any] = (
f"""{self.eos_token}{self.bos_token}""" + f"""{self.bos_token}""".join(__A ) + f"""{self.bos_token}Bot:"""
)
return self.encode(text=__A )
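# Hypothetical usage sketch (model id taken from the pretrained map above; the public
# class name is assumed, since names are mangled in this dump):
#   tok = GPTSw3Tokenizer.from_pretrained("AI-Sweden/gpt-sw3-126m")
#   ids = tok("Hej världen!")["input_ids"]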
| 356 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase ):
UpperCAmelCase_ :str = GPTSanJapaneseTokenizer
UpperCAmelCase_ :Optional[int] = False
UpperCAmelCase_ :Optional[int] = {"do_clean_text": False, "add_prefix_space": False}
def __lowerCAmelCase ( self ) -> Tuple:
super().setUp()
# fmt: off
lowerCAmelCase_ :Dict = ["""こん""", """こんに""", """にちは""", """ばんは""", """世界,㔺界""", """、""", """。""", """<BR>""", """<SP>""", """<TAB>""", """<URL>""", """<EMAIL>""", """<TEL>""", """<DATE>""", """<PRICE>""", """<BLOCK>""", """<KIGOU>""", """<U2000U2BFF>""", """<|emoji1|>""", """<unk>""", """<|bagoftoken|>""", """<|endoftext|>"""]
# fmt: on
lowerCAmelCase_ :List[str] = {"""emoji""": {"""\ud83d\ude00""": """<|emoji1|>"""}, """emoji_inv""": {"""<|emoji1|>""": """\ud83d\ude00"""}} # 😀
lowerCAmelCase_ :int = {"""unk_token""": """<unk>"""}
lowerCAmelCase_ :Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
lowerCAmelCase_ :int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""emoji_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
with open(self.emoji_file , """w""" ) as emoji_writer:
emoji_writer.write(json.dumps(__A ) )
def __lowerCAmelCase ( self , **__A ) -> int:
kwargs.update(self.special_tokens_map )
return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **__A )
def __lowerCAmelCase ( self , __A ) -> Dict:
lowerCAmelCase_ :List[Any] = """こんにちは、世界。 \nこんばんは、㔺界。😀"""
lowerCAmelCase_ :Optional[int] = """こんにちは、世界。 \nこんばんは、世界。😀"""
return input_text, output_text
def __lowerCAmelCase ( self , __A ) -> str:
lowerCAmelCase_ , lowerCAmelCase_ :Tuple = self.get_input_output_texts(__A )
lowerCAmelCase_ :List[str] = tokenizer.encode(__A , add_special_tokens=__A )
lowerCAmelCase_ :str = tokenizer.decode(__A , clean_up_tokenization_spaces=__A )
return text, ids
def __lowerCAmelCase ( self ) -> str:
pass # TODO add if relevant
def __lowerCAmelCase ( self ) -> Dict:
pass # TODO add if relevant
def __lowerCAmelCase ( self ) -> int:
pass # TODO add if relevant
def __lowerCAmelCase ( self ) -> Dict:
lowerCAmelCase_ :Union[str, Any] = self.get_tokenizer()
# Testing tokenization
lowerCAmelCase_ :Optional[int] = """こんにちは、世界。 こんばんは、㔺界。"""
lowerCAmelCase_ :Any = ["""こん""", """にちは""", """、""", """世界""", """。""", """<SP>""", """こん""", """ばんは""", """、""", """㔺界""", """。"""]
lowerCAmelCase_ :Tuple = tokenizer.tokenize(__A )
self.assertListEqual(__A , __A )
# Testing conversion to ids without special tokens
lowerCAmelCase_ :List[Any] = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
lowerCAmelCase_ :List[str] = tokenizer.convert_tokens_to_ids(__A )
self.assertListEqual(__A , __A )
# Testing conversion to ids with special tokens
lowerCAmelCase_ :Any = tokens + [tokenizer.unk_token]
lowerCAmelCase_ :Union[str, Any] = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
lowerCAmelCase_ :Union[str, Any] = tokenizer.convert_tokens_to_ids(__A )
self.assertListEqual(__A , __A )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
lowerCAmelCase_ :int = self.get_tokenizer()
# Testing tokenization
lowerCAmelCase_ :Optional[int] = """こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"""
lowerCAmelCase_ :str = """こんにちは、、、、世界。こんばんは、、、、世界。"""
lowerCAmelCase_ :str = tokenizer.encode(__A )
lowerCAmelCase_ :Dict = tokenizer.decode(__A )
self.assertEqual(__A , __A )
@slow
def __lowerCAmelCase ( self ) -> int:
lowerCAmelCase_ :Tuple = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
# Testing tokenization
lowerCAmelCase_ :Optional[Any] = """こんにちは、世界。"""
lowerCAmelCase_ :Any = """こんばんは、㔺界。😀"""
lowerCAmelCase_ :Optional[Any] = """こんにちは、世界。こんばんは、世界。😀"""
lowerCAmelCase_ :List[Any] = tokenizer.encode(prefix_text + input_text )
lowerCAmelCase_ :List[str] = tokenizer.encode("""""" , prefix_text=prefix_text + input_text )
lowerCAmelCase_ :int = tokenizer.encode(__A , prefix_text=__A )
lowerCAmelCase_ :int = tokenizer.decode(__A )
lowerCAmelCase_ :Dict = tokenizer.decode(__A )
lowerCAmelCase_ :Tuple = tokenizer.decode(__A )
self.assertEqual(__A , __A )
self.assertEqual(__A , __A )
self.assertEqual(__A , __A )
@slow
def __lowerCAmelCase ( self ) -> Any:
lowerCAmelCase_ :int = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
# Testing tokenization
lowerCAmelCase_ :List[Any] = """こんにちは、世界。"""
lowerCAmelCase_ :Optional[int] = """こんばんは、㔺界。😀"""
lowerCAmelCase_ :List[str] = len(tokenizer.encode(__A ) ) - 2
lowerCAmelCase_ :Dict = len(tokenizer.encode(__A ) ) - 2
lowerCAmelCase_ :int = [1] + [0] * (len_prefix + len_text + 1)
lowerCAmelCase_ :List[Any] = [1] * (len_prefix + len_text + 1) + [0]
lowerCAmelCase_ :Dict = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
lowerCAmelCase_ :List[Any] = tokenizer(prefix_text + input_text ).token_type_ids
lowerCAmelCase_ :List[str] = tokenizer("""""" , prefix_text=prefix_text + input_text ).token_type_ids
lowerCAmelCase_ :List[Any] = tokenizer(__A , prefix_text=__A ).token_type_ids
self.assertListEqual(__A , __A )
self.assertListEqual(__A , __A )
self.assertListEqual(__A , __A )
@slow
def __lowerCAmelCase ( self ) -> int:
lowerCAmelCase_ :Dict = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
lowerCAmelCase_ :int = tokenizer.encode("""あンいワ""" )
lowerCAmelCase_ :Optional[Any] = tokenizer.encode("""""" , prefix_text="""あンいワ""" )
lowerCAmelCase_ :int = tokenizer.encode("""いワ""" , prefix_text="""あン""" )
self.assertEqual(tokenizer.decode(__A ) , tokenizer.decode(__A ) )
self.assertEqual(tokenizer.decode(__A ) , tokenizer.decode(__A ) )
self.assertNotEqual(__A , __A )
self.assertNotEqual(__A , __A )
self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token
self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token
@slow
def __lowerCAmelCase ( self ) -> Optional[Any]:
lowerCAmelCase_ :Union[str, Any] = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
lowerCAmelCase_ :int = [["""武田信玄""", """は、"""], ["""織田信長""", """の配下の、"""]]
lowerCAmelCase_ :Dict = tokenizer(__A , padding=__A )
lowerCAmelCase_ :Any = tokenizer.batch_encode_plus(__A , padding=__A )
# fmt: off
lowerCAmelCase_ :int = [[3_5993, 8640, 2_5948, 3_5998, 3_0647, 3_5675, 3_5999, 3_5999], [3_5993, 1_0382, 9868, 3_5998, 3_0646, 9459, 3_0646, 3_5675]]
lowerCAmelCase_ :List[str] = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
lowerCAmelCase_ :int = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
# fmt: on
self.assertListEqual(x_token.input_ids , __A )
self.assertListEqual(x_token.token_type_ids , __A )
self.assertListEqual(x_token.attention_mask , __A )
self.assertListEqual(x_token_a.input_ids , __A )
self.assertListEqual(x_token_a.token_type_ids , __A )
self.assertListEqual(x_token_a.attention_mask , __A )
def __lowerCAmelCase ( self ) -> Tuple:
# Intentionally convert some words to accommodate character fluctuations unique to Japanese
pass
def __lowerCAmelCase ( self ) -> str:
# tokenizer has no padding token
pass
| 1 | 0 |
"""simple docstring"""
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
deprecate(
'stable diffusion controlnet',
'0.22.0',
'Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.',
standard_warn=False,
stacklevel=3,
)
| 357 |
"""simple docstring"""
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
__UpperCAmelCase = pd.read_csv(
'https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/'
'position_salaries.csv'
)
__UpperCAmelCase = dataset.iloc[:, 1:2].values
__UpperCAmelCase = dataset.iloc[:, 2].values
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = train_test_split(X, y, test_size=0.2, random_state=0)
__UpperCAmelCase = PolynomialFeatures(degree=4)
__UpperCAmelCase = poly_reg.fit_transform(X)
__UpperCAmelCase = LinearRegression()
pol_reg.fit(X_poly, y)
def _snake_case ( ) -> str:
'''simple docstring'''
plt.scatter(lowercase__ , lowercase__ , color="""red""" )
plt.plot(lowercase__ , pol_reg.predict(poly_reg.fit_transform(lowercase__ ) ) , color="""blue""" )
plt.title("""Truth or Bluff (Linear Regression)""" )
plt.xlabel("""Position level""" )
plt.ylabel("""Salary""" )
plt.show()
if __name__ == "__main__":
viz_polymonial()
# Predicting a new result with Polymonial Regression
pol_reg.predict(poly_reg.fit_transform([[5.5]]))
# output should be 132148.43750003
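# Equivalent manual check (illustrative): after fitting, poly_reg.transform([[5.5]]) expands
# the input to [1, 5.5, 5.5**2, 5.5**3, 5.5**4], so
#   pol_reg.predict(poly_reg.transform([[5.5]]))
# returns the same value as the fit_transform call above.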
| 1 | 0 |