# --- transformers: tests/models/timm_backbone/test_modeling_timm_backbone.py ---
import copy
import inspect
import unittest

from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available

from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import TimmBackbone, TimmBackboneConfig


class TimmBackboneModelTester:
    def __init__(
        self,
        parent,
        out_indices=None,
        stage_names=None,
        out_features=None,
        backbone="resnet50",
        batch_size=3,
        image_size=32,
        num_channels=3,
        use_pretrained_backbone=True,
        is_training=True,
    ):
        self.parent = parent
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return TimmBackboneConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            stage_names=self.stage_names,
            use_pretrained_backbone=self.use_pretrained_backbone,
            backbone=self.backbone,
        )

    def create_and_check_model(self, config, pixel_values):
        model = TimmBackbone(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        self.parent.assertEqual(
            result.feature_maps[-1].shape,
            (self.batch_size, model.channels[-1], 14, 14),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
@require_timm
class TimmBackboneModelTest(ModelTesterMixin, BackboneTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimmBackbone,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": TimmBackbone} if is_torch_available() else {}
    test_resize_embeddings = False
    test_head_masking = False
    test_pruning = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TimmBackboneModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TimmBackboneConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def test_timm_transformer_backbone_equivalence(self):
        timm_checkpoint = "resnet18"
        transformers_checkpoint = "microsoft/resnet-18"

        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True)
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint)

        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(len(timm_model.stage_names), len(transformers_model.stage_names))
        self.assertEqual(timm_model.channels, transformers_model.channels)
        # Out indices are set to the last layer by default. For timm models, we don't know
        # the number of layers in advance, so we set it to (-1,), whereas for transformers
        # models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
        self.assertEqual(timm_model.out_indices, (-1,))
        self.assertEqual(transformers_model.out_indices, [len(timm_model.stage_names) - 1])

        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True, out_indices=[1, 2, 3])
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint, out_indices=[1, 2, 3])

        self.assertEqual(timm_model.out_indices, transformers_model.out_indices)
        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(timm_model.channels, transformers_model.channels)

    @unittest.skip("TimmBackbone doesn't support feed forward chunking")
    def test_feed_forward_chunking(self):
        pass

    @unittest.skip("TimmBackbone doesn't have num_hidden_layers attribute")
    def test_hidden_states_output(self):
        pass

    @unittest.skip("TimmBackbone initialization is managed on the timm side")
    def test_initialization(self):
        pass

    @unittest.skip("TimmBackbone models doesn't have inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("TimmBackbone models doesn't have inputs_embeds")
    def test_model_common_attributes(self):
        pass

    @unittest.skip("TimmBackbone model cannot be created without specifying a backbone checkpoint")
    def test_from_pretrained_no_checkpoint(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_save_load(self):
        pass

    @unittest.skip("model weights aren't tied in TimmBackbone.")
    def test_tie_model_weights(self):
        pass

    @unittest.skip("model weights aren't tied in TimmBackbone.")
    def test_tied_model_weights_key_ignore(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_load_save_without_tied_weights(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_model_weights_reload_when_same_dtype(self):
        pass

    @unittest.skip("TimmBackbone doesn't have hidden size info in its configuration.")
    def test_channels(self):
        pass

    @unittest.skip("TimmBackbone doesn't support output_attentions.")
    def test_torchscript_output_attentions(self):
        pass

    @unittest.skip("Safetensors is not supported by timm.")
    def test_can_use_safetensors(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_retain_grad_hidden_states_attentions(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = self.has_attentions

        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)

        inputs = self._prepare_for_class(inputs_dict, model_class)
        outputs = model(**inputs)
        output = outputs[0][-1]

        # Encoder-/Decoder-only models
        hidden_states = outputs.hidden_states[0]
        hidden_states.retain_grad()

        if self.has_attentions:
            attentions = outputs.attentions[0]
            attentions.retain_grad()

        output.flatten()[0].backward(retain_graph=True)

        self.assertIsNotNone(hidden_states.grad)

        if self.has_attentions:
            self.assertIsNotNone(attentions.grad)

    def test_create_from_modified_config(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), len(config.out_indices))
            self.assertEqual(len(model.channels), len(config.out_indices))

            # Check output of last stage is taken if out_features=None, out_indices=None
            modified_config = copy.deepcopy(config)
            modified_config.out_indices = None
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), 1)
            self.assertEqual(len(model.channels), 1)

            # Check backbone can be initialized with fresh weights
            modified_config = copy.deepcopy(config)
            modified_config.use_pretrained_backbone = False
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)
# --- transformers: tests/models/mt5/test_modeling_mt5.py (integration test) ---
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device


if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer


@require_torch
@require_sentencepiece
@require_tokenizers
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        """Compare against the score produced by the original Mesh-TensorFlow mT5 implementation."""
        model = AutoModelForSeq2SeqLM.from_pretrained("google/mt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="pt").input_ids
        labels = tokenizer("Hi I am", return_tensors="pt").input_ids

        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
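
# A minimal sketch of the scoring convention used above: transformers reports
# the mean cross-entropy over label tokens, while the Mesh-TensorFlow ("mtf")
# reference score is the summed negative log-likelihood, so the mean loss is
# scaled by the label length and negated. The numbers below are hypothetical
# illustrations, not values taken from the test. Kept inert on import:
if __name__ == "__main__":
    mean_loss = 7.7193        # hypothetical per-token cross-entropy
    num_label_tokens = 11     # hypothetical label length
    mtf_score = -(num_label_tokens * mean_loss)
    print(f"summed negative log-likelihood: {mtf_score:.4f}")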
# --- diffusers: schedulers/scheduling_unclip.py ---
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import numpy as np
import torch

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin


@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class UnCLIPSchedulerOutput(BaseOutput):
    """
    Output class for the scheduler's step function output.

    Args:
        prev_sample: computed sample (x_{t-1}) of the previous timestep, used as the next model input.
        pred_original_sample: the predicted denoised sample (x_0) based on the model output from the current timestep.
    """

    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


def betas_for_alpha_bar(
    num_diffusion_timesteps,
    max_beta=0.999,
    alpha_transform_type="cosine",
):
    """
    Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of
    (1 - beta) over time from t = [0, 1].
    """
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
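
# Quick sanity check for the schedule above (a sketch, kept under a main
# guard so it is inert on import): betas are positive, capped at max_beta,
# and grow toward the end of the cosine schedule.
if __name__ == "__main__":
    betas = betas_for_alpha_bar(1000)
    assert betas.shape == (1000,)
    assert float(betas.min()) > 0.0
    assert float(betas.max()) <= 0.999
    assert float(betas[-1]) > float(betas[0])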
class UnCLIPScheduler(SchedulerMixin, ConfigMixin):
    """
    A scheduler following the DDPM formulation, modified for use in the UnCLIP (karlo) pipelines. Only the
    squaredcos_cap_v2 beta schedule is supported.
    """

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        variance_type: str = "fixed_small_log",
        clip_sample: bool = True,
        clip_sample_range: Optional[float] = 1.0,
        prediction_type: str = "epsilon",
        beta_schedule: str = "squaredcos_cap_v2",
    ):
        if beta_schedule != "squaredcos_cap_v2":
            raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'")

        self.betas = betas_for_alpha_bar(num_train_timesteps)

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
        self.one = torch.tensor(1.0)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy())

        self.variance_type = variance_type

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        step_ratio = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)

    def _get_variance(self, t, prev_timestep=None, predicted_variance=None, variance_type=None):
        if prev_timestep is None:
            prev_timestep = t - 1

        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            beta = self.betas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = beta_prod_t_prev / beta_prod_t * beta

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small_log":
            variance = torch.log(torch.clamp(variance, min=1e-20))
            variance = torch.exp(0.5 * variance)
        elif variance_type == "learned_range":
            # NOTE difference with DDPM scheduler
            min_log = variance.log()
            max_log = beta.log()

            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        prev_timestep: Optional[int] = None,
        generator=None,
        return_dict: bool = True,
    ) -> Union[UnCLIPSchedulerOutput, Tuple]:
        t = timestep

        if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
            model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
        else:
            predicted_variance = None

        # 1. compute alphas, betas
        if prev_timestep is None:
            prev_timestep = t - 1

        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            beta = self.betas[t]
            alpha = self.alphas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev
            alpha = 1 - beta

        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`"
                " for the UnCLIPScheduler."
            )

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = torch.clamp(
                pred_original_sample, -self.config.clip_sample_range, self.config.clip_sample_range
            )

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
        current_sample_coeff = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        variance = 0
        if t > 0:
            variance_noise = randn_tensor(
                model_output.shape, dtype=model_output.dtype, generator=generator, device=model_output.device
            )

            variance = self._get_variance(
                t,
                predicted_variance=predicted_variance,
                prev_timestep=prev_timestep,
            )

            if self.variance_type == "fixed_small_log":
                variance = variance
            elif self.variance_type == "learned_range":
                variance = (0.5 * variance).exp()
            else:
                raise ValueError(
                    f"variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`"
                    " for the UnCLIPScheduler."
                )

            variance = variance * variance_noise

        pred_prev_sample = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample,)

        return UnCLIPSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)

    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.IntTensor,
    ) -> torch.FloatTensor:
        # Make sure alphas_cumprod and timesteps have the same device and dtype as original_samples
        alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device, dtype=original_samples.dtype)
        timesteps = timesteps.to(original_samples.device)

        sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
        sqrt_alpha_prod = sqrt_alpha_prod.flatten()
        while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
            sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)

        sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
        sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
        while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
            sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)

        noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
        return noisy_samples
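
# A minimal denoising-loop sketch (inert on import). The random "model output"
# is a stand-in for a trained UnCLIP denoising network, so the resulting
# sample is meaningless; the point is the set_timesteps/step API shown above.
if __name__ == "__main__":
    scheduler = UnCLIPScheduler()
    scheduler.set_timesteps(25)

    sample = torch.randn(1, 3, 64, 64)  # start from pure noise
    for t in scheduler.timesteps:
        model_output = torch.randn(1, 3, 64, 64)  # placeholder for model(sample, t)
        sample = scheduler.step(model_output, t, sample).prev_sample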
# --- transformers: FocalNet checkpoint conversion script ---
import argparse
import json

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms

from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling


def get_focalnet_config(model_name):
    depths = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
    use_conv_embed = True if "large" in model_name or "huge" in model_name else False
    use_post_layernorm = True if "large" in model_name or "huge" in model_name else False
    use_layerscale = True if "large" in model_name or "huge" in model_name else False

    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]

    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]

    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352

    # set label information
    repo_id = "huggingface/label-files"
    if "large" in model_name or "huge" in model_name:
        filename = "imagenet-22k-id2label.json"
    else:
        filename = "imagenet-1k-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = FocalNetConfig(
        embed_dim=embed_dim,
        depths=depths,
        focal_levels=focal_levels,
        focal_windows=focal_windows,
        use_conv_embed=use_conv_embed,
        id2label=id2label,
        label2id=label2id,
        use_post_layernorm=use_post_layernorm,
        use_layerscale=use_layerscale,
    )
    return config


def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "encoder.layers" in name:
        name = name.replace("encoder.layers", "encoder.stages")
    if "downsample.proj" in name:
        name = name.replace("downsample.proj", "downsample.projection")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace("modulation.f", "modulation.projection_in")
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace("modulation.h", "modulation.projection_context")
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace("modulation.proj", "modulation.projection_out")

    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "focalnet." + name

    return name


def convert_focalnet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    # fmt: off
    model_name_to_url = {
        "focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
        "focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
        "focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
        "focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
        "focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
        "focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
        "focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
        "focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
        "focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
        "focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
    }
    # fmt: on

    checkpoint_url = model_name_to_url[model_name]
    print("Checkpoint URL: ", checkpoint_url)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val

    config = get_focalnet_config(model_name)
    model = FocalNetForImageClassification(config)
    model.eval()

    # load state dict
    model.load_state_dict(state_dict)

    # verify conversion
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": 256},
        resample=PILImageResampling.BILINEAR,
        do_center_crop=True,
        crop_size=224,
        do_normalize=True,
        image_mean=IMAGENET_DEFAULT_MEAN,
        image_std=IMAGENET_DEFAULT_STD,
    )
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(images=image, return_tensors="pt")

    image_transforms = transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )

    original_pixel_values = image_transforms(image).unsqueeze(0)

    # verify pixel_values
    assert torch.allclose(inputs.pixel_values, original_pixel_values, atol=1e-4)

    outputs = model(**inputs)

    predicted_class_idx = outputs.logits.argmax(-1).item()
    print("Predicted class:", model.config.id2label[predicted_class_idx])

    print("First values of logits:", outputs.logits[0, :3])

    if model_name == "focalnet-tiny":
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191])
    elif model_name == "focalnet-tiny-lrf":
        expected_slice = torch.tensor([1.1669, 0.0125, -0.1695])
    elif model_name == "focalnet-small":
        expected_slice = torch.tensor([0.4917, -0.0430, 0.1341])
    elif model_name == "focalnet-small-lrf":
        expected_slice = torch.tensor([-0.2588, -0.5342, -0.2331])
    elif model_name == "focalnet-base":
        expected_slice = torch.tensor([-0.1655, -0.4090, -0.1730])
    elif model_name == "focalnet-base-lrf":
        expected_slice = torch.tensor([0.5306, -0.0483, -0.3928])
    assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor of {model_name} to the hub...")
        model.push_to_hub(f"{model_name}")
        processor.push_to_hub(f"{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="focalnet-tiny",
        type=str,
        help="Name of the FocalNet model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub.",
    )

    args = parser.parse_args()
    convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
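
# The conversion can also be driven programmatically instead of via argparse;
# a minimal sketch (needs network access to fetch the original checkpoint,
# and "./focalnet-tiny" is a hypothetical output path). Shown as a comment so
# the script's own main block stays the single entry point:
#
#     convert_focalnet_checkpoint(
#         model_name="focalnet-tiny",
#         pytorch_dump_folder_path="./focalnet-tiny",
#         push_to_hub=False,
#     )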
"""simple docstring"""
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
__A = logging.get_logger(__name__)
@dataclass
class __lowerCAmelCase :
"""simple docstring"""
snake_case_ = field(metadata={'''help''': '''The name of the task to train on: ''' + ''', '''.join(glue_processors.keys() )} )
snake_case_ = field(
metadata={'''help''': '''The input data dir. Should contain the .tsv files (or other data files) for the task.'''} )
snake_case_ = field(
default=1_28 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
snake_case_ = field(
default=__magic_name__ , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
def lowercase_ ( self ) -> List[Any]:
'''simple docstring'''
__lowerCamelCase = self.task_name.lower()
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
snake_case_ = '''train'''
snake_case_ = '''dev'''
snake_case_ = '''test'''
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
snake_case_ = 42
snake_case_ = 42
snake_case_ = 42
def __init__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__ = Split.train , lowerCamelCase__ = None , ) -> int:
'''simple docstring'''
warnings.warn(
'This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets '
'library. You can have a look at this example script for pointers: '
'https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py' , lowerCamelCase__ , )
__lowerCamelCase = args
__lowerCamelCase = glue_processors[args.task_name]()
__lowerCamelCase = glue_output_modes[args.task_name]
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
try:
__lowerCamelCase = Split[mode]
except KeyError:
raise KeyError('mode is not a valid split name' )
# Load data features from cache or dataset file
__lowerCamelCase = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , f"""cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}""" , )
__lowerCamelCase = self.processor.get_labels()
if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
"RobertaTokenizer",
"RobertaTokenizerFast",
"XLMRobertaTokenizer",
"BartTokenizer",
"BartTokenizerFast",
):
# HACK(label indices are swapped in RoBERTa pretrained model)
__lowerCamelCase , __lowerCamelCase = label_list[2], label_list[1]
__lowerCamelCase = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
__lowerCamelCase = cached_features_file + '.lock'
with FileLock(lowerCamelCase__ ):
if os.path.exists(lowerCamelCase__ ) and not args.overwrite_cache:
__lowerCamelCase = time.time()
__lowerCamelCase = torch.load(lowerCamelCase__ )
logger.info(
f"""Loading features from cached file {cached_features_file} [took %.3f s]""" , time.time() - start )
else:
logger.info(f"""Creating features from dataset file at {args.data_dir}""" )
if mode == Split.dev:
__lowerCamelCase = self.processor.get_dev_examples(args.data_dir )
elif mode == Split.test:
__lowerCamelCase = self.processor.get_test_examples(args.data_dir )
else:
__lowerCamelCase = self.processor.get_train_examples(args.data_dir )
if limit_length is not None:
__lowerCamelCase = examples[:limit_length]
__lowerCamelCase = glue_convert_examples_to_features(
lowerCamelCase__ , lowerCamelCase__ , max_length=args.max_seq_length , label_list=lowerCamelCase__ , output_mode=self.output_mode , )
__lowerCamelCase = time.time()
torch.save(self.features , lowerCamelCase__ )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f"""Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]""" )
def __len__( self ) -> List[Any]:
'''simple docstring'''
return len(self.features )
def __getitem__( self , lowerCamelCase__ ) -> InputFeatures:
'''simple docstring'''
return self.features[i]
def lowercase_ ( self ) -> int:
'''simple docstring'''
return self.label_list
# --- Median of two arrays ---
from __future__ import annotations


def median_of_two_arrays(nums1: list[float], nums2: list[float]) -> float:
    """Return the median of the combined elements of the two input lists."""
    all_numbers = sorted(nums1 + nums2)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    array_1 = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_2 = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(f"The median of two arrays is: {median_of_two_arrays(array_1, array_2)}")
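
# A few hand-checked cases for median_of_two_arrays:
assert median_of_two_arrays([1.0, 3.0], [2.0]) == 2.0       # odd combined length
assert median_of_two_arrays([1.0, 2.0], [3.0, 4.0]) == 2.5  # even combined length
assert median_of_two_arrays([], [5.0]) == 5.0               # one empty input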
# --- Project Euler 145: reversible numbers ---
EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]


def reversible_numbers(remaining_length: int, remainder: int, digits: list[int], length: int) -> int:
    """
    Count the reversible numbers of the given length by filling `digits` from
    the outside in, tracking the carry parity in `remainder`.
    """
    if remaining_length == 0:
        if digits[0] == 0 or digits[-1] == 0:
            return 0

        for i in range(length // 2 - 1, -1, -1):
            remainder += digits[i] + digits[length - i - 1]

            if remainder % 2 == 0:
                return 0

            remainder //= 10

        return 1

    if remaining_length == 1:
        if remainder % 2 == 0:
            return 0

        result = 0
        for digit in range(10):
            digits[length // 2] = digit
            result += reversible_numbers(0, (remainder + 2 * digit) // 10, digits, length)
        return result

    result = 0
    for digit1 in range(10):
        digits[(length + remaining_length) // 2 - 1] = digit1

        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS

        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2,
                (remainder + digit1 + digit2) // 10,
                digits,
                length,
            )
    return result


def solution(max_power: int = 9) -> int:
    """Sum the counts of reversible numbers over every length up to max_power digits."""
    result = 0
    for length in range(1, max_power + 1):
        result += reversible_numbers(length, 0, [0] * length, length)
    return result


if __name__ == "__main__":
    print(f"{solution() = }")
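
# Sanity check against the Project Euler 145 statement, which says there are
# exactly 120 reversible numbers below one thousand (i.e. max_power=3):
assert solution(3) == 120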
# --- Energy unit conversion ---
ENERGY_CONVERSION = {
    "joule": 1.0,
    "kilojoule": 1_000,
    "megajoule": 1_000_000,
    "gigajoule": 1_000_000_000,
    "wattsecond": 1.0,
    "watthour": 3_600,
    "kilowatthour": 3_600_000,
    "newtonmeter": 1.0,
    "calorie_nutr": 4_186.8,
    "kilocalorie_nutr": 4_186_800.00,
    "electronvolt": 1.602_176_634e-19,
    "britishthermalunit_it": 1_055.055_85,
    "footpound": 1.355_818,
}


def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    """Convert `value` between the energy units listed in ENERGY_CONVERSION (all factors are joules per unit)."""
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
        raise ValueError(msg)
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
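
# Worked examples for energy_conversion (value * from-factor / to-factor):
assert energy_conversion("joule", "kilojoule", 1) == 0.001       # 1 J   -> kJ
assert energy_conversion("kilowatthour", "megajoule", 1) == 3.6  # 1 kWh -> MJ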
# --- transformers: models/sew_d/configuration_sew_d.py ---
import functools
import operator

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SEW_D_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "asapp/sew-d-tiny-100k": "https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json",
    # See all SEW-D models at https://huggingface.co/models?filter=sew-d
}


class SEWDConfig(PretrainedConfig):
    model_type = "sew-d"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        squeeze_factor=2,
        max_position_embeddings=512,
        position_buckets=256,
        share_att_key=True,
        relative_attention=True,
        pos_att_type=("p2c", "c2p"),
        norm_rel_ebd="layer_norm",
        hidden_act="gelu_python",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        feature_layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.max_position_embeddings = max_position_embeddings
        self.position_buckets = position_buckets
        self.share_att_key = share_att_key
        self.relative_attention = relative_attention
        self.norm_rel_ebd = norm_rel_ebd
        self.pos_att_type = list(pos_att_type)
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layer_norm_eps = layer_norm_eps
        self.feature_layer_norm_eps = feature_layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect."
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride)"
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
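
# The inputs_to_logits_ratio property above is just the product of the
# convolutional strides, i.e. the total downsampling factor of the feature
# extractor. With the default strides it is 5 * 2**6 = 320. A minimal sketch,
# kept inert on import:
if __name__ == "__main__":
    config = SEWDConfig()
    assert config.inputs_to_logits_ratio == 320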
# --- transformers: tools/text_summarization.py ---
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool


class TextSummarizationTool(PipelineTool):
    default_checkpoint = "philschmid/bart-large-cnn-samsum"
    description = (
        "This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, "
        "and returns a summary of the text."
    )
    name = "summarizer"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM

    inputs = ["text"]
    outputs = ["text"]

    def encode(self, text):
        return self.pre_processor(text, return_tensors="pt", truncation=True)

    def forward(self, inputs):
        return self.model.generate(**inputs)[0]

    def decode(self, outputs):
        return self.pre_processor.decode(outputs, skip_special_tokens=True, clean_up_tokenization_spaces=True)
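
# A minimal usage sketch (not verified against every library version): tools
# are callable and run encode -> forward -> decode under the hood, loading the
# checkpoint on first use. Kept inert on import:
if __name__ == "__main__":
    summarizer = TextSummarizationTool()
    print(summarizer("Replace this with a long English text to summarize."))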
# --- Strand sort ---
import operator


def strand_sort(arr: list, reverse: bool = False, solution: list | None = None) -> list:
    """
    Strand sort: repeatedly pull a monotone "strand" out of the input and
    merge it into the solution list.

    :param arr: unordered input list
    :param reverse: sort in descending order when True
    :param solution: accumulator used by the recursive calls
    :return: sorted list
    """
    _operator = operator.lt if reverse else operator.gt
    solution = solution or []

    if not arr:
        return solution

    sublist = [arr.pop(0)]
    for i, item in enumerate(arr):
        if _operator(item, sublist[-1]):
            sublist.append(item)
            arr.pop(i)

    # merging sublist into solution list
    if not solution:
        solution.extend(sublist)
    else:
        while sublist:
            item = sublist.pop(0)
            for i, xx in enumerate(solution):
                if not _operator(item, xx):
                    solution.insert(i, item)
                    break
            else:
                solution.append(item)

    strand_sort(arr, reverse, solution)
    return solution


if __name__ == "__main__":
    assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
    assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
# --- transformers: tests/models/roberta_prelayernorm/test_modeling_flax_roberta_prelayernorm.py ---
import unittest

import numpy as np

from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow

from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask


if is_flax_available():
    import jax.numpy as jnp

    from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
        FlaxRobertaPreLayerNormForCausalLM,
        FlaxRobertaPreLayerNormForMaskedLM,
        FlaxRobertaPreLayerNormForMultipleChoice,
        FlaxRobertaPreLayerNormForQuestionAnswering,
        FlaxRobertaPreLayerNormForSequenceClassification,
        FlaxRobertaPreLayerNormForTokenClassification,
        FlaxRobertaPreLayerNormModel,
    )


class FlaxRobertaPreLayerNormModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaPreLayerNormConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )


@require_flax
# Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class FlaxRobertaPreLayerNormModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRobertaPreLayerNormModel,
            FlaxRobertaPreLayerNormForCausalLM,
            FlaxRobertaPreLayerNormForMaskedLM,
            FlaxRobertaPreLayerNormForSequenceClassification,
            FlaxRobertaPreLayerNormForTokenClassification,
            FlaxRobertaPreLayerNormForMultipleChoice,
            FlaxRobertaPreLayerNormForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRobertaPreLayerNormModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)


@require_flax
class FlaxRobertaPreLayerNormModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        expected_shape = [1, 11, 50265]
        self.assertEqual(list(output.shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head(self):
        model = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
# --- transformers: models/timm_backbone/modeling_timm_backbone.py ---
from typing import Tuple, Union

from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig


if is_timm_available():
    import timm

if is_torch_available():
    from torch import Tensor


class TimmBackbone(PreTrainedModel, BackboneMixin):
    """
    Wrapper class for timm models to be used as backbones, keeping the same API as the other backbone models in the
    library.
    """

    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False
    config_class = TimmBackboneConfig

    def __init__(self, config, **kwargs):
        requires_backends(self, "timm")
        super().__init__(config)
        self.config = config

        if config.backbone is None:
            raise ValueError("backbone is not set in the config. Please set it to a timm model name.")

        if config.backbone not in timm.list_models():
            raise ValueError(f"backbone {config.backbone} is not supported by timm.")

        if hasattr(config, "out_features") and config.out_features is not None:
            raise ValueError("out_features is not supported by TimmBackbone. Please use out_indices instead.")

        pretrained = getattr(config, "use_pretrained_backbone", None)
        if pretrained is None:
            raise ValueError("use_pretrained_backbone is not set in the config. Please set it to True or False.")

        # We just take the final layer by default. This matches the default for the transformers models.
        out_indices = config.out_indices if getattr(config, "out_indices", None) is not None else (-1,)

        self._backbone = timm.create_model(
            config.backbone,
            pretrained=pretrained,
            features_only=config.features_only,
            in_chans=config.num_channels,
            out_indices=out_indices,
            **kwargs,
        )
        # These are used to control the output of the model when called. If output_hidden_states is True, then
        # return_layers is modified to include all layers.
        self._return_layers = self._backbone.return_layers
        self._all_layers = {layer["module"]: str(i) for i, layer in enumerate(self._backbone.feature_info.info)}
        super()._init_backbone(config)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        requires_backends(cls, ["vision", "timm"])
        from ...models.timm_backbone import TimmBackboneConfig

        config = kwargs.pop("config", TimmBackboneConfig())

        use_timm = kwargs.pop("use_timm_backbone", True)
        if not use_timm:
            raise ValueError("use_timm_backbone must be True for timm backbones")

        num_channels = kwargs.pop("num_channels", config.num_channels)
        features_only = kwargs.pop("features_only", config.features_only)
        use_pretrained_backbone = kwargs.pop("use_pretrained_backbone", config.use_pretrained_backbone)
        out_indices = kwargs.pop("out_indices", config.out_indices)
        config = TimmBackboneConfig(
            backbone=pretrained_model_name_or_path,
            num_channels=num_channels,
            features_only=features_only,
            use_pretrained_backbone=use_pretrained_backbone,
            out_indices=out_indices,
        )
        return super()._from_config(config, **kwargs)

    def _init_weights(self, module):
        """
        Empty init weights function to ensure compatibility of the class in the library.
        """
        pass

    def forward(
        self, pixel_values, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs
    ) -> Union[BackboneOutput, Tuple[Tensor, ...]]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions

        if output_attentions:
            raise ValueError("Cannot output attentions for timm backbones at the moment")

        if output_hidden_states:
            # We modify the return layers to include all the stages of the backbone
            self._backbone.return_layers = self._all_layers
            hidden_states = self._backbone(pixel_values, **kwargs)
            self._backbone.return_layers = self._return_layers
            feature_maps = tuple(hidden_states[i] for i in self.out_indices)
        else:
            feature_maps = self._backbone(pixel_values, **kwargs)
            hidden_states = None

        feature_maps = tuple(feature_maps)
        hidden_states = tuple(hidden_states) if hidden_states is not None else None

        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output = output + (hidden_states,)
            return output

        return BackboneOutput(feature_maps=feature_maps, hidden_states=hidden_states, attentions=None)
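
# A minimal usage sketch mirroring the equivalence test earlier in this
# document: load a timm resnet18 through AutoBackbone and inspect the selected
# feature maps. Kept inert on import:
if __name__ == "__main__":
    import torch

    from transformers import AutoBackbone

    backbone = AutoBackbone.from_pretrained("resnet18", use_timm_backbone=True, out_indices=[1, 2, 3])
    outputs = backbone(torch.randn(1, 3, 224, 224))
    for feature_map in outputs.feature_maps:
        print(feature_map.shape)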
# --- transformers: utils/dummy_sentencepiece_objects.py (autogenerated) ---
# This autogenerated file defines one placeholder class per sentencepiece-backed
# tokenizer, every class following the identical template below. The individual
# class names were lost in extraction, so the pattern is shown once for
# AlbertTokenizer (the first entry of the file) and noted for the rest.
from ..utils import DummyObject, requires_backends


class AlbertTokenizer(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


# The same definition repeats, with only the class name changing, for every
# other tokenizer that requires the `sentencepiece` backend (BarthezTokenizer,
# BigBirdTokenizer, CamembertTokenizer, MBartTokenizer, T5Tokenizer,
# XLNetTokenizer, and the remaining sentencepiece tokenizers).
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> int:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
| 348 | 0 |
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def lowerCamelCase_ ( UpperCamelCase__ : Any , UpperCamelCase__ : List[str] ) -> Optional[int]:
"""simple docstring"""
__lowerCamelCase = []
for part_id in partition_order:
__lowerCamelCase = df.where(F"""SPARK_PARTITION_ID() = {part_id}""" ).collect()
for row_idx, row in enumerate(UpperCamelCase__ ):
expected_row_ids_and_row_dicts.append((F"""{part_id}_{row_idx}""", row.asDict()) )
return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def lowerCamelCase_ ( ) -> str:
"""simple docstring"""
__lowerCamelCase = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
__lowerCamelCase = spark.range(100 ).repartition(1 )
__lowerCamelCase = Spark(UpperCamelCase__ )
    # The id ints will be converted to PyArrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
    # that each partition can hold 2 rows.
spark_builder._repartition_df_if_needed(max_shard_size=16 )
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def lowerCamelCase_ ( ) -> Optional[Any]:
"""simple docstring"""
__lowerCamelCase = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
__lowerCamelCase = spark.range(10 ).repartition(2 )
__lowerCamelCase = [1, 0]
__lowerCamelCase = _generate_iterable_examples(UpperCamelCase__ , UpperCamelCase__ ) # Reverse the partitions.
__lowerCamelCase = _get_expected_row_ids_and_row_dicts_for_partition_order(UpperCamelCase__ , UpperCamelCase__ )
for i, (row_id, row_dict) in enumerate(generate_fn() ):
__lowerCamelCase , __lowerCamelCase = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def lowerCamelCase_ ( ) -> str:
"""simple docstring"""
__lowerCamelCase = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
__lowerCamelCase = spark.range(10 ).repartition(1 )
__lowerCamelCase = SparkExamplesIterable(UpperCamelCase__ )
assert it.n_shards == 1
for i, (row_id, row_dict) in enumerate(UpperCamelCase__ ):
assert row_id == F"""0_{i}"""
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def lowerCamelCase_ ( ) -> int:
"""simple docstring"""
__lowerCamelCase = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
__lowerCamelCase = spark.range(30 ).repartition(3 )
# Mock the generator so that shuffle reverses the partition indices.
with patch('numpy.random.Generator' ) as generator_mock:
__lowerCamelCase = lambda UpperCamelCase__ : x.reverse()
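        # shuffle_data_sources shuffles the partition index list in place via
        # generator.shuffle, so mocking it with list.reverse() turns [0, 1, 2] into [2, 1, 0].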
__lowerCamelCase = _get_expected_row_ids_and_row_dicts_for_partition_order(UpperCamelCase__ , [2, 1, 0] )
__lowerCamelCase = SparkExamplesIterable(UpperCamelCase__ ).shuffle_data_sources(UpperCamelCase__ )
assert shuffled_it.n_shards == 3
for i, (row_id, row_dict) in enumerate(UpperCamelCase__ ):
__lowerCamelCase , __lowerCamelCase = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def lowerCamelCase_ ( ) -> Union[str, Any]:
"""simple docstring"""
__lowerCamelCase = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
__lowerCamelCase = spark.range(20 ).repartition(4 )
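    # shard_data_sources hands out partitions round-robin, so with num_workers=2
    # worker 0 reads partitions {0, 2} and worker 1 reads partitions {1, 3}.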
# Partitions 0 and 2
__lowerCamelCase = SparkExamplesIterable(UpperCamelCase__ ).shard_data_sources(worker_id=0 , num_workers=2 )
assert shard_it_a.n_shards == 2
__lowerCamelCase = _get_expected_row_ids_and_row_dicts_for_partition_order(UpperCamelCase__ , [0, 2] )
for i, (row_id, row_dict) in enumerate(UpperCamelCase__ ):
__lowerCamelCase , __lowerCamelCase = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
# Partitions 1 and 3
__lowerCamelCase = SparkExamplesIterable(UpperCamelCase__ ).shard_data_sources(worker_id=1 , num_workers=2 )
assert shard_it_a.n_shards == 2
__lowerCamelCase = _get_expected_row_ids_and_row_dicts_for_partition_order(UpperCamelCase__ , [1, 3] )
for i, (row_id, row_dict) in enumerate(UpperCamelCase__ ):
__lowerCamelCase , __lowerCamelCase = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def lowerCamelCase_ ( ) -> str:
"""simple docstring"""
__lowerCamelCase = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
__lowerCamelCase = spark.range(100 ).repartition(1 )
__lowerCamelCase = Spark(UpperCamelCase__ )
# Choose a small max_shard_size for maximum partitioning.
spark_builder._repartition_df_if_needed(max_shard_size=1 )
# The new number of partitions should not be greater than the number of rows.
assert spark_builder.df.rdd.getNumPartitions() == 100
| 350 |
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = '''hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'''
def lowercase_ ( self , lowerCamelCase__=0 ) -> int:
'''simple docstring'''
__lowerCamelCase = floats_tensor((1, 3, 128, 128) , rng=random.Random(lowerCamelCase__ ) )
__lowerCamelCase = np.random.RandomState(lowerCamelCase__ )
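        # img2img `strength` (0.75 below) controls how far along the noise schedule the
        # init image is pushed: values near 1 effectively ignore the input image, values
        # near 0 barely change it, and roughly strength * num_inference_steps denoising
        # steps are actually run.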
__lowerCamelCase = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 3,
'strength': 0.75,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
__lowerCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__lowerCamelCase = self.get_dummy_inputs()
__lowerCamelCase = pipe(**lowerCamelCase__ ).images
__lowerCamelCase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 128, 128, 3)
__lowerCamelCase = np.array([0.6_96_43, 0.5_84_84, 0.5_03_14, 0.5_87_60, 0.5_53_68, 0.5_96_43, 0.5_15_29, 0.4_12_17, 0.4_90_87] )
assert np.abs(image_slice - expected_slice ).max() < 1e-1
def lowercase_ ( self ) -> Tuple:
'''simple docstring'''
__lowerCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
__lowerCamelCase = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__lowerCamelCase = self.get_dummy_inputs()
__lowerCamelCase = pipe(**lowerCamelCase__ ).images
__lowerCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowerCamelCase = np.array([0.6_17_37, 0.5_46_42, 0.5_31_83, 0.5_44_65, 0.5_27_42, 0.6_05_25, 0.4_99_69, 0.4_06_55, 0.4_81_54] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
__lowerCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
__lowerCamelCase = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
# warmup pass to apply optimizations
__lowerCamelCase = pipe(**self.get_dummy_inputs() )
__lowerCamelCase = self.get_dummy_inputs()
__lowerCamelCase = pipe(**lowerCamelCase__ ).images
__lowerCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowerCamelCase = np.array([0.5_27_61, 0.5_99_77, 0.4_90_33, 0.4_96_19, 0.5_42_82, 0.5_03_11, 0.4_76_00, 0.4_09_18, 0.4_52_03] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def lowercase_ ( self ) -> str:
'''simple docstring'''
__lowerCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
__lowerCamelCase = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__lowerCamelCase = self.get_dummy_inputs()
__lowerCamelCase = pipe(**lowerCamelCase__ ).images
__lowerCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowerCamelCase = np.array([0.5_29_11, 0.6_00_04, 0.4_92_29, 0.4_98_05, 0.5_45_02, 0.5_06_80, 0.4_77_77, 0.4_10_28, 0.4_53_04] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
__lowerCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
__lowerCamelCase = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__lowerCamelCase = self.get_dummy_inputs()
__lowerCamelCase = pipe(**lowerCamelCase__ ).images
__lowerCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowerCamelCase = np.array([0.5_29_11, 0.6_00_04, 0.4_92_29, 0.4_98_05, 0.5_45_02, 0.5_06_80, 0.4_77_77, 0.4_10_28, 0.4_53_04] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def lowercase_ ( self ) -> List[Any]:
'''simple docstring'''
__lowerCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
__lowerCamelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__lowerCamelCase = self.get_dummy_inputs()
__lowerCamelCase = pipe(**lowerCamelCase__ ).images
__lowerCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowerCamelCase = np.array([0.6_53_31, 0.5_82_77, 0.4_82_04, 0.5_60_59, 0.5_36_65, 0.5_62_35, 0.5_09_69, 0.4_00_09, 0.4_65_52] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@property
def lowercase_ ( self ) -> int:
'''simple docstring'''
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def lowercase_ ( self ) -> Tuple:
'''simple docstring'''
__lowerCamelCase = ort.SessionOptions()
__lowerCamelCase = False
return options
def lowercase_ ( self ) -> Any:
'''simple docstring'''
__lowerCamelCase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
__lowerCamelCase = init_image.resize((768, 512) )
# using the PNDM scheduler by default
__lowerCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='onnx' , safety_checker=lowerCamelCase__ , feature_extractor=lowerCamelCase__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__lowerCamelCase = 'A fantasy landscape, trending on artstation'
__lowerCamelCase = np.random.RandomState(0 )
__lowerCamelCase = pipe(
prompt=lowerCamelCase__ , image=lowerCamelCase__ , strength=0.75 , guidance_scale=7.5 , num_inference_steps=10 , generator=lowerCamelCase__ , output_type='np' , )
__lowerCamelCase = output.images
__lowerCamelCase = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
__lowerCamelCase = np.array([0.49_09, 0.50_59, 0.53_72, 0.46_23, 0.48_76, 0.50_49, 0.48_20, 0.49_56, 0.50_19] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
def lowercase_ ( self ) -> int:
'''simple docstring'''
__lowerCamelCase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
__lowerCamelCase = init_image.resize((768, 512) )
__lowerCamelCase = LMSDiscreteScheduler.from_pretrained(
'runwayml/stable-diffusion-v1-5' , subfolder='scheduler' , revision='onnx' )
__lowerCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , revision='onnx' , scheduler=lowerCamelCase__ , safety_checker=lowerCamelCase__ , feature_extractor=lowerCamelCase__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__lowerCamelCase = 'A fantasy landscape, trending on artstation'
__lowerCamelCase = np.random.RandomState(0 )
__lowerCamelCase = pipe(
prompt=lowerCamelCase__ , image=lowerCamelCase__ , strength=0.75 , guidance_scale=7.5 , num_inference_steps=20 , generator=lowerCamelCase__ , output_type='np' , )
__lowerCamelCase = output.images
__lowerCamelCase = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
__lowerCamelCase = np.array([0.80_43, 0.9_26, 0.95_81, 0.81_19, 0.89_54, 0.9_13, 0.72_09, 0.74_63, 0.74_31] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
| 348 | 0 |
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply point the path at that existing clone
#
# If the training was done with a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/, then you likely need that fork
# on your path instead, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPTaConfig
def lowerCamelCase_ ( UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Union[str, Any]=0 ) -> List[str]:
"""simple docstring"""
if name is None:
__lowerCamelCase = None
else:
__lowerCamelCase = '.' * max(0 , spaces - 2 ) + '# {:' + str(50 - spaces ) + 's}'
__lowerCamelCase = fmt.format(UpperCamelCase__ )
# Print and recurse (if needed).
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
if msg is not None:
print(UpperCamelCase__ )
for k in val.keys():
recursive_print(UpperCamelCase__ , val[k] , spaces + 2 )
elif isinstance(UpperCamelCase__ , torch.Tensor ):
print(UpperCamelCase__ , ':' , val.size() )
else:
print(UpperCamelCase__ , ':' , UpperCamelCase__ )
def lowerCamelCase_ ( UpperCamelCase__ : Tuple , UpperCamelCase__ : Dict , UpperCamelCase__ : int , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
__lowerCamelCase = param.size()
if checkpoint_version == 1.0:
# version 1.0 stores [num_heads * hidden_size * num_splits, :]
__lowerCamelCase = (num_heads, hidden_size, num_splits) + input_shape[1:]
__lowerCamelCase = param.view(*UpperCamelCase__ )
__lowerCamelCase = param.transpose(0 , 2 )
__lowerCamelCase = param.transpose(1 , 2 ).contiguous()
elif checkpoint_version >= 2.0:
# other versions store [num_heads * num_splits * hidden_size, :]
__lowerCamelCase = (num_heads, num_splits, hidden_size) + input_shape[1:]
__lowerCamelCase = param.view(*UpperCamelCase__ )
__lowerCamelCase = param.transpose(0 , 1 ).contiguous()
__lowerCamelCase = param.view(*UpperCamelCase__ )
return param
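# Shape sketch for checkpoint_version >= 2.0 with num_splits=3 (fused QKV): a weight of
# shape [heads * 3 * size_per_head, :] is viewed as (heads, 3, size_per_head, ...),
# transposed to (3, heads, size_per_head, ...), then flattened back to the input shape,
# so that Q, K and V end up as three contiguous blocks of rows.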
def lowerCamelCase_ ( UpperCamelCase__ : List[Any] , UpperCamelCase__ : str , UpperCamelCase__ : str ) -> str:
"""simple docstring"""
__lowerCamelCase = {}
# old versions did not store training args
__lowerCamelCase = input_state_dict.get('args' , UpperCamelCase__ )
if ds_args is not None:
# do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
# from pprint import pprint
# pprint(vars(ds_args))
__lowerCamelCase = ds_args.padded_vocab_size
__lowerCamelCase = ds_args.max_position_embeddings
__lowerCamelCase = ds_args.hidden_size
__lowerCamelCase = ds_args.num_layers
__lowerCamelCase = ds_args.num_attention_heads
__lowerCamelCase = ds_args.ffn_hidden_size
# pprint(config)
# The number of heads.
__lowerCamelCase = config.n_head
# The hidden_size per head.
__lowerCamelCase = config.n_embd // config.n_head
# Megatron-LM checkpoint version
if "checkpoint_version" in input_state_dict.keys():
__lowerCamelCase = input_state_dict['checkpoint_version']
else:
__lowerCamelCase = 0.0
# The model.
__lowerCamelCase = input_state_dict['model']
# The language model.
__lowerCamelCase = model['language_model']
# The embeddings.
__lowerCamelCase = lm['embedding']
# The word embeddings.
__lowerCamelCase = embeddings['word_embeddings']['weight']
# Truncate the embedding table to vocab_size rows.
__lowerCamelCase = word_embeddings[: config.vocab_size, :]
__lowerCamelCase = word_embeddings
# The position embeddings.
__lowerCamelCase = embeddings['position_embeddings']['weight']
# Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
__lowerCamelCase = pos_embeddings.size(0 )
if n_positions != config.n_positions:
raise ValueError(
F"""pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don't match""" )
# Store the position embeddings.
__lowerCamelCase = pos_embeddings
# The transformer.
__lowerCamelCase = lm['transformer'] if 'transformer' in lm.keys() else lm['encoder']
# The regex to extract layer names.
__lowerCamelCase = re.compile(R'layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)' )
# The simple map of names for "automated" rules.
__lowerCamelCase = {
'attention.dense': '.attn.c_proj.',
'self_attention.dense': '.attn.c_proj.',
'mlp.dense_h_to_4h': '.mlp.c_fc.',
'mlp.dense_4h_to_h': '.mlp.c_proj.',
}
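    # Megatron stores its linear weights in nn.Linear layout [out_features, in_features],
    # while GPT-2's Conv1D modules keep [in_features, out_features], hence the weight
    # transposes applied below.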
# Extract the layers.
for key, val in transformer.items():
# Match the name.
__lowerCamelCase = layer_re.match(UpperCamelCase__ )
# Stop if that's not a layer
if m is None:
break
# The index of the layer.
__lowerCamelCase = int(m.group(1 ) )
# The name of the operation.
__lowerCamelCase = m.group(2 )
# Is it a weight or a bias?
__lowerCamelCase = m.group(3 )
# The name of the layer.
__lowerCamelCase = F"""transformer.h.{layer_idx}"""
# For layernorm(s), simply store the layer norm.
if op_name.endswith('layernorm' ):
__lowerCamelCase = 'ln_1' if op_name.startswith('input' ) else 'ln_2'
__lowerCamelCase = val
# Transpose the QKV matrix.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "weight":
# Insert a tensor of 1x1xDxD bias.
__lowerCamelCase = torch.tril(torch.ones((n_positions, n_positions) , dtype=torch.floataa ) ).view(
1 , 1 , UpperCamelCase__ , UpperCamelCase__ )
__lowerCamelCase = causal_mask
# Insert a "dummy" tensor for masked_bias.
__lowerCamelCase = torch.tensor(-1E4 , dtype=torch.floataa )
__lowerCamelCase = masked_bias
__lowerCamelCase = fix_query_key_value_ordering(UpperCamelCase__ , UpperCamelCase__ , 3 , UpperCamelCase__ , UpperCamelCase__ )
# Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
__lowerCamelCase = out_val.transpose(0 , 1 ).contiguous()
# Store.
__lowerCamelCase = out_val
# Transpose the bias.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "bias":
__lowerCamelCase = fix_query_key_value_ordering(UpperCamelCase__ , UpperCamelCase__ , 3 , UpperCamelCase__ , UpperCamelCase__ )
# Store. No change of shape.
__lowerCamelCase = out_val
# Transpose the weights.
elif weight_or_bias == "weight":
__lowerCamelCase = megatron_to_transformers[op_name]
__lowerCamelCase = val.transpose(0 , 1 )
# Copy the bias.
elif weight_or_bias == "bias":
__lowerCamelCase = megatron_to_transformers[op_name]
__lowerCamelCase = val
# DEBUG.
assert config.n_layer == layer_idx + 1
# The final layernorm.
__lowerCamelCase = transformer['final_layernorm.weight']
__lowerCamelCase = transformer['final_layernorm.bias']
    # For the LM head, transformers expects the weight matrix to be tied to the word embeddings.
__lowerCamelCase = word_embeddings
# It should be done!
return output_state_dict
def lowerCamelCase_ ( ) -> int:
"""simple docstring"""
__lowerCamelCase = argparse.ArgumentParser()
parser.add_argument('--print-checkpoint-structure' , action='store_true' )
parser.add_argument(
'path_to_checkpoint' , type=UpperCamelCase__ , help='Path to the checkpoint file (.zip archive or direct .pt file)' , )
parser.add_argument(
'--config_file' , default='' , type=UpperCamelCase__ , help='An optional config json file describing the pre-trained model.' , )
__lowerCamelCase = parser.parse_args()
# Extract the basename.
__lowerCamelCase = os.path.dirname(args.path_to_checkpoint )
# Load the model.
# the .zip is very optional, let's keep it for backward compatibility
print(F"""Extracting PyTorch state dictionary from {args.path_to_checkpoint}""" )
if args.path_to_checkpoint.endswith('.zip' ):
with zipfile.ZipFile(args.path_to_checkpoint , 'r' ) as checkpoint:
with checkpoint.open('release/mp_rank_00/model_optim_rng.pt' ) as pytorch_dict:
__lowerCamelCase = torch.load(UpperCamelCase__ , map_location='cpu' )
else:
__lowerCamelCase = torch.load(args.path_to_checkpoint , map_location='cpu' )
__lowerCamelCase = input_state_dict.get('args' , UpperCamelCase__ )
# Read the config, or default to the model released by NVIDIA.
if args.config_file == "":
if ds_args is not None:
if ds_args.bias_gelu_fusion:
__lowerCamelCase = 'gelu_fast'
elif ds_args.openai_gelu:
__lowerCamelCase = 'gelu_new'
else:
__lowerCamelCase = 'gelu'
else:
# in the very early days this used to be "gelu_new"
__lowerCamelCase = 'gelu_new'
# Spell out all parameters in case the defaults change.
__lowerCamelCase = GPTaConfig(
vocab_size=5_0257 , n_positions=1024 , n_embd=1024 , n_layer=24 , n_head=16 , n_inner=4096 , activation_function=UpperCamelCase__ , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1E-5 , initializer_range=0.02 , summary_type='cls_index' , summary_use_proj=UpperCamelCase__ , summary_activation=UpperCamelCase__ , summary_proj_to_labels=UpperCamelCase__ , summary_first_dropout=0.1 , scale_attn_weights=UpperCamelCase__ , use_cache=UpperCamelCase__ , bos_token_id=5_0256 , eos_token_id=5_0256 , )
else:
__lowerCamelCase = GPTaConfig.from_json_file(args.config_file )
__lowerCamelCase = ['GPT2LMHeadModel']
# Convert.
print('Converting' )
__lowerCamelCase = convert_megatron_checkpoint(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# Print the structure of converted state dict.
if args.print_checkpoint_structure:
recursive_print(UpperCamelCase__ , UpperCamelCase__ )
# Add tokenizer class info to config
    # (see https://github.com/huggingface/transformers/issues/13906)
if ds_args is not None:
__lowerCamelCase = ds_args.tokenizer_type
if tokenizer_type == "GPT2BPETokenizer":
__lowerCamelCase = 'gpt2'
elif tokenizer_type == "PretrainedFromHF":
__lowerCamelCase = ds_args.tokenizer_name_or_path
else:
raise ValueError(F"""Unrecognized tokenizer_type {tokenizer_type}""" )
else:
__lowerCamelCase = 'gpt2'
__lowerCamelCase = AutoTokenizer.from_pretrained(UpperCamelCase__ )
__lowerCamelCase = type(UpperCamelCase__ ).__name__
__lowerCamelCase = tokenizer_class
# Store the config to file.
print('Saving config' )
config.save_pretrained(UpperCamelCase__ )
# Save tokenizer based on args
print(F"""Adding {tokenizer_class} tokenizer files""" )
tokenizer.save_pretrained(UpperCamelCase__ )
# Store the state_dict to file.
__lowerCamelCase = os.path.join(UpperCamelCase__ , 'pytorch_model.bin' )
print(F"""Saving checkpoint to \"{output_checkpoint_file}\"""" )
torch.save(UpperCamelCase__ , UpperCamelCase__ )
####################################################################################################
if __name__ == "__main__":
main()
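# Example invocation (paths are illustrative):
#
#   PYTHONPATH=/tmp/Megatron-LM python convert_megatron_gpt2_checkpoint.py \
#       --print-checkpoint-structure /path/to/megatron_gpt2_345m/checkpoint.zip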
####################################################################################################
| 351 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
__A = logging.get_logger(__name__)
__A = {
"openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json",
}
# fmt: off
__A = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 3_57, 3_66, 4_38, 5_32, 6_85,
7_05, 7_96, 9_30, 10_58, 12_20, 12_67, 12_79, 13_03, 13_43, 13_77,
13_91, 16_35, 17_82, 18_75, 21_62, 23_61, 24_88, 34_67, 40_08, 42_11,
46_00, 48_08, 52_99, 58_55, 63_29, 72_03, 96_09, 99_59, 1_05_63, 1_07_86,
1_14_20, 1_17_09, 1_19_07, 1_31_63, 1_36_97, 1_37_00, 1_48_08, 1_53_06, 1_64_10, 1_67_91,
1_79_92, 1_92_03, 1_95_10, 2_07_24, 2_23_05, 2_29_35, 2_70_07, 3_01_09, 3_04_20, 3_34_09,
3_49_49, 4_02_83, 4_04_93, 4_05_49, 4_72_82, 4_91_46, 5_02_57, 5_03_59, 5_03_60, 5_03_61
]
__A = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 3_59, 5_03, 5_22, 5_42, 8_73,
8_93, 9_02, 9_18, 9_22, 9_31, 13_50, 18_53, 19_82, 24_60, 26_27,
32_46, 32_53, 32_68, 35_36, 38_46, 39_61, 41_83, 46_67, 65_85, 66_47,
72_73, 90_61, 93_83, 1_04_28, 1_09_29, 1_19_38, 1_20_33, 1_23_31, 1_25_62, 1_37_93,
1_41_57, 1_46_35, 1_52_65, 1_56_18, 1_65_53, 1_66_04, 1_83_62, 1_89_56, 2_00_75, 2_16_75,
2_25_20, 2_61_30, 2_61_61, 2_64_35, 2_82_79, 2_94_64, 3_16_50, 3_23_02, 3_24_70, 3_68_65,
4_28_63, 4_74_25, 4_98_70, 5_02_54, 5_02_58, 5_03_60, 5_03_61, 5_03_62
]
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
snake_case_ = '''whisper'''
snake_case_ = ['''past_key_values''']
snake_case_ = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
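    # attribute_map aliases generic config names onto Whisper-specific ones, so e.g.
    # config.hidden_size transparently reads and writes config.d_model.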
def __init__( self , lowerCamelCase__=51_865 , lowerCamelCase__=80 , lowerCamelCase__=6 , lowerCamelCase__=4 , lowerCamelCase__=6 , lowerCamelCase__=4 , lowerCamelCase__=1_536 , lowerCamelCase__=1_536 , lowerCamelCase__=0.0 , lowerCamelCase__=0.0 , lowerCamelCase__=50_257 , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__="gelu" , lowerCamelCase__=256 , lowerCamelCase__=0.0 , lowerCamelCase__=0.0 , lowerCamelCase__=0.0 , lowerCamelCase__=0.02 , lowerCamelCase__=False , lowerCamelCase__=1_500 , lowerCamelCase__=448 , lowerCamelCase__=50_256 , lowerCamelCase__=50_256 , lowerCamelCase__=50_256 , lowerCamelCase__=None , lowerCamelCase__=[220, 50_256] , lowerCamelCase__=False , lowerCamelCase__=256 , lowerCamelCase__=False , lowerCamelCase__=0.05 , lowerCamelCase__=10 , lowerCamelCase__=2 , lowerCamelCase__=0.0 , lowerCamelCase__=10 , lowerCamelCase__=0 , lowerCamelCase__=7 , **lowerCamelCase__ , ) -> str:
'''simple docstring'''
__lowerCamelCase = vocab_size
__lowerCamelCase = num_mel_bins
__lowerCamelCase = d_model
__lowerCamelCase = encoder_layers
__lowerCamelCase = encoder_attention_heads
__lowerCamelCase = decoder_layers
__lowerCamelCase = decoder_attention_heads
__lowerCamelCase = decoder_ffn_dim
__lowerCamelCase = encoder_ffn_dim
__lowerCamelCase = dropout
__lowerCamelCase = attention_dropout
__lowerCamelCase = activation_dropout
__lowerCamelCase = activation_function
__lowerCamelCase = init_std
__lowerCamelCase = encoder_layerdrop
__lowerCamelCase = decoder_layerdrop
__lowerCamelCase = use_cache
__lowerCamelCase = encoder_layers
__lowerCamelCase = scale_embedding # scale factor will be sqrt(d_model) if True
__lowerCamelCase = max_source_positions
__lowerCamelCase = max_target_positions
# Audio Classification-specific parameters. Feel free to ignore for other classes.
__lowerCamelCase = classifier_proj_size
__lowerCamelCase = use_weighted_layer_sum
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__lowerCamelCase = apply_spec_augment
__lowerCamelCase = mask_time_prob
__lowerCamelCase = mask_time_length
__lowerCamelCase = mask_time_min_masks
__lowerCamelCase = mask_feature_prob
__lowerCamelCase = mask_feature_length
__lowerCamelCase = mask_feature_min_masks
__lowerCamelCase = median_filter_width
super().__init__(
pad_token_id=lowerCamelCase__ , bos_token_id=lowerCamelCase__ , eos_token_id=lowerCamelCase__ , is_encoder_decoder=lowerCamelCase__ , decoder_start_token_id=lowerCamelCase__ , suppress_tokens=lowerCamelCase__ , begin_suppress_tokens=lowerCamelCase__ , **lowerCamelCase__ , )
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
@property
def lowercase_ ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
__lowerCamelCase = OrderedDict(
[
('input_features', {0: 'batch', 1: 'feature_size', 2: 'encoder_sequence'}),
] )
if self.use_past:
__lowerCamelCase = {0: 'batch'}
else:
__lowerCamelCase = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(lowerCamelCase__ , direction='inputs' )
return common_inputs
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ = -1 , lowerCamelCase__ = -1 , lowerCamelCase__ = False , lowerCamelCase__ = None , lowerCamelCase__ = 22_050 , lowerCamelCase__ = 5.0 , lowerCamelCase__ = 220 , ) -> Mapping[str, Any]:
'''simple docstring'''
__lowerCamelCase = OrderedDict()
__lowerCamelCase = OnnxConfig.generate_dummy_inputs(
self , preprocessor=preprocessor.feature_extractor , batch_size=lowerCamelCase__ , framework=lowerCamelCase__ , sampling_rate=lowerCamelCase__ , time_duration=lowerCamelCase__ , frequency=lowerCamelCase__ , )
__lowerCamelCase = encoder_inputs['input_features'].shape[2]
__lowerCamelCase = encoder_sequence_length // 2 if self.use_past else seq_length
__lowerCamelCase = super().generate_dummy_inputs(
preprocessor.tokenizer , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase = encoder_inputs.pop('input_features' )
__lowerCamelCase = decoder_inputs.pop('decoder_input_ids' )
if "past_key_values" in decoder_inputs:
__lowerCamelCase = decoder_inputs.pop('past_key_values' )
return dummy_inputs
@property
def lowercase_ ( self ) -> float:
'''simple docstring'''
return 1e-3
| 348 | 0 |
def lowerCamelCase_ ( UpperCamelCase__ : int ) -> bool:
"""simple docstring"""
__lowerCamelCase = (1 + 24 * n) ** 0.5
return ((1 + root) / 6) % 1 == 0
def lowerCamelCase_ ( UpperCamelCase__ : int = 5000 ) -> int:
"""simple docstring"""
__lowerCamelCase = [(i * (3 * i - 1)) // 2 for i in range(1 , UpperCamelCase__ )]
for i, pentagonal_i in enumerate(UpperCamelCase__ ):
for j in range(UpperCamelCase__ , len(UpperCamelCase__ ) ):
__lowerCamelCase = pentagonal_nums[j]
__lowerCamelCase = pentagonal_i + pentagonal_j
__lowerCamelCase = pentagonal_j - pentagonal_i
if is_pentagonal(UpperCamelCase__ ) and is_pentagonal(UpperCamelCase__ ):
return b
return -1
if __name__ == "__main__":
print(f'''{solution() = }''')
| 352 |
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class __lowerCAmelCase :
"""simple docstring"""
def __init__( self , lowerCamelCase__ , lowerCamelCase__=14 , lowerCamelCase__=7 , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=False , lowerCamelCase__=True , lowerCamelCase__=99 , lowerCamelCase__=32 , lowerCamelCase__=4 , lowerCamelCase__=4 , lowerCamelCase__=4 , lowerCamelCase__=37 , lowerCamelCase__="gelu" , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=512 , lowerCamelCase__=0.02 , ) -> int:
'''simple docstring'''
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = seq_length
__lowerCamelCase = is_training
__lowerCamelCase = use_input_mask
__lowerCamelCase = use_token_type_ids
__lowerCamelCase = use_labels
__lowerCamelCase = vocab_size
__lowerCamelCase = hidden_size
__lowerCamelCase = rotary_dim
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_act
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = initializer_range
__lowerCamelCase = None
__lowerCamelCase = vocab_size - 1
__lowerCamelCase = vocab_size - 1
__lowerCamelCase = vocab_size - 1
def lowercase_ ( self ) -> List[Any]:
'''simple docstring'''
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCamelCase = None
if self.use_input_mask:
__lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCamelCase = GPTJConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=lowerCamelCase__ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , )
return (config, input_ids, input_mask)
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
__lowerCamelCase = self.prepare_config_and_inputs()
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = config_and_inputs
__lowerCamelCase = {'input_ids': input_ids, 'attention_mask': attention_mask}
return config, inputs_dict
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Any:
'''simple docstring'''
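        # Run every token except the last with a freshly initialized cache, then feed only
        # the last token with the populated cache; its logits must match those of a
        # cache-free forward pass over the full sequence.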
__lowerCamelCase = 20
__lowerCamelCase = model_class_name(lowerCamelCase__ )
__lowerCamelCase = model.init_cache(input_ids.shape[0] , lowerCamelCase__ )
__lowerCamelCase = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype='i4' )
__lowerCamelCase = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
__lowerCamelCase = model(
input_ids[:, :-1] , attention_mask=lowerCamelCase__ , past_key_values=lowerCamelCase__ , position_ids=lowerCamelCase__ , )
__lowerCamelCase = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='i4' )
__lowerCamelCase = model(
input_ids[:, -1:] , attention_mask=lowerCamelCase__ , past_key_values=outputs_cache.past_key_values , position_ids=lowerCamelCase__ , )
__lowerCamelCase = model(lowerCamelCase__ )
__lowerCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> int:
'''simple docstring'''
__lowerCamelCase = 20
__lowerCamelCase = model_class_name(lowerCamelCase__ )
__lowerCamelCase = jnp.concatenate(
[attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , )
__lowerCamelCase = model.init_cache(input_ids.shape[0] , lowerCamelCase__ )
__lowerCamelCase = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
__lowerCamelCase = model(
input_ids[:, :-1] , attention_mask=lowerCamelCase__ , past_key_values=lowerCamelCase__ , position_ids=lowerCamelCase__ , )
__lowerCamelCase = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='i4' )
__lowerCamelCase = model(
input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=lowerCamelCase__ , position_ids=lowerCamelCase__ , )
__lowerCamelCase = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ )
__lowerCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )
@require_flax
class __lowerCAmelCase ( __magic_name__ , __magic_name__ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
snake_case_ = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
__lowerCamelCase = FlaxGPTJModelTester(self )
def lowercase_ ( self ) -> str:
'''simple docstring'''
for model_class_name in self.all_model_classes:
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def lowercase_ ( self ) -> Optional[int]:
'''simple docstring'''
for model_class_name in self.all_model_classes:
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward_with_attn_mask(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
@tooslow
def lowercase_ ( self ) -> str:
'''simple docstring'''
__lowerCamelCase = GPTaTokenizer.from_pretrained('gpt2' , pad_token='<|endoftext|>' , padding_side='left' )
__lowerCamelCase = tokenizer(['Hello this is a long string', 'Hey'] , return_tensors='np' , padding=lowerCamelCase__ , truncation=lowerCamelCase__ )
__lowerCamelCase = FlaxGPTJForCausalLM.from_pretrained('EleutherAI/gpt-j-6B' )
__lowerCamelCase = False
__lowerCamelCase = model.config.eos_token_id
__lowerCamelCase = jax.jit(model.generate )
__lowerCamelCase = jit_generate(
inputs['input_ids'] , attention_mask=inputs['attention_mask'] , pad_token_id=tokenizer.pad_token_id ).sequences
__lowerCamelCase = tokenizer.batch_decode(lowerCamelCase__ , skip_special_tokens=lowerCamelCase__ )
__lowerCamelCase = [
'Hello this is a long string of text.\n\nI\'m trying to get the text of the',
'Hey, I\'m a little late to the party. I\'m going to',
]
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
@is_pt_flax_cross_test
def lowercase_ ( self ) -> Any:
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
__lowerCamelCase = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
__lowerCamelCase = model_class.__name__[4:] # Skip the "Flax" at the beginning
__lowerCamelCase = getattr(lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase , __lowerCamelCase = pt_inputs['input_ids'].shape
__lowerCamelCase = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(lowerCamelCase__ ):
__lowerCamelCase = 0
__lowerCamelCase = 1
__lowerCamelCase = 0
__lowerCamelCase = 1
__lowerCamelCase = pt_model_class(lowerCamelCase__ ).eval()
__lowerCamelCase = model_class(lowerCamelCase__ , dtype=jnp.floataa )
__lowerCamelCase = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , lowerCamelCase__ )
__lowerCamelCase = fx_state
with torch.no_grad():
__lowerCamelCase = pt_model(**lowerCamelCase__ ).to_tuple()
__lowerCamelCase = fx_model(**lowerCamelCase__ ).to_tuple()
self.assertEqual(len(lowerCamelCase__ ) , len(lowerCamelCase__ ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(lowerCamelCase__ )
__lowerCamelCase = model_class.from_pretrained(lowerCamelCase__ , from_pt=lowerCamelCase__ )
__lowerCamelCase = fx_model_loaded(**lowerCamelCase__ ).to_tuple()
self.assertEqual(
len(lowerCamelCase__ ) , len(lowerCamelCase__ ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output_loaded, pt_output in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
@is_pt_flax_cross_test
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
__lowerCamelCase = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
__lowerCamelCase = model_class.__name__[4:] # Skip the "Flax" at the beginning
__lowerCamelCase = getattr(lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase = pt_model_class(lowerCamelCase__ ).eval()
__lowerCamelCase = model_class(lowerCamelCase__ , dtype=jnp.floataa )
__lowerCamelCase = load_flax_weights_in_pytorch_model(lowerCamelCase__ , fx_model.params )
__lowerCamelCase , __lowerCamelCase = pt_inputs['input_ids'].shape
__lowerCamelCase = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(lowerCamelCase__ ):
__lowerCamelCase = 0
__lowerCamelCase = 1
__lowerCamelCase = 0
__lowerCamelCase = 1
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
__lowerCamelCase = pt_model(**lowerCamelCase__ ).to_tuple()
__lowerCamelCase = fx_model(**lowerCamelCase__ ).to_tuple()
self.assertEqual(len(lowerCamelCase__ ) , len(lowerCamelCase__ ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(lowerCamelCase__ )
__lowerCamelCase = pt_model_class.from_pretrained(lowerCamelCase__ , from_flax=lowerCamelCase__ )
with torch.no_grad():
__lowerCamelCase = pt_model_loaded(**lowerCamelCase__ ).to_tuple()
self.assertEqual(
len(lowerCamelCase__ ) , len(lowerCamelCase__ ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
@tooslow
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
for model_class_name in self.all_model_classes:
__lowerCamelCase = model_class_name.from_pretrained('EleutherAI/gpt-j-6B' )
__lowerCamelCase = model(np.ones((1, 1) ) )
self.assertIsNotNone(lowerCamelCase__ )
| 348 | 0 |
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
__A = logging.get_logger(__name__)
__A = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
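# Keys containing "*" in MAPPING are templates: the layer index parsed from the fairseq
# parameter name is substituted for "*" before the weight is copied over.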
__A = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def lowerCamelCase_ ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int , UpperCamelCase__ : str , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Union[str, Any] ) -> Tuple:
"""simple docstring"""
for attribute in key.split('.' ):
__lowerCamelCase = getattr(UpperCamelCase__ , UpperCamelCase__ )
if weight_type is not None:
__lowerCamelCase = getattr(UpperCamelCase__ , UpperCamelCase__ ).shape
else:
__lowerCamelCase = hf_pointer.shape
assert hf_shape == value.shape, (
F"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
__lowerCamelCase = value
elif weight_type == "weight_g":
__lowerCamelCase = value
elif weight_type == "weight_v":
__lowerCamelCase = value
elif weight_type == "bias":
__lowerCamelCase = value
else:
__lowerCamelCase = value
logger.info(F"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def lowerCamelCase_ ( UpperCamelCase__ : Any , UpperCamelCase__ : List[Any] ) -> str:
"""simple docstring"""
__lowerCamelCase = []
__lowerCamelCase = fairseq_model.state_dict()
__lowerCamelCase = hf_model.feature_extractor
    # if the encoder dim differs from the decoder dim -> use proj_weight
__lowerCamelCase = None
for name, value in fairseq_dict.items():
__lowerCamelCase = False
if "conv_layers" in name:
load_conv_layer(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , hf_model.config.feat_extract_norm == 'group' , )
__lowerCamelCase = True
elif name.split('.' )[0] == "proj":
__lowerCamelCase = fairseq_model.proj
__lowerCamelCase = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
__lowerCamelCase = True
if "*" in mapped_key:
__lowerCamelCase = name.split(UpperCamelCase__ )[0].split('.' )[-2]
__lowerCamelCase = mapped_key.replace('*' , UpperCamelCase__ )
if "weight_g" in name:
__lowerCamelCase = 'weight_g'
elif "weight_v" in name:
__lowerCamelCase = 'weight_v'
elif "bias" in name:
__lowerCamelCase = 'bias'
elif "weight" in name:
__lowerCamelCase = 'weight'
else:
__lowerCamelCase = None
set_recursively(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
continue
if not is_used:
unused_weights.append(UpperCamelCase__ )
logger.warning(F"""Unused weights: {unused_weights}""" )
return proj_weight
def lowerCamelCase_ ( UpperCamelCase__ : List[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Tuple , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
__lowerCamelCase = full_name.split('conv_layers.' )[-1]
__lowerCamelCase = name.split('.' )
__lowerCamelCase = int(items[0] )
__lowerCamelCase = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
__lowerCamelCase = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
__lowerCamelCase = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
__lowerCamelCase = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
__lowerCamelCase = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(UpperCamelCase__ )
def lowerCamelCase_ ( UpperCamelCase__ : Union[str, Any] ) -> Tuple:
"""simple docstring"""
__lowerCamelCase , __lowerCamelCase = emb.weight.shape
__lowerCamelCase = nn.Linear(UpperCamelCase__ , UpperCamelCase__ , bias=UpperCamelCase__ )
__lowerCamelCase = emb.weight.data
return lin_layer
def lowerCamelCase_ ( UpperCamelCase__ : Union[str, Any] ) -> List[str]:
"""simple docstring"""
with open(UpperCamelCase__ , 'r' , encoding='utf-8' ) as f:
__lowerCamelCase = f.readlines()
__lowerCamelCase = [line.split(' ' )[0] for line in lines]
__lowerCamelCase = len(UpperCamelCase__ )
__lowerCamelCase = {
'<s>': 0,
'<pad>': 1,
'</s>': 2,
'<unk>': 3,
}
vocab_dict.update(dict(zip(UpperCamelCase__ , range(4 , num_words + 4 ) ) ) )
return vocab_dict
@torch.no_grad()
def lowerCamelCase_ ( UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Union[str, Any] , ) -> Union[str, Any]:
"""simple docstring"""
__lowerCamelCase = WavaVecaConfig.from_pretrained(UpperCamelCase__ )
__lowerCamelCase = SpeechaTextaConfig.from_pretrained(
UpperCamelCase__ , vocab_size=UpperCamelCase__ , decoder_layers=UpperCamelCase__ , do_stable_layer_norm=UpperCamelCase__ )
__lowerCamelCase = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=UpperCamelCase__ , return_attention_mask=UpperCamelCase__ , )
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
__lowerCamelCase = model[0].eval()
# set weights for wav2vec2 encoder
__lowerCamelCase = WavaVecaModel(UpperCamelCase__ )
__lowerCamelCase = recursively_load_weights_wavaveca(model.encoder , UpperCamelCase__ )
__lowerCamelCase = SpeechaTextaForCausalLM(UpperCamelCase__ )
__lowerCamelCase , __lowerCamelCase = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=UpperCamelCase__ )
# set output linear layer
unexpected_keys.remove('embed_out' )
__lowerCamelCase = nn.Parameter(model.decoder.embed_out.detach() )
    # the layer norm is initialized to the identity, so leaving it untouched is fine
logger.warning(F"""The following keys are missing when loading the decoder weights: {missing_keys}""" )
logger.warning(F"""The following keys are unexpected when loading the decoder weights: {unexpected_keys}""" )
__lowerCamelCase = SpeechEncoderDecoderModel(encoder=UpperCamelCase__ , decoder=UpperCamelCase__ )
__lowerCamelCase = False
# add projection layer
__lowerCamelCase = nn.Parameter(projection_layer.weight )
__lowerCamelCase = nn.Parameter(projection_layer.bias )
__lowerCamelCase = create_vocab_dict(UpperCamelCase__ )
with open(os.path.join(UpperCamelCase__ , 'vocab.json' ) , 'w' ) as fp:
json.dump(UpperCamelCase__ , UpperCamelCase__ )
__lowerCamelCase = SpeechaTextaTokenizer(os.path.join(UpperCamelCase__ , 'vocab.json' ) )
tokenizer.save_pretrained(UpperCamelCase__ )
__lowerCamelCase = hf_wavavec.config.to_dict()
__lowerCamelCase = tokenizer.pad_token_id
__lowerCamelCase = tokenizer.bos_token_id
__lowerCamelCase = tokenizer.eos_token_id
__lowerCamelCase = 'speech_to_text_2'
__lowerCamelCase = 'wav2vec2'
__lowerCamelCase = SpeechEncoderDecoderConfig.from_dict(UpperCamelCase__ )
hf_wavavec.save_pretrained(UpperCamelCase__ )
feature_extractor.save_pretrained(UpperCamelCase__ )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument(
"--encoder_config_path",
default="facebook/wav2vec2-large-lv60",
type=str,
help="Path to hf encoder wav2vec2 checkpoint config",
)
parser.add_argument(
"--decoder_config_path",
default="facebook/s2t-small-mustc-en-fr-st",
type=str,
help="Path to hf decoder s2t checkpoint config",
)
parser.add_argument("--vocab_size", default=1_02_24, type=int, help="Vocab size of decoder")
parser.add_argument("--num_decoder_layers", default=7, type=int, help="Number of decoder layers")
__A = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
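# A hypothetical invocation of this conversion script (the script name and all
# paths below are placeholders, not values taken from this repository):
#
#   python convert_wav2vec2_seq2seq.py \
#       --checkpoint_path /path/to/fairseq/checkpoint.pt \
#       --pytorch_dump_folder_path /path/to/output_dir \
#       --dict_path /path/to/dict.txt
#
# The remaining arguments fall back to the defaults declared above.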
| 353 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
__A = False
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def lowercase_ ( self ) -> int:
'''simple docstring'''
return 12
@property
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
return 12
@property
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
return 32
@property
def lowercase_ ( self ) -> Dict:
'''simple docstring'''
torch.manual_seed(0 )
__lowerCamelCase = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , )
return model
@property
def lowercase_ ( self ) -> List[Any]:
'''simple docstring'''
__lowerCamelCase = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
torch.manual_seed(0 )
__lowerCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModel(lowerCamelCase__ )
@property
def lowercase_ ( self ) -> str:
'''simple docstring'''
torch.manual_seed(0 )
__lowerCamelCase = 12
__lowerCamelCase = 12
__lowerCamelCase = {
'attention_bias': True,
'cross_attention_dim': 32,
'attention_head_dim': height * width,
'num_attention_heads': 1,
'num_vector_embeds': self.num_embed,
'num_embeds_ada_norm': self.num_embeds_ada_norm,
'norm_num_groups': 32,
'sample_size': width,
'activation_fn': 'geglu-approximate',
}
__lowerCamelCase = TransformeraDModel(**lowerCamelCase__ )
return model
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
__lowerCamelCase = 'cpu'
__lowerCamelCase = self.dummy_vqvae
__lowerCamelCase = self.dummy_text_encoder
__lowerCamelCase = self.dummy_tokenizer
__lowerCamelCase = self.dummy_transformer
__lowerCamelCase = VQDiffusionScheduler(self.num_embed )
__lowerCamelCase = LearnedClassifierFreeSamplingEmbeddings(learnable=lowerCamelCase__ )
__lowerCamelCase = VQDiffusionPipeline(
vqvae=lowerCamelCase__ , text_encoder=lowerCamelCase__ , tokenizer=lowerCamelCase__ , transformer=lowerCamelCase__ , scheduler=lowerCamelCase__ , learned_classifier_free_sampling_embeddings=lowerCamelCase__ , )
__lowerCamelCase = pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__lowerCamelCase = 'teddy bear playing in the pool'
__lowerCamelCase = torch.Generator(device=lowerCamelCase__ ).manual_seed(0 )
__lowerCamelCase = pipe([prompt] , generator=lowerCamelCase__ , num_inference_steps=2 , output_type='np' )
__lowerCamelCase = output.images
__lowerCamelCase = torch.Generator(device=lowerCamelCase__ ).manual_seed(0 )
__lowerCamelCase = pipe(
[prompt] , generator=lowerCamelCase__ , output_type='np' , return_dict=lowerCamelCase__ , num_inference_steps=2 )[0]
__lowerCamelCase = image[0, -3:, -3:, -1]
__lowerCamelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
__lowerCamelCase = np.array([0.6551, 0.6168, 0.5008, 0.5676, 0.5659, 0.4295, 0.6073, 0.5599, 0.4992] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def lowercase_ ( self ) -> Tuple:
'''simple docstring'''
__lowerCamelCase = 'cpu'
__lowerCamelCase = self.dummy_vqvae
__lowerCamelCase = self.dummy_text_encoder
__lowerCamelCase = self.dummy_tokenizer
__lowerCamelCase = self.dummy_transformer
__lowerCamelCase = VQDiffusionScheduler(self.num_embed )
__lowerCamelCase = LearnedClassifierFreeSamplingEmbeddings(
learnable=lowerCamelCase__ , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length )
__lowerCamelCase = VQDiffusionPipeline(
vqvae=lowerCamelCase__ , text_encoder=lowerCamelCase__ , tokenizer=lowerCamelCase__ , transformer=lowerCamelCase__ , scheduler=lowerCamelCase__ , learned_classifier_free_sampling_embeddings=lowerCamelCase__ , )
__lowerCamelCase = pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__lowerCamelCase = 'teddy bear playing in the pool'
__lowerCamelCase = torch.Generator(device=lowerCamelCase__ ).manual_seed(0 )
__lowerCamelCase = pipe([prompt] , generator=lowerCamelCase__ , num_inference_steps=2 , output_type='np' )
__lowerCamelCase = output.images
__lowerCamelCase = torch.Generator(device=lowerCamelCase__ ).manual_seed(0 )
__lowerCamelCase = pipe(
[prompt] , generator=lowerCamelCase__ , output_type='np' , return_dict=lowerCamelCase__ , num_inference_steps=2 )[0]
__lowerCamelCase = image[0, -3:, -3:, -1]
__lowerCamelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
__lowerCamelCase = np.array([0.6693, 0.6075, 0.4959, 0.5701, 0.5583, 0.4333, 0.6171, 0.5684, 0.4988] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
__lowerCamelCase = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy' )
__lowerCamelCase = VQDiffusionPipeline.from_pretrained('microsoft/vq-diffusion-ithq' )
__lowerCamelCase = pipeline.to(lowerCamelCase__ )
pipeline.set_progress_bar_config(disable=lowerCamelCase__ )
# requires GPU generator for gumbel softmax
# don't use GPU generator in tests though
__lowerCamelCase = torch.Generator(device=lowerCamelCase__ ).manual_seed(0 )
__lowerCamelCase = pipeline(
'teddy bear playing in the pool' , num_images_per_prompt=1 , generator=lowerCamelCase__ , output_type='np' , )
__lowerCamelCase = output.images[0]
assert image.shape == (256, 256, 3)
assert np.abs(expected_image - image ).max() < 2.0
| 348 | 0 |
import requests
__A = "https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey="
def lowerCamelCase_ ( UpperCamelCase__ : str ) -> None:
"""simple docstring"""
__lowerCamelCase = requests.get(_NEWS_API + bbc_news_api_key ).json()
# each article in the list is a dict
for i, article in enumerate(bbc_news_page['articles'] , 1 ):
print(F"""{i}.) {article['title']}""" )
if __name__ == "__main__":
fetch_bbc_news(bbc_news_api_key="<Your BBC News API key goes here>")
| 354 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class __lowerCAmelCase :
"""simple docstring"""
def __init__( self , lowerCamelCase__ , lowerCamelCase__=2 , lowerCamelCase__=True , lowerCamelCase__=False , lowerCamelCase__=10 , lowerCamelCase__=3 , lowerCamelCase__=32 * 8 , lowerCamelCase__=32 * 8 , lowerCamelCase__=4 , lowerCamelCase__=64 , ) -> Optional[Any]:
'''simple docstring'''
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = is_training
__lowerCamelCase = use_auxiliary_loss
__lowerCamelCase = num_queries
__lowerCamelCase = num_channels
__lowerCamelCase = min_size
__lowerCamelCase = max_size
__lowerCamelCase = num_labels
__lowerCamelCase = hidden_dim
__lowerCamelCase = hidden_dim
def lowercase_ ( self ) -> Optional[int]:
'''simple docstring'''
__lowerCamelCase = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
lowerCamelCase__ )
__lowerCamelCase = torch.ones([self.batch_size, self.min_size, self.max_size] , device=lowerCamelCase__ )
__lowerCamelCase = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=lowerCamelCase__ ) > 0.5
).float()
__lowerCamelCase = (torch.rand((self.batch_size, self.num_labels) , device=lowerCamelCase__ ) > 0.5).long()
__lowerCamelCase = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
__lowerCamelCase = MaskaFormerConfig(
hidden_size=self.hidden_dim , )
__lowerCamelCase = self.num_queries
__lowerCamelCase = self.num_labels
__lowerCamelCase = [1, 1, 1, 1]
__lowerCamelCase = self.num_channels
__lowerCamelCase = 64
__lowerCamelCase = 128
__lowerCamelCase = self.hidden_dim
__lowerCamelCase = self.hidden_dim
__lowerCamelCase = self.hidden_dim
return config
def lowercase_ ( self ) -> str:
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = self.prepare_config_and_inputs()
__lowerCamelCase = {'pixel_values': pixel_values, 'pixel_mask': pixel_mask}
return config, inputs_dict
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ ) -> Any:
'''simple docstring'''
__lowerCamelCase = output.encoder_hidden_states
__lowerCamelCase = output.pixel_decoder_hidden_states
__lowerCamelCase = output.transformer_decoder_hidden_states
self.parent.assertEqual(len(lowerCamelCase__ ) , len(config.backbone_config.depths ) )
self.parent.assertEqual(len(lowerCamelCase__ ) , len(config.backbone_config.depths ) )
self.parent.assertEqual(len(lowerCamelCase__ ) , config.decoder_layers )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=False ) -> Tuple:
'''simple docstring'''
with torch.no_grad():
__lowerCamelCase = MaskaFormerModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
__lowerCamelCase = model(pixel_values=lowerCamelCase__ , pixel_mask=lowerCamelCase__ )
__lowerCamelCase = model(lowerCamelCase__ , output_hidden_states=lowerCamelCase__ )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
# let's ensure the other two hidden states exist
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(lowerCamelCase__ , lowerCamelCase__ )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Tuple:
'''simple docstring'''
__lowerCamelCase = MaskaFormerForUniversalSegmentation(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
def comm_check_on_output(lowerCamelCase__ ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
__lowerCamelCase = model(pixel_values=lowerCamelCase__ , pixel_mask=lowerCamelCase__ )
__lowerCamelCase = model(lowerCamelCase__ )
comm_check_on_output(lowerCamelCase__ )
__lowerCamelCase = model(
pixel_values=lowerCamelCase__ , pixel_mask=lowerCamelCase__ , mask_labels=lowerCamelCase__ , class_labels=lowerCamelCase__ )
comm_check_on_output(lowerCamelCase__ )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class __lowerCAmelCase ( __magic_name__ , __magic_name__ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
snake_case_ = {'''feature-extraction''': MaskaFormerModel} if is_torch_available() else {}
snake_case_ = False
snake_case_ = False
snake_case_ = False
snake_case_ = False
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
__lowerCamelCase = MaskaFormerModelTester(self )
__lowerCamelCase = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ )
def lowercase_ ( self ) -> Tuple:
'''simple docstring'''
self.config_tester.run_common_tests()
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(lowerCamelCase__ , **lowerCamelCase__ , output_hidden_states=lowerCamelCase__ )
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*lowerCamelCase__ )
@unittest.skip(reason='Mask2Former does not use inputs_embeds' )
def lowercase_ ( self ) -> Any:
'''simple docstring'''
pass
@unittest.skip(reason='Mask2Former does not have a get_input_embeddings method' )
def lowercase_ ( self ) -> Tuple:
'''simple docstring'''
pass
@unittest.skip(reason='Mask2Former is not a generative model' )
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
pass
@unittest.skip(reason='Mask2Former does not use token embeddings' )
def lowercase_ ( self ) -> Optional[int]:
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(
reason='Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def lowercase_ ( self ) -> Dict:
'''simple docstring'''
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
pass
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase = model_class(lowerCamelCase__ )
__lowerCamelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCamelCase = [*signature.parameters.keys()]
__lowerCamelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , lowerCamelCase__ )
@slow
def lowercase_ ( self ) -> int:
'''simple docstring'''
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
__lowerCamelCase = MaskaFormerModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
__lowerCamelCase = (self.model_tester.min_size,) * 2
__lowerCamelCase = {
'pixel_values': torch.randn((2, 3, *size) , device=lowerCamelCase__ ),
'mask_labels': torch.randn((2, 10, *size) , device=lowerCamelCase__ ),
'class_labels': torch.zeros(2 , 10 , device=lowerCamelCase__ ).long(),
}
__lowerCamelCase = self.model_tester.get_config()
__lowerCamelCase = MaskaFormerForUniversalSegmentation(lowerCamelCase__ ).to(lowerCamelCase__ )
__lowerCamelCase = model(**lowerCamelCase__ )
self.assertTrue(outputs.loss is not None )
def lowercase_ ( self ) -> Dict:
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(lowerCamelCase__ , **lowerCamelCase__ , output_hidden_states=lowerCamelCase__ )
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase = model_class(lowerCamelCase__ ).to(lowerCamelCase__ )
__lowerCamelCase = model(**lowerCamelCase__ , output_attentions=lowerCamelCase__ )
self.assertTrue(outputs.attentions is not None )
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
if not self.model_tester.is_training:
return
__lowerCamelCase = self.all_model_classes[1]
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs()
__lowerCamelCase = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.train()
__lowerCamelCase = model(lowerCamelCase__ , mask_labels=lowerCamelCase__ , class_labels=lowerCamelCase__ ).loss
loss.backward()
def lowercase_ ( self ) -> Dict:
'''simple docstring'''
__lowerCamelCase = self.all_model_classes[1]
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs()
__lowerCamelCase = True
__lowerCamelCase = True
__lowerCamelCase = model_class(lowerCamelCase__ ).to(lowerCamelCase__ )
model.train()
__lowerCamelCase = model(lowerCamelCase__ , mask_labels=lowerCamelCase__ , class_labels=lowerCamelCase__ )
__lowerCamelCase = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
__lowerCamelCase = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
__lowerCamelCase = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
__lowerCamelCase = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=lowerCamelCase__ )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
__A = 1e-4
def lowerCamelCase_ ( ) -> List[Any]:
"""simple docstring"""
__lowerCamelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_vision
@slow
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def lowercase_ ( self ) -> Dict:
'''simple docstring'''
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def lowercase_ ( self ) -> Any:
'''simple docstring'''
__lowerCamelCase = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(lowerCamelCase__ )
__lowerCamelCase = self.default_image_processor
__lowerCamelCase = prepare_img()
__lowerCamelCase = image_processor(lowerCamelCase__ , return_tensors='pt' ).to(lowerCamelCase__ )
__lowerCamelCase = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(lowerCamelCase__ , (1, 3, 384, 384) )
with torch.no_grad():
__lowerCamelCase = model(**lowerCamelCase__ )
__lowerCamelCase = torch.tensor(
[[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]] ).to(lowerCamelCase__ )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
__lowerCamelCase = torch.tensor(
[[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]] ).to(lowerCamelCase__ )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
__lowerCamelCase = torch.tensor(
[[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]] ).to(lowerCamelCase__ )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
__lowerCamelCase = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(lowerCamelCase__ ).eval()
__lowerCamelCase = self.default_image_processor
__lowerCamelCase = prepare_img()
__lowerCamelCase = image_processor(lowerCamelCase__ , return_tensors='pt' ).to(lowerCamelCase__ )
__lowerCamelCase = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(lowerCamelCase__ , (1, 3, 384, 384) )
with torch.no_grad():
__lowerCamelCase = model(**lowerCamelCase__ )
# masks_queries_logits
__lowerCamelCase = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
__lowerCamelCase = [
[-8.7839, -9.0056, -8.8121],
[-7.4104, -7.0313, -6.5401],
[-6.6105, -6.3427, -6.4675],
]
__lowerCamelCase = torch.tensor(lowerCamelCase__ ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
# class_queries_logits
__lowerCamelCase = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
__lowerCamelCase = torch.tensor(
[
[1.8324, -8.0835, -4.1922],
[0.8450, -9.0050, -3.6053],
[0.3045, -7.7293, -3.0275],
] ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
def lowercase_ ( self ) -> str:
'''simple docstring'''
__lowerCamelCase = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(lowerCamelCase__ ).eval()
__lowerCamelCase = self.default_image_processor
__lowerCamelCase = image_processor(
[np.zeros((3, 800, 1_333) ), np.zeros((3, 800, 1_333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors='pt' , )
__lowerCamelCase = inputs['pixel_values'].to(lowerCamelCase__ )
__lowerCamelCase = [el.to(lowerCamelCase__ ) for el in inputs['mask_labels']]
__lowerCamelCase = [el.to(lowerCamelCase__ ) for el in inputs['class_labels']]
with torch.no_grad():
__lowerCamelCase = model(**lowerCamelCase__ )
self.assertTrue(outputs.loss is not None )
| 348 | 0 |
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def lowerCamelCase_ ( ) -> List[Any]:
"""simple docstring"""
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
__lowerCamelCase = '__test_patch_submodule_mock__'
with patch_submodule(_test_patching , 'os.path.join' , UpperCamelCase__ ):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os , _PatchedModuleObj )
assert isinstance(_test_patching.os.path , _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path , _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os , _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path , _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
# check that everything is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def lowerCamelCase_ ( ) -> Optional[int]:
"""simple docstring"""
assert _test_patching.open is open
__lowerCamelCase = '__test_patch_submodule_builtin_mock__'
# _test_patching has "open" in its globals
assert _test_patching.open is open
with patch_submodule(_test_patching , 'open' , UpperCamelCase__ ):
assert _test_patching.open is mock
# check that everything is back to normal when the patch is over
assert _test_patching.open is open
def lowerCamelCase_ ( ) -> Dict:
"""simple docstring"""
__lowerCamelCase = '__test_patch_submodule_missing_mock__'
with patch_submodule(_test_patching , 'pandas.read_csv' , UpperCamelCase__ ):
pass
def lowerCamelCase_ ( ) -> Optional[Any]:
"""simple docstring"""
__lowerCamelCase = '__test_patch_submodule_missing_builtin_mock__'
# _test_patching doesn't have "len" in its globals
assert getattr(_test_patching , 'len' , UpperCamelCase__ ) is None
with patch_submodule(_test_patching , 'len' , UpperCamelCase__ ):
assert _test_patching.len is mock
assert _test_patching.len is len
def lowerCamelCase_ ( ) -> Any:
"""simple docstring"""
__lowerCamelCase = '__test_patch_submodule_start_and_stop_mock__'
__lowerCamelCase = patch_submodule(_test_patching , 'open' , UpperCamelCase__ )
assert _test_patching.open is open
patch.start()
assert _test_patching.open is mock
patch.stop()
assert _test_patching.open is open
def lowerCamelCase_ ( ) -> List[str]:
"""simple docstring"""
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
__lowerCamelCase = '__test_patch_submodule_successive_join__'
__lowerCamelCase = '__test_patch_submodule_successive_dirname__'
__lowerCamelCase = '__test_patch_submodule_successive_rename__'
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
with patch_submodule(_test_patching , 'os.path.join' , UpperCamelCase__ ):
with patch_submodule(_test_patching , 'os.rename' , UpperCamelCase__ ):
with patch_submodule(_test_patching , 'os.path.dirname' , UpperCamelCase__ ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
# try another order
with patch_submodule(_test_patching , 'os.rename' , UpperCamelCase__ ):
with patch_submodule(_test_patching , 'os.path.join' , UpperCamelCase__ ):
with patch_submodule(_test_patching , 'os.path.dirname' , UpperCamelCase__ ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def lowerCamelCase_ ( ) -> str:
"""simple docstring"""
__lowerCamelCase = '__test_patch_submodule_doesnt_exist_mock__'
with patch_submodule(_test_patching , '__module_that_doesn_exist__.__attribute_that_doesn_exist__' , UpperCamelCase__ ):
pass
with patch_submodule(_test_patching , 'os.__attribute_that_doesn_exist__' , UpperCamelCase__ ):
pass
| 355 |
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__A = {
"facebook/mask2former-swin-small-coco-instance": (
"https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"
)
# See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
__A = logging.get_logger(__name__)
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
snake_case_ = '''mask2former'''
snake_case_ = ['''swin''']
snake_case_ = {'''hidden_size''': '''hidden_dim'''}
def __init__( self , lowerCamelCase__ = None , lowerCamelCase__ = 256 , lowerCamelCase__ = 256 , lowerCamelCase__ = 256 , lowerCamelCase__ = 1_024 , lowerCamelCase__ = "relu" , lowerCamelCase__ = 6 , lowerCamelCase__ = 10 , lowerCamelCase__ = 8 , lowerCamelCase__ = 0.0 , lowerCamelCase__ = 2_048 , lowerCamelCase__ = False , lowerCamelCase__ = False , lowerCamelCase__ = 4 , lowerCamelCase__ = 255 , lowerCamelCase__ = 100 , lowerCamelCase__ = 0.1 , lowerCamelCase__ = 2.0 , lowerCamelCase__ = 5.0 , lowerCamelCase__ = 5.0 , lowerCamelCase__ = 12_544 , lowerCamelCase__ = 3.0 , lowerCamelCase__ = 0.75 , lowerCamelCase__ = 0.02 , lowerCamelCase__ = 1.0 , lowerCamelCase__ = True , lowerCamelCase__ = [4, 8, 16, 32] , lowerCamelCase__ = None , **lowerCamelCase__ , ) -> Tuple:
'''simple docstring'''
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.' )
__lowerCamelCase = CONFIG_MAPPING['swin'](
image_size=224 , in_channels=3 , patch_size=4 , embed_dim=96 , depths=[2, 2, 18, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=lowerCamelCase__ , out_features=['stage1', 'stage2', 'stage3', 'stage4'] , )
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
__lowerCamelCase = backbone_config.pop('model_type' )
__lowerCamelCase = CONFIG_MAPPING[backbone_model_type]
__lowerCamelCase = config_class.from_dict(lowerCamelCase__ )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
f"""Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. """
f"""Supported model types: {','.join(self.backbones_supported )}""" )
__lowerCamelCase = backbone_config
__lowerCamelCase = feature_size
__lowerCamelCase = mask_feature_size
__lowerCamelCase = hidden_dim
__lowerCamelCase = encoder_feedforward_dim
__lowerCamelCase = activation_function
__lowerCamelCase = encoder_layers
__lowerCamelCase = decoder_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = dropout
__lowerCamelCase = dim_feedforward
__lowerCamelCase = pre_norm
__lowerCamelCase = enforce_input_projection
__lowerCamelCase = common_stride
__lowerCamelCase = ignore_value
__lowerCamelCase = num_queries
__lowerCamelCase = no_object_weight
__lowerCamelCase = class_weight
__lowerCamelCase = mask_weight
__lowerCamelCase = dice_weight
__lowerCamelCase = train_num_points
__lowerCamelCase = oversample_ratio
__lowerCamelCase = importance_sample_ratio
__lowerCamelCase = init_std
__lowerCamelCase = init_xavier_std
__lowerCamelCase = use_auxiliary_loss
__lowerCamelCase = feature_strides
__lowerCamelCase = output_auxiliary_logits
__lowerCamelCase = decoder_layers
super().__init__(**lowerCamelCase__ )
@classmethod
def lowercase_ ( cls , lowerCamelCase__ , **lowerCamelCase__ ) -> Any:
'''simple docstring'''
return cls(
backbone_config=lowerCamelCase__ , **lowerCamelCase__ , )
def lowercase_ ( self ) -> Dict[str, any]:
'''simple docstring'''
__lowerCamelCase = copy.deepcopy(self.__dict__ )
__lowerCamelCase = self.backbone_config.to_dict()
__lowerCamelCase = self.__class__.model_type
return output
| 348 | 0 |
"""simple docstring"""
__A = "Input must be a string of 8 numbers plus letter"
__A = "TRWAGMYFPDXBNJZSQVHLCKE"
def lowerCamelCase_ ( UpperCamelCase__ : str ) -> bool:
"""simple docstring"""
if not isinstance(UpperCamelCase__ , UpperCamelCase__ ):
__lowerCamelCase = F"""Expected string as input, found {type(UpperCamelCase__ ).__name__}"""
raise TypeError(UpperCamelCase__ )
__lowerCamelCase = spanish_id.replace('-' , '' ).upper()
if len(UpperCamelCase__ ) != 9:
raise ValueError(UpperCamelCase__ )
try:
__lowerCamelCase = int(spanish_id_clean[0:8] )
__lowerCamelCase = spanish_id_clean[8]
except ValueError as ex:
raise ValueError(UpperCamelCase__ ) from ex
if letter.isdigit():
raise ValueError(UpperCamelCase__ )
return letter == LOOKUP_LETTERS[number % 23]
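# Worked example of the mod-23 checksum: for the ID "12345678Z",
# 12345678 % 23 == 14 and LOOKUP_LETTERS[14] == 'Z', so the ID is valid;
# any other trailing letter for that number would be rejected.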
if __name__ == "__main__":
import doctest
doctest.testmod()
| 356 |
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
snake_case_ = 42
class __lowerCAmelCase ( __magic_name__ , __magic_name__ ):
"""simple docstring"""
@register_to_config
def __init__( self , lowerCamelCase__ = 32 , lowerCamelCase__ = 64 , lowerCamelCase__ = 20 , lowerCamelCase__ = 768 , lowerCamelCase__=77 , lowerCamelCase__=4 , lowerCamelCase__ = 0.0 , lowerCamelCase__ = "silu" , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = "linear" , lowerCamelCase__ = "prd" , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , ) -> Tuple:
'''simple docstring'''
super().__init__()
__lowerCamelCase = num_attention_heads
__lowerCamelCase = attention_head_dim
__lowerCamelCase = num_attention_heads * attention_head_dim
__lowerCamelCase = additional_embeddings
__lowerCamelCase = time_embed_dim or inner_dim
__lowerCamelCase = embedding_proj_dim or embedding_dim
__lowerCamelCase = clip_embed_dim or embedding_dim
__lowerCamelCase = Timesteps(lowerCamelCase__ , lowerCamelCase__ , 0 )
__lowerCamelCase = TimestepEmbedding(lowerCamelCase__ , lowerCamelCase__ , out_dim=lowerCamelCase__ , act_fn=lowerCamelCase__ )
__lowerCamelCase = nn.Linear(lowerCamelCase__ , lowerCamelCase__ )
if embedding_proj_norm_type is None:
__lowerCamelCase = None
elif embedding_proj_norm_type == "layer":
__lowerCamelCase = nn.LayerNorm(lowerCamelCase__ )
else:
raise ValueError(f"""unsupported embedding_proj_norm_type: {embedding_proj_norm_type}""" )
__lowerCamelCase = nn.Linear(lowerCamelCase__ , lowerCamelCase__ )
if encoder_hid_proj_type is None:
__lowerCamelCase = None
elif encoder_hid_proj_type == "linear":
__lowerCamelCase = nn.Linear(lowerCamelCase__ , lowerCamelCase__ )
else:
raise ValueError(f"""unsupported encoder_hid_proj_type: {encoder_hid_proj_type}""" )
__lowerCamelCase = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , lowerCamelCase__ ) )
if added_emb_type == "prd":
__lowerCamelCase = nn.Parameter(torch.zeros(1 , 1 , lowerCamelCase__ ) )
elif added_emb_type is None:
__lowerCamelCase = None
else:
raise ValueError(
f"""`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`.""" )
__lowerCamelCase = nn.ModuleList(
[
BasicTransformerBlock(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , dropout=lowerCamelCase__ , activation_fn='gelu' , attention_bias=lowerCamelCase__ , )
for d in range(lowerCamelCase__ )
] )
if norm_in_type == "layer":
__lowerCamelCase = nn.LayerNorm(lowerCamelCase__ )
elif norm_in_type is None:
__lowerCamelCase = None
else:
raise ValueError(f"""Unsupported norm_in_type: {norm_in_type}.""" )
__lowerCamelCase = nn.LayerNorm(lowerCamelCase__ )
__lowerCamelCase = nn.Linear(lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase = torch.full(
[num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -10_000.0 )
causal_attention_mask.triu_(1 )
__lowerCamelCase = causal_attention_mask[None, ...]
self.register_buffer('causal_attention_mask' , lowerCamelCase__ , persistent=lowerCamelCase__ )
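# A small illustration of the causal mask registered above (assuming 3 positions):
# torch.full([3, 3], -10000.0).triu_(1) yields
#   [[    0.0, -10000.0, -10000.0],
#    [    0.0,      0.0, -10000.0],
#    [    0.0,      0.0,      0.0]]
# so, once added to the attention scores, each position can only attend to itself
# and to earlier positions.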
__lowerCamelCase = nn.Parameter(torch.zeros(1 , lowerCamelCase__ ) )
__lowerCamelCase = nn.Parameter(torch.zeros(1 , lowerCamelCase__ ) )
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def lowercase_ ( self ) -> Dict[str, AttentionProcessor]:
'''simple docstring'''
__lowerCamelCase = {}
def fn_recursive_add_processors(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
if hasattr(lowerCamelCase__ , 'set_processor' ):
__lowerCamelCase = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(f"""{name}.{sub_name}""" , lowerCamelCase__ , lowerCamelCase__ )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
return processors
def lowercase_ ( self , lowerCamelCase__ ) -> Tuple:
'''simple docstring'''
__lowerCamelCase = len(self.attn_processors.keys() )
if isinstance(lowerCamelCase__ , lowerCamelCase__ ) and len(lowerCamelCase__ ) != count:
raise ValueError(
f"""A dict of processors was passed, but the number of processors {len(lowerCamelCase__ )} does not match the"""
f""" number of attention layers: {count}. Please make sure to pass {count} processor classes.""" )
def fn_recursive_attn_processor(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
if hasattr(lowerCamelCase__ , 'set_processor' ):
if not isinstance(lowerCamelCase__ , lowerCamelCase__ ):
module.set_processor(lowerCamelCase__ )
else:
module.set_processor(processor.pop(f"""{name}.processor""" ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f"""{name}.{sub_name}""" , lowerCamelCase__ , lowerCamelCase__ )
for name, module in self.named_children():
fn_recursive_attn_processor(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def lowercase_ ( self ) -> Optional[int]:
'''simple docstring'''
self.set_attn_processor(AttnProcessor() )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = True , ) -> int:
'''simple docstring'''
__lowerCamelCase = hidden_states.shape[0]
__lowerCamelCase = timestep
if not torch.is_tensor(lowerCamelCase__ ):
__lowerCamelCase = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device )
elif torch.is_tensor(lowerCamelCase__ ) and len(timesteps.shape ) == 0:
__lowerCamelCase = timesteps[None].to(hidden_states.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
__lowerCamelCase = timesteps * torch.ones(lowerCamelCase__ , dtype=timesteps.dtype , device=timesteps.device )
__lowerCamelCase = self.time_proj(lowerCamelCase__ )
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
__lowerCamelCase = timesteps_projected.to(dtype=self.dtype )
__lowerCamelCase = self.time_embedding(lowerCamelCase__ )
if self.embedding_proj_norm is not None:
__lowerCamelCase = self.embedding_proj_norm(lowerCamelCase__ )
__lowerCamelCase = self.embedding_proj(lowerCamelCase__ )
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
__lowerCamelCase = self.encoder_hidden_states_proj(lowerCamelCase__ )
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError('`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set' )
__lowerCamelCase = self.proj_in(lowerCamelCase__ )
__lowerCamelCase = self.positional_embedding.to(hidden_states.dtype )
__lowerCamelCase = []
__lowerCamelCase = 0
if encoder_hidden_states is not None:
additional_embeds.append(lowerCamelCase__ )
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape ) == 2:
__lowerCamelCase = proj_embeddings[:, None, :]
if len(hidden_states.shape ) == 2:
__lowerCamelCase = hidden_states[:, None, :]
__lowerCamelCase = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
__lowerCamelCase = self.prd_embedding.to(hidden_states.dtype ).expand(lowerCamelCase__ , -1 , -1 )
additional_embeds.append(lowerCamelCase__ )
__lowerCamelCase = torch.cat(
lowerCamelCase__ , dim=1 , )
# Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
__lowerCamelCase = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
__lowerCamelCase = F.pad(
lowerCamelCase__ , (
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
) , value=0.0 , )
__lowerCamelCase = hidden_states + positional_embeddings
if attention_mask is not None:
__lowerCamelCase = (1 - attention_mask.to(hidden_states.dtype )) * -10_000.0
__lowerCamelCase = F.pad(lowerCamelCase__ , (0, self.additional_embeddings) , value=0.0 )
__lowerCamelCase = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
__lowerCamelCase = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 )
if self.norm_in is not None:
__lowerCamelCase = self.norm_in(lowerCamelCase__ )
for block in self.transformer_blocks:
__lowerCamelCase = block(lowerCamelCase__ , attention_mask=lowerCamelCase__ )
__lowerCamelCase = self.norm_out(lowerCamelCase__ )
if self.prd_embedding is not None:
__lowerCamelCase = hidden_states[:, -1]
else:
__lowerCamelCase = hidden_states[:, additional_embeddings_len:]
__lowerCamelCase = self.proj_to_clip_embeddings(lowerCamelCase__ )
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=lowerCamelCase__ )
def lowercase_ ( self , lowerCamelCase__ ) -> List[Any]:
'''simple docstring'''
__lowerCamelCase = (prior_latents * self.clip_std) + self.clip_mean
return prior_latents
| 348 | 0 |
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class __lowerCAmelCase :
"""simple docstring"""
def __init__( self , lowerCamelCase__ , lowerCamelCase__=14 , lowerCamelCase__=7 , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=99 , lowerCamelCase__=32 , lowerCamelCase__=5 , lowerCamelCase__=4 , lowerCamelCase__=37 , lowerCamelCase__="gelu" , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=512 , lowerCamelCase__=16 , lowerCamelCase__=2 , lowerCamelCase__=0.02 , lowerCamelCase__=3 , lowerCamelCase__=4 , lowerCamelCase__=None , ) -> Optional[Any]:
'''simple docstring'''
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = seq_length
__lowerCamelCase = is_training
__lowerCamelCase = use_token_type_ids
__lowerCamelCase = use_input_mask
__lowerCamelCase = use_labels
__lowerCamelCase = use_mc_token_ids
__lowerCamelCase = vocab_size
__lowerCamelCase = hidden_size
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_act
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = type_vocab_size
__lowerCamelCase = type_sequence_label_size
__lowerCamelCase = initializer_range
__lowerCamelCase = num_labels
__lowerCamelCase = num_choices
__lowerCamelCase = scope
__lowerCamelCase = self.vocab_size - 1
def lowercase_ ( self ) -> Dict:
'''simple docstring'''
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCamelCase = None
if self.use_input_mask:
__lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCamelCase = None
if self.use_token_type_ids:
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowerCamelCase = None
if self.use_mc_token_ids:
__lowerCamelCase = ids_tensor([self.batch_size, self.num_choices] , self.seq_length )
__lowerCamelCase = None
__lowerCamelCase = None
__lowerCamelCase = None
if self.use_labels:
__lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowerCamelCase = ids_tensor([self.batch_size] , self.num_choices )
__lowerCamelCase = self.get_config()
__lowerCamelCase = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
return CTRLConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , *lowerCamelCase__ ) -> Any:
'''simple docstring'''
__lowerCamelCase = CTRLModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
model(lowerCamelCase__ , token_type_ids=lowerCamelCase__ , head_mask=lowerCamelCase__ )
model(lowerCamelCase__ , token_type_ids=lowerCamelCase__ )
__lowerCamelCase = model(lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(len(result.past_key_values ) , config.n_layer )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , *lowerCamelCase__ ) -> Tuple:
'''simple docstring'''
__lowerCamelCase = CTRLLMHeadModel(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
__lowerCamelCase = model(lowerCamelCase__ , token_type_ids=lowerCamelCase__ , labels=lowerCamelCase__ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
__lowerCamelCase = self.prepare_config_and_inputs()
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = config_and_inputs
__lowerCamelCase = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'head_mask': head_mask}
return config, inputs_dict
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , *lowerCamelCase__ ) -> Dict:
'''simple docstring'''
__lowerCamelCase = self.num_labels
__lowerCamelCase = CTRLForSequenceClassification(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
__lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCamelCase = model(lowerCamelCase__ , token_type_ids=lowerCamelCase__ , labels=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
@require_torch
class __lowerCAmelCase ( __magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
snake_case_ = (CTRLLMHeadModel,) if is_torch_available() else ()
snake_case_ = (
{
'''feature-extraction''': CTRLModel,
'''text-classification''': CTRLForSequenceClassification,
'''text-generation''': CTRLLMHeadModel,
'''zero-shot''': CTRLForSequenceClassification,
}
if is_torch_available()
else {}
)
snake_case_ = True
snake_case_ = False
snake_case_ = False
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> int:
'''simple docstring'''
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
# config could not be created.
return True
return False
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
__lowerCamelCase = CTRLModelTester(self )
__lowerCamelCase = ConfigTester(self , config_class=lowerCamelCase__ , n_embd=37 )
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
def lowercase_ ( self ) -> Tuple:
'''simple docstring'''
self.config_tester.run_common_tests()
def lowercase_ ( self ) -> str:
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_ctrl_model(*lowerCamelCase__ )
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*lowerCamelCase__ )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def lowercase_ ( self ) -> Any:
'''simple docstring'''
pass
@slow
def lowercase_ ( self ) -> str:
'''simple docstring'''
for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCamelCase = CTRLModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
@unittest.skip('The model doesn\'t support left padding' ) # and it's not used enough to be worth fixing :)
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
pass
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
@slow
def lowercase_ ( self ) -> Tuple:
'''simple docstring'''
__lowerCamelCase = CTRLLMHeadModel.from_pretrained('ctrl' )
model.to(lowerCamelCase__ )
__lowerCamelCase = torch.tensor(
[[11_859, 0, 1_611, 8]] , dtype=torch.long , device=lowerCamelCase__ ) # Legal the president is
__lowerCamelCase = [
11_859,
0,
1_611,
8,
5,
150,
26_449,
2,
19,
348,
469,
3,
2_595,
48,
20_740,
246_533,
246_533,
19,
30,
5,
] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
__lowerCamelCase = model.generate(lowerCamelCase__ , do_sample=lowerCamelCase__ )
self.assertListEqual(output_ids[0].tolist() , lowerCamelCase__ )
| 357 |
import sys
from collections import defaultdict
class __lowerCAmelCase :
"""simple docstring"""
def __init__( self ) -> Union[str, Any]:
'''simple docstring'''
__lowerCamelCase = []
def lowercase_ ( self , lowerCamelCase__ ) -> List[str]:
'''simple docstring'''
return self.node_position[vertex]
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ ) -> int:
'''simple docstring'''
__lowerCamelCase = pos
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
if start > size // 2 - 1:
return
else:
if 2 * start + 2 >= size:
__lowerCamelCase = 2 * start + 1
else:
if heap[2 * start + 1] < heap[2 * start + 2]:
__lowerCamelCase = 2 * start + 1
else:
__lowerCamelCase = 2 * start + 2
if heap[smallest_child] < heap[start]:
__lowerCamelCase , __lowerCamelCase = heap[smallest_child], positions[smallest_child]
__lowerCamelCase , __lowerCamelCase = heap[start], positions[start]
__lowerCamelCase , __lowerCamelCase = temp, tempa
__lowerCamelCase = self.get_position(positions[smallest_child] )
self.set_position(
positions[smallest_child] , self.get_position(positions[start] ) )
self.set_position(positions[start] , lowerCamelCase__ )
self.top_to_bottom(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> int:
'''simple docstring'''
__lowerCamelCase = position[index]
while index != 0:
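# Parent of node i in a 0-indexed binary heap: (i - 2) // 2 if i is even (a right
# child), (i - 1) // 2 if i is odd (a left child); both reduce to (i - 1) // 2,
# e.g. nodes 3 and 4 both have parent 1.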
__lowerCamelCase = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
if val < heap[parent]:
__lowerCamelCase = heap[parent]
__lowerCamelCase = position[parent]
self.set_position(position[parent] , lowerCamelCase__ )
else:
__lowerCamelCase = val
__lowerCamelCase = temp
self.set_position(lowerCamelCase__ , lowerCamelCase__ )
break
__lowerCamelCase = parent
else:
__lowerCamelCase = val
__lowerCamelCase = temp
self.set_position(lowerCamelCase__ , 0 )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ ) -> str:
'''simple docstring'''
__lowerCamelCase = len(lowerCamelCase__ ) // 2 - 1
for i in range(lowerCamelCase__ , -1 , -1 ):
self.top_to_bottom(lowerCamelCase__ , lowerCamelCase__ , len(lowerCamelCase__ ) , lowerCamelCase__ )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
__lowerCamelCase = positions[0]
__lowerCamelCase = sys.maxsize
self.top_to_bottom(lowerCamelCase__ , 0 , len(lowerCamelCase__ ) , lowerCamelCase__ )
return temp
def lowerCamelCase_ ( UpperCamelCase__ : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__lowerCamelCase = Heap()
__lowerCamelCase = [0] * len(UpperCamelCase__ )
__lowerCamelCase = [-1] * len(UpperCamelCase__ ) # Neighboring Tree Vertex of selected vertex
# Minimum distance from each vertex not yet in the tree to its nearest
# neighbor in the partially formed tree
__lowerCamelCase = [] # Heap of Distance of vertices from their neighboring vertex
__lowerCamelCase = []
for vertex in range(len(UpperCamelCase__ ) ):
distance_tv.append(sys.maxsize )
positions.append(UpperCamelCase__ )
heap.node_position.append(UpperCamelCase__ )
__lowerCamelCase = []
__lowerCamelCase = 1
__lowerCamelCase = sys.maxsize
for neighbor, distance in adjacency_list[0]:
__lowerCamelCase = 0
__lowerCamelCase = distance
heap.heapify(UpperCamelCase__ , UpperCamelCase__ )
for _ in range(1 , len(UpperCamelCase__ ) ):
__lowerCamelCase = heap.delete_minimum(UpperCamelCase__ , UpperCamelCase__ )
if visited[vertex] == 0:
tree_edges.append((nbr_tv[vertex], vertex) )
__lowerCamelCase = 1
for neighbor, distance in adjacency_list[vertex]:
if (
visited[neighbor] == 0
and distance < distance_tv[heap.get_position(UpperCamelCase__ )]
):
__lowerCamelCase = distance
heap.bottom_to_top(
UpperCamelCase__ , heap.get_position(UpperCamelCase__ ) , UpperCamelCase__ , UpperCamelCase__ )
__lowerCamelCase = vertex
return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
__A = int(input("Enter number of edges: ").strip())
__A = defaultdict(list)
for _ in range(edges_number):
__A = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
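    # Illustrative usage sketch (not part of the original script; it reuses the
    # name `prisms_algorithm` from the call above). For the tiny weighted graph
    # below, the expected minimum spanning tree edges are:
    #
    #     example = defaultdict(list)
    #     for u, v, w in [(0, 1, 4), (0, 2, 1), (1, 2, 2), (1, 3, 5)]:
    #         example[u].append([v, w])
    #         example[v].append([u, w])
    #     print(prisms_algorithm(example))  # expected: [(0, 2), (2, 1), (1, 3)]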
| 348 | 0 |
import base64
import io
import json
import os
from copy import deepcopy
from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler
class __lowerCAmelCase :
"""simple docstring"""
def __init__( self , lowerCamelCase__ ) -> Optional[int]:
'''simple docstring'''
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
# Don't modify user's data should they want to reuse it (e.g. in tests), because once we
# modified it, it will not be accepted here again, since `auto` values would have been overridden
__lowerCamelCase = deepcopy(lowerCamelCase__ )
elif os.path.exists(lowerCamelCase__ ):
with io.open(lowerCamelCase__ , 'r' , encoding='utf-8' ) as f:
__lowerCamelCase = json.load(lowerCamelCase__ )
else:
try:
                __lowerCamelCase = base64.urlsafe_b64decode(lowerCamelCase__ ).decode('utf-8' )
__lowerCamelCase = json.loads(lowerCamelCase__ )
except (UnicodeDecodeError, AttributeError, ValueError):
raise ValueError(
f"""Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}""" )
__lowerCamelCase = config
self.set_stage_and_offload()
def lowercase_ ( self ) -> Dict:
'''simple docstring'''
__lowerCamelCase = self.get_value('zero_optimization.stage' , -1 )
# offload
__lowerCamelCase = False
        if self.is_zero2() or self.is_zero3():
__lowerCamelCase = set(['cpu', 'nvme'] )
__lowerCamelCase = set(
[
self.get_value('zero_optimization.offload_optimizer.device' ),
self.get_value('zero_optimization.offload_param.device' ),
] )
if len(offload_devices & offload_devices_valid ) > 0:
__lowerCamelCase = True
def lowercase_ ( self , lowerCamelCase__ ) -> Optional[int]:
'''simple docstring'''
__lowerCamelCase = self.config
# find the config node of interest if it exists
__lowerCamelCase = ds_key_long.split('.' )
__lowerCamelCase = nodes.pop()
for node in nodes:
__lowerCamelCase = config.get(lowerCamelCase__ )
if config is None:
return None, ds_key
return config, ds_key
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__=None ) -> List[str]:
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase = self.find_config_node(lowerCamelCase__ )
if config is None:
return default
return config.get(lowerCamelCase__ , lowerCamelCase__ )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__=False ) -> Any:
'''simple docstring'''
__lowerCamelCase = self.config
# find the config node of interest if it exists
__lowerCamelCase = ds_key_long.split('.' )
for node in nodes:
__lowerCamelCase = config
__lowerCamelCase = config.get(lowerCamelCase__ )
if config is None:
if must_exist:
raise ValueError(f"""Can't find {ds_key_long} entry in the config: {self.config}""" )
else:
return
# if found remove it
if parent_config is not None:
parent_config.pop(lowerCamelCase__ )
def lowercase_ ( self , lowerCamelCase__ ) -> Tuple:
'''simple docstring'''
__lowerCamelCase = self.get_value(lowerCamelCase__ )
return False if value is None else bool(lowerCamelCase__ )
def lowercase_ ( self , lowerCamelCase__ ) -> Tuple:
'''simple docstring'''
__lowerCamelCase = self.get_value(lowerCamelCase__ )
return False if value is None else not bool(lowerCamelCase__ )
def lowercase_ ( self ) -> str:
'''simple docstring'''
return self._stage == 2
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
return self._stage == 3
def lowercase_ ( self ) -> List[Any]:
'''simple docstring'''
return self._offload
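# Minimal usage sketch (assumption: this class mirrors the upstream
# `HfDeepSpeedConfig` helper; the illustrative names below are the original
# ones, not the placeholders used in this dump):
#
#     cfg = HfDeepSpeedConfig({"zero_optimization": {"stage": 3}})
#     cfg.get_value("zero_optimization.stage")  # -> 3
#     cfg.is_zero3()                            # -> True, so ZeRO-3 hooks apply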
class __lowerCAmelCase :
"""simple docstring"""
def __init__( self , lowerCamelCase__ ) -> str:
'''simple docstring'''
__lowerCamelCase = engine
def lowercase_ ( self , lowerCamelCase__ , **lowerCamelCase__ ) -> Tuple:
'''simple docstring'''
self.engine.backward(lowerCamelCase__ , **lowerCamelCase__ )
# Deepspeed's `engine.step` performs the following operations:
# - gradient accumulation check
# - gradient clipping
# - optimizer step
# - zero grad
# - checking overflow
# - lr_scheduler step (only if engine.lr_scheduler is not None)
self.engine.step()
# and this plugin overrides the above calls with no-ops when Accelerate runs under
# Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple
# training loop that works transparently under many training regimes.
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
def __init__( self , lowerCamelCase__ ) -> Optional[int]:
'''simple docstring'''
super().__init__(lowerCamelCase__ , device_placement=lowerCamelCase__ , scaler=lowerCamelCase__ )
__lowerCamelCase = hasattr(self.optimizer , 'overflow' )
def lowercase_ ( self , lowerCamelCase__=None ) -> Tuple:
'''simple docstring'''
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
def lowercase_ ( self ) -> str:
'''simple docstring'''
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
@property
def lowercase_ ( self ) -> Any:
'''simple docstring'''
if self.__has_overflow__:
return self.optimizer.overflow
return False
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
def __init__( self , lowerCamelCase__ , lowerCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(lowerCamelCase__ , lowerCamelCase__ )
def lowercase_ ( self ) -> Any:
'''simple docstring'''
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
class __lowerCAmelCase :
"""simple docstring"""
def __init__( self , lowerCamelCase__ , lowerCamelCase__=0.0_01 , lowerCamelCase__=0 , **lowerCamelCase__ ) -> Tuple:
'''simple docstring'''
__lowerCamelCase = params
__lowerCamelCase = lr
__lowerCamelCase = weight_decay
__lowerCamelCase = kwargs
class __lowerCAmelCase :
"""simple docstring"""
def __init__( self , lowerCamelCase__ , lowerCamelCase__=None , lowerCamelCase__=0 , **lowerCamelCase__ ) -> List[Any]:
'''simple docstring'''
__lowerCamelCase = optimizer
__lowerCamelCase = total_num_steps
__lowerCamelCase = warmup_num_steps
__lowerCamelCase = kwargs
| 358 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=__magic_name__ )
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
snake_case_ = field(default='''question-answering-extractive''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
snake_case_ = Features({'''question''': Value('''string''' ), '''context''': Value('''string''' )} )
snake_case_ = Features(
{
'''answers''': Sequence(
{
'''text''': Value('''string''' ),
'''answer_start''': Value('''int32''' ),
} )
} )
snake_case_ = "question"
snake_case_ = "context"
snake_case_ = "answers"
@property
def lowercase_ ( self ) -> Dict[str, str]:
'''simple docstring'''
return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
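# Usage sketch (class name assumed; mirrors the `datasets` task-template API):
# remap a SQuAD-style dataset whose columns use different names.
#
#     template = QuestionAnsweringExtractive(
#         question_column="query", context_column="passage", answers_column="gold"
#     )
#     template.column_mapping  # -> {"query": "question", "passage": "context", "gold": "answers"}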
| 348 | 0 |
import math
def lowerCamelCase_ ( UpperCamelCase__ : Any , UpperCamelCase__ : Optional[Any] ):
"""simple docstring"""
if 0 not in (x, y):
        # We use the identity log10(x**y) = y * log10(x), where 10 is the base.
        return y * math.log10(UpperCamelCase__ )
else:
if x == 0: # 0 raised to any number is 0
return 0
elif y == 0:
return 1 # any number raised to 0 is 1
raise AssertionError('This should never happen' )
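# Illustrative values (using the original function name `res`, as in the calls below):
#
#     res(2, 10)  # ~3.0103, since log10(2**10) = 10 * log10(2)
#     res(0, 5)   # 0, because 0 raised to any positive power is 0
#     res(5, 0)   # 1, because any number raised to 0 is 1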
if __name__ == "__main__": # Main function
# Read two numbers from input and typecast them to int using map function.
# Here x is the base and y is the power.
__A = "Enter the base and the power separated by a comma: "
__A , __A = map(int, input(prompt).split(","))
__A , __A = map(int, input(prompt).split(","))
# We find the log of each number, using the function res(), which takes two
# arguments.
__A = res(xa, ya)
__A = res(xa, ya)
# We check for the largest number
if resa > resa:
print("Largest number is", xa, "^", ya)
elif resa > resa:
print("Largest number is", xa, "^", ya)
else:
print("Both are equal")
| 359 |
import requests
__A = "https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey="
def lowerCamelCase_ ( UpperCamelCase__ : str ) -> None:
"""simple docstring"""
__lowerCamelCase = requests.get(_NEWS_API + bbc_news_api_key ).json()
# each article in the list is a dict
for i, article in enumerate(bbc_news_page['articles'] , 1 ):
print(F"""{i}.) {article['title']}""" )
if __name__ == "__main__":
fetch_bbc_news(bbc_news_api_key="<Your BBC News API key goes here>")
| 348 | 0 |
def lowerCamelCase_ ( UpperCamelCase__ : int = 1000 ) -> int:
"""simple docstring"""
__lowerCamelCase = -1
__lowerCamelCase = 0
for a in range(1 , n // 3 ):
# Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
__lowerCamelCase = (n * n - 2 * a * n) // (2 * n - 2 * a)
__lowerCamelCase = n - a - b
if c * c == (a * a + b * b):
__lowerCamelCase = a * b * c
if candidate >= product:
__lowerCamelCase = candidate
return product
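# Worked example (illustrative): for n = 12 the only Pythagorean triplet with
# a + b + c = 12 is (3, 4, 5), so solution(12) returns 3 * 4 * 5 = 60.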
if __name__ == "__main__":
print(f'''{solution() = }''')
| 360 |
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
__A = logging.get_logger(__name__)
__A = TypeVar("DatasetType", Dataset, IterableDataset)
def lowerCamelCase_ ( UpperCamelCase__ : List[DatasetType] , UpperCamelCase__ : Optional[List[float]] = None , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : Optional[DatasetInfo] = None , UpperCamelCase__ : Optional[NamedSplit] = None , UpperCamelCase__ : Literal["first_exhausted", "all_exhausted"] = "first_exhausted" , ) -> DatasetType:
"""simple docstring"""
from .arrow_dataset import Dataset
from .iterable_dataset import IterableDataset
if not datasets:
raise ValueError('Unable to interleave an empty list of datasets.' )
for i, dataset in enumerate(UpperCamelCase__ ):
if not isinstance(UpperCamelCase__ , (Dataset, IterableDataset) ):
if isinstance(UpperCamelCase__ , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """
'is an empty dataset dictionary.' )
raise ValueError(
F"""Dataset at position {i} has at least one split: {list(UpperCamelCase__ )}\n"""
F"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(UpperCamelCase__ ) )}']""" )
raise ValueError(
F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(UpperCamelCase__ ).__name__}.""" )
if i == 0:
__lowerCamelCase , __lowerCamelCase = (
(Dataset, IterableDataset) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else (IterableDataset, Dataset)
)
elif not isinstance(UpperCamelCase__ , UpperCamelCase__ ):
raise ValueError(
F"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.""" )
if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
raise ValueError(F"""{stopping_strategy} is not supported. Please enter a valid stopping_strategy.""" )
if dataset_type is Dataset:
return _interleave_map_style_datasets(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , info=UpperCamelCase__ , split=UpperCamelCase__ , stopping_strategy=UpperCamelCase__ )
else:
return _interleave_iterable_datasets(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , info=UpperCamelCase__ , split=UpperCamelCase__ , stopping_strategy=UpperCamelCase__ )
def lowerCamelCase_ ( UpperCamelCase__ : List[DatasetType] , UpperCamelCase__ : Optional[DatasetInfo] = None , UpperCamelCase__ : Optional[NamedSplit] = None , UpperCamelCase__ : int = 0 , ) -> DatasetType:
"""simple docstring"""
if not dsets:
raise ValueError('Unable to concatenate an empty list of datasets.' )
for i, dataset in enumerate(UpperCamelCase__ ):
if not isinstance(UpperCamelCase__ , (Dataset, IterableDataset) ):
if isinstance(UpperCamelCase__ , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """
'is an empty dataset dictionary.' )
raise ValueError(
F"""Dataset at position {i} has at least one split: {list(UpperCamelCase__ )}\n"""
F"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(UpperCamelCase__ ) )}']""" )
raise ValueError(
F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(UpperCamelCase__ ).__name__}.""" )
if i == 0:
__lowerCamelCase , __lowerCamelCase = (
(Dataset, IterableDataset) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else (IterableDataset, Dataset)
)
elif not isinstance(UpperCamelCase__ , UpperCamelCase__ ):
raise ValueError(
F"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.""" )
if dataset_type is Dataset:
return _concatenate_map_style_datasets(UpperCamelCase__ , info=UpperCamelCase__ , split=UpperCamelCase__ , axis=UpperCamelCase__ )
else:
return _concatenate_iterable_datasets(UpperCamelCase__ , info=UpperCamelCase__ , split=UpperCamelCase__ , axis=UpperCamelCase__ )
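# Usage sketch (illustrative; `interleave_datasets` and `concatenate_datasets`
# are the public names these helpers back):
#
#     from datasets import Dataset
#     d1 = Dataset.from_dict({"a": [0, 1, 2]})
#     d2 = Dataset.from_dict({"a": [10, 11, 12]})
#     interleave_datasets([d1, d2])["a"]    # [0, 10, 1, 11, 2, 12]
#     concatenate_datasets([d1, d2])["a"]   # [0, 1, 2, 10, 11, 12]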
| 348 | 0 |
"""simple docstring"""
from __future__ import annotations
def lowerCamelCase_ ( UpperCamelCase__ : list , UpperCamelCase__ : int | None = None , UpperCamelCase__ : int | None = None ) -> None:
"""simple docstring"""
if start is None:
__lowerCamelCase = 0
if end is None:
__lowerCamelCase = len(UpperCamelCase__ ) - 1
if start >= end:
return
__lowerCamelCase = (start + end) // 2
slowsort(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
slowsort(UpperCamelCase__ , mid + 1 , UpperCamelCase__ )
if sequence[end] < sequence[mid]:
__lowerCamelCase , __lowerCamelCase = sequence[mid], sequence[end]
slowsort(UpperCamelCase__ , UpperCamelCase__ , end - 1 )
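# In-place usage sketch (illustrative, using the original name `slowsort`):
#
#     data = [5, 3, 1, 4, 2]
#     slowsort(data)
#     print(data)  # [1, 2, 3, 4, 5]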
if __name__ == "__main__":
from doctest import testmod
testmod()
| 361 |
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
    T5EncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
__A = logging.get_logger(__name__)
__A = ["model.decoder.embed_positions.weights"]
def lowerCamelCase_ ( UpperCamelCase__ : List[Any] ) -> List[Any]:
"""simple docstring"""
if "emb" in name:
__lowerCamelCase = name.replace('emb' , 'model.decoder.embed_tokens' )
if "transformer" in name:
__lowerCamelCase = name.replace('transformer' , 'model.decoder' )
if "cross_attention" in name:
__lowerCamelCase = name.replace('cross_attention' , 'encoder_attn' )
if "linear1" in name:
__lowerCamelCase = name.replace('linear1' , 'fc1' )
if "linear2" in name:
__lowerCamelCase = name.replace('linear2' , 'fc2' )
if "norm1" in name:
__lowerCamelCase = name.replace('norm1' , 'self_attn_layer_norm' )
if "norm_cross" in name:
__lowerCamelCase = name.replace('norm_cross' , 'encoder_attn_layer_norm' )
if "norm2" in name:
__lowerCamelCase = name.replace('norm2' , 'final_layer_norm' )
if "out_norm" in name:
__lowerCamelCase = name.replace('out_norm' , 'model.decoder.layer_norm' )
if "linears" in name:
__lowerCamelCase = name.replace('linears' , 'lm_heads' )
if "condition_provider.conditioners.description.output_proj" in name:
__lowerCamelCase = name.replace('condition_provider.conditioners.description.output_proj' , 'enc_to_dec_proj' )
return name
def lowerCamelCase_ ( UpperCamelCase__ : OrderedDict , UpperCamelCase__ : int ) -> Tuple[Dict, Dict]:
"""simple docstring"""
__lowerCamelCase = list(state_dict.keys() )
__lowerCamelCase = {}
for key in keys:
__lowerCamelCase = state_dict.pop(UpperCamelCase__ )
__lowerCamelCase = rename_keys(UpperCamelCase__ )
if "in_proj_weight" in key:
# split fused qkv proj
__lowerCamelCase = val[:hidden_size, :]
__lowerCamelCase = val[hidden_size : 2 * hidden_size, :]
__lowerCamelCase = val[-hidden_size:, :]
elif "enc_to_dec_proj" in key:
__lowerCamelCase = val
else:
__lowerCamelCase = val
return state_dict, enc_dec_proj_state_dict
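# Shape sketch (illustrative) for the fused-QKV split above: an `in_proj_weight`
# of shape (3 * hidden_size, hidden_size) is sliced row-wise into equal q/k/v
# projections, e.g. with hidden_size = 4:
#
#     import torch
#     fused = torch.randn(3 * 4, 4)
#     q, k, v = fused[:4, :], fused[4:8, :], fused[-4:, :]  # each (4, 4)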
def lowerCamelCase_ ( UpperCamelCase__ : str ) -> MusicgenDecoderConfig:
"""simple docstring"""
if checkpoint == "small":
# default config values
__lowerCamelCase = 1024
__lowerCamelCase = 24
__lowerCamelCase = 16
elif checkpoint == "medium":
__lowerCamelCase = 1536
__lowerCamelCase = 48
__lowerCamelCase = 24
elif checkpoint == "large":
__lowerCamelCase = 2048
__lowerCamelCase = 48
__lowerCamelCase = 32
else:
raise ValueError(F"""Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.""" )
__lowerCamelCase = MusicgenDecoderConfig(
hidden_size=UpperCamelCase__ , ffn_dim=hidden_size * 4 , num_hidden_layers=UpperCamelCase__ , num_attention_heads=UpperCamelCase__ , )
return config
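# For example, per the mapping above, checkpoint "small" yields hidden_size=1024,
# num_hidden_layers=24, num_attention_heads=16, and ffn_dim = 4 * 1024 = 4096.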
@torch.no_grad()
def lowerCamelCase_ ( UpperCamelCase__ : Any , UpperCamelCase__ : Dict=None , UpperCamelCase__ : Dict=None , UpperCamelCase__ : Optional[int]="cpu" ) -> List[Any]:
"""simple docstring"""
__lowerCamelCase = MusicGen.get_pretrained(UpperCamelCase__ , device=UpperCamelCase__ )
__lowerCamelCase = decoder_config_from_checkpoint(UpperCamelCase__ )
__lowerCamelCase = fairseq_model.lm.state_dict()
__lowerCamelCase , __lowerCamelCase = rename_state_dict(
UpperCamelCase__ , hidden_size=decoder_config.hidden_size )
    __lowerCamelCase = T5EncoderModel.from_pretrained('t5-base' )
__lowerCamelCase = EncodecModel.from_pretrained('facebook/encodec_32khz' )
__lowerCamelCase = MusicgenForCausalLM(UpperCamelCase__ ).eval()
# load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
__lowerCamelCase , __lowerCamelCase = decoder.load_state_dict(UpperCamelCase__ , strict=UpperCamelCase__ )
for key in missing_keys.copy():
if key.startswith(('text_encoder', 'audio_encoder') ) or key in EXPECTED_MISSING_KEYS:
missing_keys.remove(UpperCamelCase__ )
if len(UpperCamelCase__ ) > 0:
raise ValueError(F"""Missing key(s) in state_dict: {missing_keys}""" )
if len(UpperCamelCase__ ) > 0:
raise ValueError(F"""Unexpected key(s) in state_dict: {unexpected_keys}""" )
# init the composite model
__lowerCamelCase = MusicgenForConditionalGeneration(text_encoder=UpperCamelCase__ , audio_encoder=UpperCamelCase__ , decoder=UpperCamelCase__ )
# load the pre-trained enc-dec projection (from the decoder state dict)
model.enc_to_dec_proj.load_state_dict(UpperCamelCase__ )
# check we can do a forward pass
__lowerCamelCase = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
__lowerCamelCase = input_ids.reshape(2 * 4 , -1 )
with torch.no_grad():
__lowerCamelCase = model(input_ids=UpperCamelCase__ , decoder_input_ids=UpperCamelCase__ ).logits
if logits.shape != (8, 1, 2048):
raise ValueError('Incorrect shape for logits' )
# now construct the processor
__lowerCamelCase = AutoTokenizer.from_pretrained('t5-base' )
__lowerCamelCase = AutoFeatureExtractor.from_pretrained('facebook/encodec_32khz' , padding_side='left' )
__lowerCamelCase = MusicgenProcessor(feature_extractor=UpperCamelCase__ , tokenizer=UpperCamelCase__ )
# set the appropriate bos/pad token ids
__lowerCamelCase = 2048
__lowerCamelCase = 2048
# set other default generation config params
__lowerCamelCase = int(30 * audio_encoder.config.frame_rate )
__lowerCamelCase = True
__lowerCamelCase = 3.0
if pytorch_dump_folder is not None:
Path(UpperCamelCase__ ).mkdir(exist_ok=UpperCamelCase__ )
logger.info(F"""Saving model {checkpoint} to {pytorch_dump_folder}""" )
model.save_pretrained(UpperCamelCase__ )
processor.save_pretrained(UpperCamelCase__ )
if repo_id:
logger.info(F"""Pushing model {checkpoint} to {repo_id}""" )
model.push_to_hub(UpperCamelCase__ )
processor.push_to_hub(UpperCamelCase__ )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint",
default="small",
type=str,
help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.",
)
parser.add_argument(
"--pytorch_dump_folder",
required=True,
default=None,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
parser.add_argument(
"--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
)
__A = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
| 348 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
"facebook/xlm-roberta-xl": "https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json",
"facebook/xlm-roberta-xxl": "https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json",
# See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
snake_case_ = '''xlm-roberta-xl'''
def __init__( self , lowerCamelCase__=250_880 , lowerCamelCase__=2_560 , lowerCamelCase__=36 , lowerCamelCase__=32 , lowerCamelCase__=10_240 , lowerCamelCase__="gelu" , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=514 , lowerCamelCase__=1 , lowerCamelCase__=0.02 , lowerCamelCase__=1e-05 , lowerCamelCase__=1 , lowerCamelCase__=0 , lowerCamelCase__=2 , lowerCamelCase__="absolute" , lowerCamelCase__=True , lowerCamelCase__=None , **lowerCamelCase__ , ) -> List[str]:
'''simple docstring'''
super().__init__(pad_token_id=lowerCamelCase__ , bos_token_id=lowerCamelCase__ , eos_token_id=lowerCamelCase__ , **lowerCamelCase__ )
__lowerCamelCase = vocab_size
__lowerCamelCase = hidden_size
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = hidden_act
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = type_vocab_size
__lowerCamelCase = initializer_range
__lowerCamelCase = layer_norm_eps
__lowerCamelCase = position_embedding_type
__lowerCamelCase = use_cache
__lowerCamelCase = classifier_dropout
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
@property
def lowercase_ ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
__lowerCamelCase = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
__lowerCamelCase = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
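# For the default task, the property above resolves to (illustrative):
#
#     OrderedDict([
#         ("input_ids", {0: "batch", 1: "sequence"}),
#         ("attention_mask", {0: "batch", 1: "sequence"}),
#     ])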
| 362 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
"asapp/sew-d-tiny-100k": "https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json",
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
snake_case_ = '''sew-d'''
def __init__( self , lowerCamelCase__=32 , lowerCamelCase__=768 , lowerCamelCase__=12 , lowerCamelCase__=12 , lowerCamelCase__=3_072 , lowerCamelCase__=2 , lowerCamelCase__=512 , lowerCamelCase__=256 , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=("p2c", "c2p") , lowerCamelCase__="layer_norm" , lowerCamelCase__="gelu_python" , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=0.0 , lowerCamelCase__=0.1 , lowerCamelCase__=0.02 , lowerCamelCase__=1e-7 , lowerCamelCase__=1e-5 , lowerCamelCase__="group" , lowerCamelCase__="gelu" , lowerCamelCase__=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , lowerCamelCase__=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , lowerCamelCase__=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , lowerCamelCase__=False , lowerCamelCase__=128 , lowerCamelCase__=16 , lowerCamelCase__=True , lowerCamelCase__=0.05 , lowerCamelCase__=10 , lowerCamelCase__=2 , lowerCamelCase__=0.0 , lowerCamelCase__=10 , lowerCamelCase__=0 , lowerCamelCase__="mean" , lowerCamelCase__=False , lowerCamelCase__=False , lowerCamelCase__=256 , lowerCamelCase__=0 , lowerCamelCase__=1 , lowerCamelCase__=2 , **lowerCamelCase__ , ) -> Any:
'''simple docstring'''
super().__init__(**lowerCamelCase__ , pad_token_id=lowerCamelCase__ , bos_token_id=lowerCamelCase__ , eos_token_id=lowerCamelCase__ )
__lowerCamelCase = hidden_size
__lowerCamelCase = feat_extract_norm
__lowerCamelCase = feat_extract_activation
__lowerCamelCase = list(lowerCamelCase__ )
__lowerCamelCase = list(lowerCamelCase__ )
__lowerCamelCase = list(lowerCamelCase__ )
__lowerCamelCase = conv_bias
__lowerCamelCase = num_conv_pos_embeddings
__lowerCamelCase = num_conv_pos_embedding_groups
__lowerCamelCase = len(self.conv_dim )
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = intermediate_size
__lowerCamelCase = squeeze_factor
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = position_buckets
__lowerCamelCase = share_att_key
__lowerCamelCase = relative_attention
__lowerCamelCase = norm_rel_ebd
__lowerCamelCase = list(lowerCamelCase__ )
__lowerCamelCase = hidden_act
__lowerCamelCase = num_attention_heads
__lowerCamelCase = hidden_dropout
__lowerCamelCase = attention_dropout
__lowerCamelCase = activation_dropout
__lowerCamelCase = feat_proj_dropout
__lowerCamelCase = final_dropout
__lowerCamelCase = layer_norm_eps
__lowerCamelCase = feature_layer_norm_eps
__lowerCamelCase = initializer_range
__lowerCamelCase = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
            'Configuration for convolutional layers is incorrect. '
'It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,'
f"""but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)"""
f"""= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__lowerCamelCase = apply_spec_augment
__lowerCamelCase = mask_time_prob
__lowerCamelCase = mask_time_length
__lowerCamelCase = mask_time_min_masks
__lowerCamelCase = mask_feature_prob
__lowerCamelCase = mask_feature_length
__lowerCamelCase = mask_feature_min_masks
# ctc loss
__lowerCamelCase = ctc_loss_reduction
__lowerCamelCase = ctc_zero_infinity
# sequence classification
__lowerCamelCase = use_weighted_layer_sum
__lowerCamelCase = classifier_proj_size
@property
def lowercase_ ( self ) -> Any:
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1 )
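# The property above is the product of all convolutional strides, i.e. the total
# downsampling factor of the feature encoder. With the default strides
# (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) it evaluates to 5 * 2**6 = 320, so one
# encoder frame is produced per 320 input samples.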
| 348 | 0 |
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
__A = data_utils.TransfoXLTokenizer
__A = data_utils.TransfoXLCorpus
__A = data_utils
__A = data_utils
def lowerCamelCase_ ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
if transfo_xl_dataset_file:
# Convert a pre-processed corpus (see original TensorFlow repo)
with open(UpperCamelCase__ , 'rb' ) as fp:
__lowerCamelCase = pickle.load(UpperCamelCase__ , encoding='latin1' )
# Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
__lowerCamelCase = pytorch_dump_folder_path + '/' + VOCAB_FILES_NAMES['pretrained_vocab_file']
print(F"""Save vocabulary to {pytorch_vocab_dump_path}""" )
__lowerCamelCase = corpus.vocab.__dict__
torch.save(UpperCamelCase__ , UpperCamelCase__ )
__lowerCamelCase = corpus.__dict__
corpus_dict_no_vocab.pop('vocab' , UpperCamelCase__ )
__lowerCamelCase = pytorch_dump_folder_path + '/' + CORPUS_NAME
print(F"""Save dataset to {pytorch_dataset_dump_path}""" )
torch.save(UpperCamelCase__ , UpperCamelCase__ )
if tf_checkpoint_path:
# Convert a pre-trained TensorFlow model
__lowerCamelCase = os.path.abspath(UpperCamelCase__ )
__lowerCamelCase = os.path.abspath(UpperCamelCase__ )
print(F"""Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.""" )
# Initialise PyTorch model
if transfo_xl_config_file == "":
__lowerCamelCase = TransfoXLConfig()
else:
__lowerCamelCase = TransfoXLConfig.from_json_file(UpperCamelCase__ )
print(F"""Building PyTorch model from configuration: {config}""" )
__lowerCamelCase = TransfoXLLMHeadModel(UpperCamelCase__ )
__lowerCamelCase = load_tf_weights_in_transfo_xl(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# Save pytorch-model
__lowerCamelCase = os.path.join(UpperCamelCase__ , UpperCamelCase__ )
__lowerCamelCase = os.path.join(UpperCamelCase__ , UpperCamelCase__ )
print(F"""Save PyTorch model to {os.path.abspath(UpperCamelCase__ )}""" )
torch.save(model.state_dict() , UpperCamelCase__ )
print(F"""Save configuration file to {os.path.abspath(UpperCamelCase__ )}""" )
with open(UpperCamelCase__ , 'w' , encoding='utf-8' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the folder to store the PyTorch model or dataset/vocab.",
)
parser.add_argument(
"--tf_checkpoint_path",
default="",
type=str,
help="An optional path to a TensorFlow checkpoint path to be converted.",
)
parser.add_argument(
"--transfo_xl_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained BERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--transfo_xl_dataset_file",
default="",
type=str,
help="An optional dataset file to be converted in a vocabulary.",
)
__A = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
| 363 |
import argparse
import torch
from transformers import (
    SpeechT5Config,
    SpeechT5FeatureExtractor,
    SpeechT5ForSpeechToSpeech,
    SpeechT5ForSpeechToText,
    SpeechT5ForTextToSpeech,
    SpeechT5Processor,
    SpeechT5Tokenizer,
logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
__A = logging.get_logger("transformers.models.speecht5")
__A = {
"speech_encoder_prenet.layer_norm": "speecht5.encoder.prenet.feature_projection.layer_norm",
"speech_encoder_prenet.post_extract_proj": "speecht5.encoder.prenet.feature_projection.projection",
"speech_encoder_prenet.pos_conv.0": "speecht5.encoder.prenet.pos_conv_embed.conv",
"speech_encoder_prenet.mask_emb": "speecht5.encoder.prenet.masked_spec_embed",
}
__A = {
"text_encoder_prenet.encoder_prenet.0": "speecht5.encoder.prenet.embed_tokens",
"text_encoder_prenet.encoder_prenet.1.alpha": "speecht5.encoder.prenet.encode_positions.alpha",
}
__A = {
"speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0": "speecht5.decoder.prenet.layers.0",
"speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0": "speecht5.decoder.prenet.layers.1",
"speech_decoder_prenet.decoder_prenet.0.1": "speecht5.decoder.prenet.final_layer",
"speech_decoder_prenet.decoder_prenet.1.alpha": "speecht5.decoder.prenet.encode_positions.alpha",
"speech_decoder_prenet.spkembs_layer.0": "speecht5.decoder.prenet.speaker_embeds_layer",
}
__A = {
"speech_decoder_postnet.feat_out": "speech_decoder_postnet.feat_out",
"speech_decoder_postnet.prob_out": "speech_decoder_postnet.prob_out",
"speech_decoder_postnet.postnet.postnet.0.0": "speech_decoder_postnet.layers.0.conv",
"speech_decoder_postnet.postnet.postnet.0.1": "speech_decoder_postnet.layers.0.batch_norm",
"speech_decoder_postnet.postnet.postnet.1.0": "speech_decoder_postnet.layers.1.conv",
"speech_decoder_postnet.postnet.postnet.1.1": "speech_decoder_postnet.layers.1.batch_norm",
"speech_decoder_postnet.postnet.postnet.2.0": "speech_decoder_postnet.layers.2.conv",
"speech_decoder_postnet.postnet.postnet.2.1": "speech_decoder_postnet.layers.2.batch_norm",
"speech_decoder_postnet.postnet.postnet.3.0": "speech_decoder_postnet.layers.3.conv",
"speech_decoder_postnet.postnet.postnet.3.1": "speech_decoder_postnet.layers.3.batch_norm",
"speech_decoder_postnet.postnet.postnet.4.0": "speech_decoder_postnet.layers.4.conv",
"speech_decoder_postnet.postnet.postnet.4.1": "speech_decoder_postnet.layers.4.batch_norm",
}
__A = {
"text_decoder_prenet.embed_tokens": "speecht5.decoder.prenet.embed_tokens",
}
__A = {
"text_decoder_postnet.output_projection": "text_decoder_postnet.lm_head",
}
__A = {
"encoder.layers.*.self_attn.k_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj",
"encoder.layers.*.self_attn.v_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj",
"encoder.layers.*.self_attn.q_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj",
"encoder.layers.*.self_attn.out_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj",
"encoder.layers.*.self_attn_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.layer_norm",
"encoder.layers.*.fc1": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense",
"encoder.layers.*.fc2": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense",
"encoder.layers.*.final_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "speecht5.encoder.wrapped_encoder.layer_norm",
"encoder.pos_emb.pe_k": "speecht5.encoder.wrapped_encoder.embed_positions.pe_k",
}
__A = {
"decoder.layers.*.self_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj",
"decoder.layers.*.self_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj",
"decoder.layers.*.self_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj",
"decoder.layers.*.self_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj",
"decoder.layers.*.self_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm",
"decoder.layers.*.encoder_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj",
"decoder.layers.*.encoder_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj",
"decoder.layers.*.encoder_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj",
"decoder.layers.*.encoder_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj",
"decoder.layers.*.encoder_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm",
"decoder.layers.*.fc1": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense",
"decoder.layers.*.fc2": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense",
"decoder.layers.*.final_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm",
}
__A = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
__A = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
__A = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
__A = []
__A = [
"encoder.version",
"encoder.layers.*.norm_k.weight",
"encoder.layers.*.norm_k.bias",
"decoder.version",
"decoder.layers.*.norm_k.weight",
"decoder.layers.*.norm_k.bias",
"decoder.pos_emb.pe_k",
"speech_encoder_prenet.embed_positions._float_tensor",
"text_decoder_prenet.embed_positions._float_tensor",
]
__A = IGNORE_KEYS + [
"encoder.proj",
"text_encoder_prenet.*",
"speech_decoder_prenet.*",
"speech_decoder_postnet.*",
]
__A = IGNORE_KEYS + [
"encoder.proj",
"speech_encoder_prenet.*",
"text_decoder_prenet.*",
"text_decoder_postnet.*",
]
__A = IGNORE_KEYS + [
"encoder.proj",
"text_encoder_prenet.*",
"text_decoder_prenet.*",
"text_decoder_postnet.*",
]
def lowerCamelCase_ ( UpperCamelCase__ : Dict , UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : Dict , UpperCamelCase__ : List[Any] ) -> Dict:
"""simple docstring"""
for attribute in key.split('.' ):
__lowerCamelCase = getattr(UpperCamelCase__ , UpperCamelCase__ )
if weight_type is not None:
__lowerCamelCase = getattr(UpperCamelCase__ , UpperCamelCase__ ).shape
else:
__lowerCamelCase = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}""" )
if weight_type == "weight":
__lowerCamelCase = value
elif weight_type == "weight_g":
__lowerCamelCase = value
elif weight_type == "weight_v":
__lowerCamelCase = value
elif weight_type == "bias":
__lowerCamelCase = value
elif weight_type == "running_mean":
__lowerCamelCase = value
elif weight_type == "running_var":
__lowerCamelCase = value
elif weight_type == "num_batches_tracked":
__lowerCamelCase = value
else:
__lowerCamelCase = value
logger.info(F"""{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.""" )
def lowerCamelCase_ ( UpperCamelCase__ : List[Any] , UpperCamelCase__ : Dict ) -> Any:
"""simple docstring"""
for key in ignore_keys:
if key.endswith('.*' ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
__lowerCamelCase , __lowerCamelCase = key.split('.*.' )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
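# Pattern semantics sketch (illustrative): with ignore key "text_decoder_prenet.*",
# a name like "text_decoder_prenet.embed_tokens.weight" is ignored via the prefix
# branch; with key "encoder.layers.*.norm_k.weight", a name like
# "encoder.layers.3.norm_k.weight" is ignored via the ".*." prefix/suffix branch.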
def lowerCamelCase_ ( UpperCamelCase__ : str , UpperCamelCase__ : List[Any] , UpperCamelCase__ : List[str] ) -> Optional[Any]:
"""simple docstring"""
__lowerCamelCase = []
if task == "s2t":
__lowerCamelCase = hf_model.speechta.encoder.prenet.feature_encoder
__lowerCamelCase = MAPPING_S2T
__lowerCamelCase = IGNORE_KEYS_S2T
elif task == "t2s":
__lowerCamelCase = None
__lowerCamelCase = MAPPING_T2S
__lowerCamelCase = IGNORE_KEYS_T2S
elif task == "s2s":
__lowerCamelCase = hf_model.speechta.encoder.prenet.feature_encoder
__lowerCamelCase = MAPPING_S2S
__lowerCamelCase = IGNORE_KEYS_S2S
else:
raise ValueError(F"""Unsupported task: {task}""" )
for name, value in fairseq_dict.items():
if should_ignore(UpperCamelCase__ , UpperCamelCase__ ):
logger.info(F"""{name} was ignored""" )
continue
__lowerCamelCase = False
if "conv_layers" in name:
load_conv_layer(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , hf_model.config.feat_extract_norm == 'group' , )
__lowerCamelCase = True
else:
for key, mapped_key in MAPPING.items():
# mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if "*" in key:
__lowerCamelCase , __lowerCamelCase = key.split('.*.' )
if prefix in name and suffix in name:
__lowerCamelCase = suffix
# if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
if key in name:
__lowerCamelCase = True
if "*" in mapped_key:
__lowerCamelCase = name.split(UpperCamelCase__ )[0].split('.' )[-2]
__lowerCamelCase = mapped_key.replace('*' , UpperCamelCase__ )
if "weight_g" in name:
__lowerCamelCase = 'weight_g'
elif "weight_v" in name:
__lowerCamelCase = 'weight_v'
elif "bias" in name:
__lowerCamelCase = 'bias'
elif "weight" in name:
__lowerCamelCase = 'weight'
elif "running_mean" in name:
__lowerCamelCase = 'running_mean'
elif "running_var" in name:
__lowerCamelCase = 'running_var'
elif "num_batches_tracked" in name:
__lowerCamelCase = 'num_batches_tracked'
else:
__lowerCamelCase = None
set_recursively(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
continue
if not is_used:
unused_weights.append(UpperCamelCase__ )
logger.warning(F"""Unused weights: {unused_weights}""" )
def lowerCamelCase_ ( UpperCamelCase__ : Any , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : int , UpperCamelCase__ : List[str] ) -> Tuple:
"""simple docstring"""
__lowerCamelCase = full_name.split('conv_layers.' )[-1]
__lowerCamelCase = name.split('.' )
__lowerCamelCase = int(items[0] )
__lowerCamelCase = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
__lowerCamelCase = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
__lowerCamelCase = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
__lowerCamelCase = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
__lowerCamelCase = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(UpperCamelCase__ )
@torch.no_grad()
def lowerCamelCase_ ( UpperCamelCase__ : Any , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[Any]=None , UpperCamelCase__ : str=None , UpperCamelCase__ : List[str]=None , ) -> Tuple:
"""simple docstring"""
if config_path is not None:
        __lowerCamelCase = SpeechT5Config.from_pretrained(UpperCamelCase__ )
else:
        __lowerCamelCase = SpeechT5Config()
if task == "s2t":
__lowerCamelCase = config.max_text_positions
        __lowerCamelCase = SpeechT5ForSpeechToText(UpperCamelCase__ )
elif task == "t2s":
__lowerCamelCase = 1876
__lowerCamelCase = 600
__lowerCamelCase = config.max_speech_positions
        __lowerCamelCase = SpeechT5ForTextToSpeech(UpperCamelCase__ )
elif task == "s2s":
__lowerCamelCase = 1876
__lowerCamelCase = config.max_speech_positions
        __lowerCamelCase = SpeechT5ForSpeechToSpeech(UpperCamelCase__ )
else:
raise ValueError(F"""Unknown task name: {task}""" )
if vocab_path:
        __lowerCamelCase = SpeechT5Tokenizer(UpperCamelCase__ , model_max_length=config.max_text_positions )
# Mask token behaves like a normal word, i.e. include the space before it
__lowerCamelCase = AddedToken('<mask>' , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ )
__lowerCamelCase = mask_token
tokenizer.add_special_tokens({'mask_token': mask_token} )
tokenizer.add_tokens(['<ctc_blank>'] )
        __lowerCamelCase = SpeechT5FeatureExtractor()
        __lowerCamelCase = SpeechT5Processor(tokenizer=UpperCamelCase__ , feature_extractor=UpperCamelCase__ )
processor.save_pretrained(UpperCamelCase__ )
__lowerCamelCase = torch.load(UpperCamelCase__ )
recursively_load_weights(fairseq_checkpoint['model'] , UpperCamelCase__ , UpperCamelCase__ )
model.save_pretrained(UpperCamelCase__ )
if repo_id:
print('Pushing to the hub...' )
processor.push_to_hub(UpperCamelCase__ )
model.push_to_hub(UpperCamelCase__ )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
parser.add_argument(
"--task",
default="s2t",
type=str,
help="Type of the SpeechT5 model you'd like to convert. Should be one of 's2t', 't2s', 's2s'.",
)
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--vocab_path", default=None, type=str, help="Path to SentencePiece model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
__A = parser.parse_args()
    convert_speecht5_checkpoint(
args.task,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.vocab_path,
args.push_to_hub,
)
| 348 | 0 |
import argparse
import copy
def lowerCamelCase_ ( UpperCamelCase__ : str ) -> List[str]:
"""simple docstring"""
__lowerCamelCase = {}
with open(UpperCamelCase__ ) as f:
for line in f:
if line.split()[0] not in dict_of_neighbours:
__lowerCamelCase = []
_list.append([line.split()[1], line.split()[2]] )
__lowerCamelCase = _list
else:
dict_of_neighbours[line.split()[0]].append(
[line.split()[1], line.split()[2]] )
if line.split()[1] not in dict_of_neighbours:
__lowerCamelCase = []
_list.append([line.split()[0], line.split()[2]] )
__lowerCamelCase = _list
else:
dict_of_neighbours[line.split()[1]].append(
[line.split()[0], line.split()[2]] )
return dict_of_neighbours
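# Expected input format (inferred from the parsing above): one edge per line as
# "node_a node_b weight", with single-character node names (the start node is
# read with f.read(1) below), e.g.
#
#     a b 20
#     a c 18
#     b c 10
#
# which yields {"a": [["b", "20"], ["c", "18"]], "b": [["a", "20"], ["c", "10"]], ...}.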
def lowerCamelCase_ ( UpperCamelCase__ : Tuple , UpperCamelCase__ : int ) -> Optional[int]:
"""simple docstring"""
with open(UpperCamelCase__ ) as f:
__lowerCamelCase = f.read(1 )
__lowerCamelCase = start_node
__lowerCamelCase = []
__lowerCamelCase = start_node
__lowerCamelCase = 0
while visiting not in first_solution:
__lowerCamelCase = 1_0000
for k in dict_of_neighbours[visiting]:
if int(k[1] ) < int(UpperCamelCase__ ) and k[0] not in first_solution:
__lowerCamelCase = k[1]
__lowerCamelCase = k[0]
first_solution.append(UpperCamelCase__ )
__lowerCamelCase = distance_of_first_solution + int(UpperCamelCase__ )
__lowerCamelCase = best_node
first_solution.append(UpperCamelCase__ )
__lowerCamelCase = 0
for k in dict_of_neighbours[first_solution[-2]]:
if k[0] == start_node:
break
position += 1
__lowerCamelCase = (
distance_of_first_solution
+ int(dict_of_neighbours[first_solution[-2]][position][1] )
- 1_0000
)
return first_solution, distance_of_first_solution
def lowerCamelCase_ ( UpperCamelCase__ : Any , UpperCamelCase__ : Union[str, Any] ) -> str:
"""simple docstring"""
__lowerCamelCase = []
for n in solution[1:-1]:
__lowerCamelCase = solution.index(UpperCamelCase__ )
for kn in solution[1:-1]:
__lowerCamelCase = solution.index(UpperCamelCase__ )
if n == kn:
continue
__lowerCamelCase = copy.deepcopy(UpperCamelCase__ )
__lowerCamelCase = kn
__lowerCamelCase = n
__lowerCamelCase = 0
for k in _tmp[:-1]:
__lowerCamelCase = _tmp[_tmp.index(UpperCamelCase__ ) + 1]
for i in dict_of_neighbours[k]:
if i[0] == next_node:
__lowerCamelCase = distance + int(i[1] )
_tmp.append(UpperCamelCase__ )
if _tmp not in neighborhood_of_solution:
neighborhood_of_solution.append(_tmp )
__lowerCamelCase = len(neighborhood_of_solution[0] ) - 1
    neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list] )
return neighborhood_of_solution
def lowerCamelCase_ ( UpperCamelCase__ : Dict , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Optional[Any] ) -> int:
"""simple docstring"""
__lowerCamelCase = 1
__lowerCamelCase = first_solution
__lowerCamelCase = []
__lowerCamelCase = distance_of_first_solution
__lowerCamelCase = solution
while count <= iters:
__lowerCamelCase = find_neighborhood(UpperCamelCase__ , UpperCamelCase__ )
__lowerCamelCase = 0
__lowerCamelCase = neighborhood[index_of_best_solution]
__lowerCamelCase = len(UpperCamelCase__ ) - 1
__lowerCamelCase = False
while not found:
__lowerCamelCase = 0
while i < len(UpperCamelCase__ ):
if best_solution[i] != solution[i]:
__lowerCamelCase = best_solution[i]
__lowerCamelCase = solution[i]
break
__lowerCamelCase = i + 1
if [first_exchange_node, second_exchange_node] not in tabu_list and [
second_exchange_node,
first_exchange_node,
] not in tabu_list:
tabu_list.append([first_exchange_node, second_exchange_node] )
__lowerCamelCase = True
__lowerCamelCase = best_solution[:-1]
__lowerCamelCase = neighborhood[index_of_best_solution][best_cost_index]
if cost < best_cost:
__lowerCamelCase = cost
__lowerCamelCase = solution
else:
__lowerCamelCase = index_of_best_solution + 1
__lowerCamelCase = neighborhood[index_of_best_solution]
if len(UpperCamelCase__ ) >= size:
tabu_list.pop(0 )
__lowerCamelCase = count + 1
return best_solution_ever, best_cost
def lowerCamelCase_ ( UpperCamelCase__ : Tuple=None ) -> Union[str, Any]:
"""simple docstring"""
__lowerCamelCase = generate_neighbours(args.File )
__lowerCamelCase , __lowerCamelCase = generate_first_solution(
args.File , UpperCamelCase__ )
__lowerCamelCase , __lowerCamelCase = tabu_search(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , args.Iterations , args.Size , )
print(F"""Best solution: {best_sol}, with total distance: {best_cost}.""" )
if __name__ == "__main__":
__A = argparse.ArgumentParser(description="Tabu Search")
parser.add_argument(
"-f",
"--File",
type=str,
help="Path to the file containing the data",
required=True,
)
parser.add_argument(
"-i",
"--Iterations",
type=int,
help="How many iterations the algorithm should perform",
required=True,
)
parser.add_argument(
"-s", "--Size", type=int, help="Size of the tabu list", required=True
)
# Pass the arguments to main method
main(parser.parse_args())
| 364 |
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of the same set.
def lowerCamelCase_ ( UpperCamelCase__ : List[Any] ) -> List[Any]:
"""simple docstring"""
__lowerCamelCase = [False] * len(UpperCamelCase__ )
__lowerCamelCase = [-1] * len(UpperCamelCase__ )
def dfs(UpperCamelCase__ : int , UpperCamelCase__ : int ):
__lowerCamelCase = True
__lowerCamelCase = c
for u in graph[v]:
if not visited[u]:
dfs(UpperCamelCase__ , 1 - c )
for i in range(len(UpperCamelCase__ ) ):
if not visited[i]:
dfs(UpperCamelCase__ , 0 )
for i in range(len(UpperCamelCase__ ) ):
for j in graph[i]:
if color[i] == color[j]:
return False
return True
# Adjacency list of graph
__A = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
| 348 | 0 |
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
__A = 4
__A = 3
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
pass
def lowerCamelCase_ ( UpperCamelCase__ : List[str] ) -> Any:
"""simple docstring"""
for shard in shards:
for i in range(UpperCamelCase__ ):
yield {"i": i, "shard": shard}
def lowerCamelCase_ ( ) -> Union[str, Any]:
"""simple docstring"""
__lowerCamelCase = int(os.environ['RANK'] )
__lowerCamelCase = int(os.environ['WORLD_SIZE'] )
__lowerCamelCase = ArgumentParser()
parser.add_argument('--streaming' , type=UpperCamelCase__ )
parser.add_argument('--local_rank' , type=UpperCamelCase__ )
parser.add_argument('--num_workers' , type=UpperCamelCase__ , default=0 )
__lowerCamelCase = parser.parse_args()
__lowerCamelCase = args.streaming
__lowerCamelCase = args.num_workers
__lowerCamelCase = {'shards': [F"""shard_{shard_idx}""" for shard_idx in range(UpperCamelCase__ )]}
__lowerCamelCase = IterableDataset.from_generator(UpperCamelCase__ , gen_kwargs=UpperCamelCase__ )
if not streaming:
__lowerCamelCase = Dataset.from_list(list(UpperCamelCase__ ) )
__lowerCamelCase = split_dataset_by_node(UpperCamelCase__ , rank=UpperCamelCase__ , world_size=UpperCamelCase__ )
__lowerCamelCase = torch.utils.data.DataLoader(UpperCamelCase__ , num_workers=UpperCamelCase__ )
__lowerCamelCase = NUM_SHARDS * NUM_ITEMS_PER_SHARD
__lowerCamelCase = full_size // world_size
expected_local_size += int(rank < (full_size % world_size) )
__lowerCamelCase = sum(1 for _ in dataloader )
if local_size != expected_local_size:
raise FailedTestError(F"""local_size {local_size} != expected_local_size {expected_local_size}""" )
if __name__ == "__main__":
main()
| 365 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
        __lowerCamelCase = AutoModelForSeq2SeqLM.from_pretrained('google/mt5-small' , return_dict=lowerCamelCase__ ).to(lowerCamelCase__ )
__lowerCamelCase = AutoTokenizer.from_pretrained('google/mt5-small' )
__lowerCamelCase = tokenizer('Hello there' , return_tensors='pt' ).input_ids
__lowerCamelCase = tokenizer('Hi I am' , return_tensors='pt' ).input_ids
__lowerCamelCase = model(input_ids.to(lowerCamelCase__ ) , labels=labels.to(lowerCamelCase__ ) ).loss
__lowerCamelCase = -(labels.shape[-1] * loss.item())
__lowerCamelCase = -84.91_27
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
| 348 | 0 |
from typing import List
from .keymap import KEYMAP, get_character
def lowerCamelCase_ ( UpperCamelCase__ : str ) -> Optional[Any]:
"""simple docstring"""
def decorator(UpperCamelCase__ : Optional[int] ):
__lowerCamelCase = getattr(UpperCamelCase__ , 'handle_key' , [] )
handle += [key]
setattr(UpperCamelCase__ , 'handle_key' , UpperCamelCase__ )
return func
return decorator
def lowerCamelCase_ ( *UpperCamelCase__ : List[str] ) -> Dict:
"""simple docstring"""
def decorator(UpperCamelCase__ : Union[str, Any] ):
__lowerCamelCase = getattr(UpperCamelCase__ , 'handle_key' , [] )
handle += keys
setattr(UpperCamelCase__ , 'handle_key' , UpperCamelCase__ )
return func
return decorator
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
def __new__( cls , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Dict:
'''simple docstring'''
__lowerCamelCase = super().__new__(cls , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
if not hasattr(lowerCamelCase__ , 'key_handler' ):
setattr(lowerCamelCase__ , 'key_handler' , {} )
setattr(lowerCamelCase__ , 'handle_input' , KeyHandler.handle_input )
for value in attrs.values():
__lowerCamelCase = getattr(lowerCamelCase__ , 'handle_key' , [] )
for key in handled_keys:
__lowerCamelCase = value
return new_cls
@staticmethod
def lowercase_ ( cls ) -> int:
'''simple docstring'''
__lowerCamelCase = get_character()
if char != KEYMAP["undefined"]:
__lowerCamelCase = ord(lowerCamelCase__ )
__lowerCamelCase = cls.key_handler.get(lowerCamelCase__ )
if handler:
__lowerCamelCase = char
return handler(cls )
else:
return None
def lowerCamelCase_ ( cls : int ) -> List[Any]:
"""simple docstring"""
return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() )
| 366 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def lowerCamelCase_ ( UpperCamelCase__ : List[Any] ) -> Any:
"""simple docstring"""
__lowerCamelCase = [2, 2, 6, 2] if 'tiny' in model_name else [2, 2, 18, 2]
__lowerCamelCase = True if 'large' in model_name or 'huge' in model_name else False
__lowerCamelCase = True if 'large' in model_name or 'huge' in model_name else False
__lowerCamelCase = True if 'large' in model_name or 'huge' in model_name else False
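    # For the large/xlarge/huge variants the three flags above enable convolutional
    # patch embeddings, post-layernorm, and layer scale (see the config kwargs below).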
if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
if "fl3" in model_name:
__lowerCamelCase = [3, 3, 3, 3]
__lowerCamelCase = [5, 5, 5, 5]
elif "fl4" in model_name:
__lowerCamelCase = [4, 4, 4, 4]
__lowerCamelCase = [3, 3, 3, 3]
if "tiny" in model_name or "small" in model_name or "base" in model_name:
__lowerCamelCase = [3, 3, 3, 3]
if "lrf" in model_name:
__lowerCamelCase = [3, 3, 3, 3]
else:
__lowerCamelCase = [2, 2, 2, 2]
if "tiny" in model_name:
__lowerCamelCase = 96
elif "small" in model_name:
__lowerCamelCase = 96
elif "base" in model_name:
__lowerCamelCase = 128
elif "large" in model_name:
__lowerCamelCase = 192
elif "xlarge" in model_name:
__lowerCamelCase = 256
elif "huge" in model_name:
__lowerCamelCase = 352
# set label information
__lowerCamelCase = 'huggingface/label-files'
if "large" in model_name or "huge" in model_name:
__lowerCamelCase = 'imagenet-22k-id2label.json'
else:
__lowerCamelCase = 'imagenet-1k-id2label.json'
__lowerCamelCase = json.load(open(hf_hub_download(UpperCamelCase__ , UpperCamelCase__ , repo_type='dataset' ) , 'r' ) )
__lowerCamelCase = {int(UpperCamelCase__ ): v for k, v in idalabel.items()}
__lowerCamelCase = {v: k for k, v in idalabel.items()}
__lowerCamelCase = FocalNetConfig(
embed_dim=UpperCamelCase__ , depths=UpperCamelCase__ , focal_levels=UpperCamelCase__ , focal_windows=UpperCamelCase__ , use_conv_embed=UpperCamelCase__ , idalabel=UpperCamelCase__ , labelaid=UpperCamelCase__ , use_post_layernorm=UpperCamelCase__ , use_layerscale=UpperCamelCase__ , )
return config
def lowerCamelCase_ ( UpperCamelCase__ : Any ) -> str:
"""simple docstring"""
if "patch_embed.proj" in name:
__lowerCamelCase = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
__lowerCamelCase = name.replace('patch_embed.norm' , 'embeddings.norm' )
if "layers" in name:
__lowerCamelCase = 'encoder.' + name
if "encoder.layers" in name:
__lowerCamelCase = name.replace('encoder.layers' , 'encoder.stages' )
if "downsample.proj" in name:
__lowerCamelCase = name.replace('downsample.proj' , 'downsample.projection' )
if "blocks" in name:
__lowerCamelCase = name.replace('blocks' , 'layers' )
if "modulation.f.weight" in name or "modulation.f.bias" in name:
__lowerCamelCase = name.replace('modulation.f' , 'modulation.projection_in' )
if "modulation.h.weight" in name or "modulation.h.bias" in name:
__lowerCamelCase = name.replace('modulation.h' , 'modulation.projection_context' )
if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
__lowerCamelCase = name.replace('modulation.proj' , 'modulation.projection_out' )
if name == "norm.weight":
__lowerCamelCase = 'layernorm.weight'
if name == "norm.bias":
__lowerCamelCase = 'layernorm.bias'
if "head" in name:
__lowerCamelCase = name.replace('head' , 'classifier' )
else:
__lowerCamelCase = 'focalnet.' + name
return name
def lowerCamelCase_ ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Tuple=False ) -> Dict:
"""simple docstring"""
    # fmt: off
    __lowerCamelCase = {
'focalnet-tiny': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth',
'focalnet-tiny-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth',
'focalnet-small': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth',
'focalnet-small-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth',
'focalnet-base': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth',
'focalnet-base-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth',
'focalnet-large-lrf-fl3': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth',
'focalnet-large-lrf-fl4': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth',
'focalnet-xlarge-lrf-fl3': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth',
'focalnet-xlarge-lrf-fl4': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth',
}
# fmt: on
__lowerCamelCase = model_name_to_url[model_name]
print('Checkpoint URL: ' , UpperCamelCase__ )
__lowerCamelCase = torch.hub.load_state_dict_from_url(UpperCamelCase__ , map_location='cpu' )['model']
# rename keys
for key in state_dict.copy().keys():
__lowerCamelCase = state_dict.pop(UpperCamelCase__ )
__lowerCamelCase = val
__lowerCamelCase = get_focalnet_config(UpperCamelCase__ )
__lowerCamelCase = FocalNetForImageClassification(UpperCamelCase__ )
model.eval()
# load state dict
model.load_state_dict(UpperCamelCase__ )
# verify conversion
__lowerCamelCase = 'http://images.cocodataset.org/val2017/000000039769.jpg'
__lowerCamelCase = BitImageProcessor(
do_resize=UpperCamelCase__ , size={'shortest_edge': 256} , resample=PILImageResampling.BILINEAR , do_center_crop=UpperCamelCase__ , crop_size=224 , do_normalize=UpperCamelCase__ , image_mean=UpperCamelCase__ , image_std=UpperCamelCase__ , )
__lowerCamelCase = Image.open(requests.get(UpperCamelCase__ , stream=UpperCamelCase__ ).raw )
__lowerCamelCase = processor(images=UpperCamelCase__ , return_tensors='pt' )
__lowerCamelCase = transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(mean=[0.4_85, 0.4_56, 0.4_06] , std=[0.2_29, 0.2_24, 0.2_25] ),
] )
__lowerCamelCase = image_transforms(UpperCamelCase__ ).unsqueeze(0 )
# verify pixel_values
    assert torch.allclose(inputs.pixel_values , UpperCamelCase__ , atol=1e-4 )
__lowerCamelCase = model(**UpperCamelCase__ )
__lowerCamelCase = outputs.logits.argmax(-1 ).item()
print('Predicted class:' , model.config.idalabel[predicted_class_idx] )
print('First values of logits:' , outputs.logits[0, :3] )
if model_name == "focalnet-tiny":
__lowerCamelCase = torch.tensor([0.21_66, -0.43_68, 0.21_91] )
elif model_name == "focalnet-tiny-lrf":
__lowerCamelCase = torch.tensor([1.16_69, 0.01_25, -0.16_95] )
elif model_name == "focalnet-small":
__lowerCamelCase = torch.tensor([0.49_17, -0.04_30, 0.13_41] )
elif model_name == "focalnet-small-lrf":
__lowerCamelCase = torch.tensor([-0.25_88, -0.53_42, -0.23_31] )
elif model_name == "focalnet-base":
__lowerCamelCase = torch.tensor([-0.16_55, -0.40_90, -0.17_30] )
elif model_name == "focalnet-base-lrf":
__lowerCamelCase = torch.tensor([0.53_06, -0.04_83, -0.39_28] )
    assert torch.allclose(outputs.logits[0, :3] , UpperCamelCase__ , atol=1e-4 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(F"""Saving model and processor of {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(UpperCamelCase__ )
processor.save_pretrained(UpperCamelCase__ )
if push_to_hub:
print(F"""Pushing model and processor of {model_name} to the hub...""" )
model.push_to_hub(F"""{model_name}""" )
processor.push_to_hub(F"""{model_name}""" )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="focalnet-tiny",
type=str,
help="Name of the FocalNet model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub.",
)
__A = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 348 | 0 |
"""simple docstring"""
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
def __init__( self , lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = False , lowerCamelCase__ = False , lowerCamelCase__ = None , **lowerCamelCase__ , ) -> Optional[Any]:
'''simple docstring'''
super().__init__(
lowerCamelCase__ , split=lowerCamelCase__ , features=lowerCamelCase__ , cache_dir=lowerCamelCase__ , keep_in_memory=lowerCamelCase__ , streaming=lowerCamelCase__ , num_proc=lowerCamelCase__ , **lowerCamelCase__ , )
__lowerCamelCase = path_or_paths if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else {self.split: path_or_paths}
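        # Normalize the input to a {split_name: paths} mapping so a bare path is
        # assigned to the requested split.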
__lowerCamelCase = Text(
cache_dir=lowerCamelCase__ , data_files=lowerCamelCase__ , features=lowerCamelCase__ , **lowerCamelCase__ , )
def lowercase_ ( self ) -> List[Any]:
'''simple docstring'''
# Build iterable dataset
if self.streaming:
__lowerCamelCase = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
__lowerCamelCase = None
__lowerCamelCase = None
__lowerCamelCase = None
__lowerCamelCase = None
self.builder.download_and_prepare(
download_config=lowerCamelCase__ , download_mode=lowerCamelCase__ , verification_mode=lowerCamelCase__ , base_path=lowerCamelCase__ , num_proc=self.num_proc , )
__lowerCamelCase = self.builder.as_dataset(
split=self.split , verification_mode=lowerCamelCase__ , in_memory=self.keep_in_memory )
return dataset
| 367 |
from __future__ import annotations
def lowerCamelCase_ ( UpperCamelCase__ : list[float] , UpperCamelCase__ : list[float] ) -> float:
"""simple docstring"""
__lowerCamelCase = sorted(numsa + numsa )
__lowerCamelCase , __lowerCamelCase = divmod(len(UpperCamelCase__ ) , 2 )
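    # div is the index of the middle element; mod is 1 when the merged length is odd.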
if mod == 1:
return all_numbers[div]
else:
return (all_numbers[div] + all_numbers[div - 1]) / 2
if __name__ == "__main__":
import doctest
doctest.testmod()
__A = [float(x) for x in input("Enter the elements of first array: ").split()]
__A = [float(x) for x in input("Enter the elements of second array: ").split()]
print(f'''The median of two arrays is: {median_of_two_arrays(array_a, array_a)}''')
| 348 | 0 |
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings("ignore", category=UserWarning, module="torch.optim.lr_scheduler")
class __lowerCAmelCase :
"""simple docstring"""
def __init__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = True , lowerCamelCase__ = False ) -> Union[str, Any]:
'''simple docstring'''
__lowerCamelCase = scheduler
__lowerCamelCase = optimizers if isinstance(lowerCamelCase__ , (list, tuple) ) else [optimizers]
__lowerCamelCase = split_batches
__lowerCamelCase = step_with_optimizer
__lowerCamelCase = GradientState()
def lowercase_ ( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Dict:
'''simple docstring'''
if not self.step_with_optimizer:
# No link between scheduler and optimizer -> just step
self.scheduler.step(*lowerCamelCase__ , **lowerCamelCase__ )
return
# Otherwise, first make sure the optimizer was stepped.
if not self.gradient_state.sync_gradients:
if self.gradient_state.adjust_scheduler:
self.scheduler._step_count += 1
return
for opt in self.optimizers:
if opt.step_was_skipped:
return
if self.split_batches:
# Split batches -> the training dataloader batch size is not changed so one step per training step
self.scheduler.step(*lowerCamelCase__ , **lowerCamelCase__ )
else:
# Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
# num_processes steps per training step
__lowerCamelCase = AcceleratorState().num_processes
for _ in range(lowerCamelCase__ ):
# Special case when using OneCycle and `drop_last` was not used
if hasattr(self.scheduler , 'total_steps' ):
if self.scheduler._step_count <= self.scheduler.total_steps:
self.scheduler.step(*lowerCamelCase__ , **lowerCamelCase__ )
else:
self.scheduler.step(*lowerCamelCase__ , **lowerCamelCase__ )
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
return self.scheduler.get_last_lr()
def lowercase_ ( self ) -> List[Any]:
'''simple docstring'''
return self.scheduler.state_dict()
def lowercase_ ( self , lowerCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
self.scheduler.load_state_dict(lowerCamelCase__ )
def lowercase_ ( self ) -> List[Any]:
'''simple docstring'''
return self.scheduler.get_lr()
def lowercase_ ( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> List[Any]:
'''simple docstring'''
return self.scheduler.print_lr(*lowerCamelCase__ , **lowerCamelCase__ )
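# A minimal usage sketch (variable names hypothetical; in the accelerate source this
# wrapper is `AcceleratedScheduler`). The wrapper only advances the learning-rate
# schedule when the wrapped optimizer actually stepped, e.g. when a mixed-precision
# step was not skipped:
#
#     scheduler = AcceleratedScheduler(lr_scheduler, optimizers=[optimizer])
#     scheduler.step()  # deferred while the optimizer step was skipped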
| 368 |
__A = {
"joule": 1.0,
"kilojoule": 10_00,
"megajoule": 1_00_00_00,
"gigajoule": 10_00_00_00_00,
"wattsecond": 1.0,
"watthour": 36_00,
"kilowatthour": 3_60_00_00,
"newtonmeter": 1.0,
"calorie_nutr": 41_86.8,
"kilocalorie_nutr": 4_18_68_00.00,
"electronvolt": 1.6_0_2_1_7_6_6_3_4e-1_9,
"britishthermalunit_it": 10_55.0_55_85,
"footpound": 1.3_5_5_8_1_8,
}
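# All factors above are expressed in joules, so a conversion is
# value * from_factor / to_factor. Worked example: 1.0 kilojoule to calorie_nutr
# is 1.0 * 1000.0 / 4186.8, about 0.2388.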
def lowerCamelCase_ ( UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : float ) -> float:
"""simple docstring"""
if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
__lowerCamelCase = (
F"""Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"""
F"""Valid values are: {', '.join(UpperCamelCase__ )}"""
)
raise ValueError(UpperCamelCase__ )
return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 348 | 0 |
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __lowerCAmelCase :
"""simple docstring"""
def __init__( self , lowerCamelCase__ , lowerCamelCase__=13 , lowerCamelCase__=30 , lowerCamelCase__=2 , lowerCamelCase__=3 , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=32 , lowerCamelCase__=2 , lowerCamelCase__=4 , lowerCamelCase__=37 , lowerCamelCase__="gelu" , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=10 , lowerCamelCase__=0.02 , lowerCamelCase__=3 , lowerCamelCase__=0.6 , lowerCamelCase__=None , ) -> List[Any]:
'''simple docstring'''
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = image_size
__lowerCamelCase = patch_size
__lowerCamelCase = num_channels
__lowerCamelCase = is_training
__lowerCamelCase = use_labels
__lowerCamelCase = hidden_size
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_act
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = type_sequence_label_size
__lowerCamelCase = initializer_range
__lowerCamelCase = mask_ratio
__lowerCamelCase = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
__lowerCamelCase = (image_size // patch_size) ** 2
__lowerCamelCase = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
def lowercase_ ( self ) -> Any:
'''simple docstring'''
__lowerCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowerCamelCase = None
if self.use_labels:
__lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCamelCase = self.get_config()
return config, pixel_values, labels
def lowercase_ ( self ) -> Any:
'''simple docstring'''
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
__lowerCamelCase = TFViTMAEModel(config=lowerCamelCase__ )
__lowerCamelCase = model(lowerCamelCase__ , training=lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Optional[int]:
'''simple docstring'''
__lowerCamelCase = TFViTMAEForPreTraining(lowerCamelCase__ )
__lowerCamelCase = model(lowerCamelCase__ , training=lowerCamelCase__ )
# expected sequence length = num_patches
__lowerCamelCase = (self.image_size // self.patch_size) ** 2
__lowerCamelCase = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
__lowerCamelCase = 1
__lowerCamelCase = TFViTMAEForPreTraining(lowerCamelCase__ )
__lowerCamelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__lowerCamelCase = model(lowerCamelCase__ , training=lowerCamelCase__ )
__lowerCamelCase = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def lowercase_ ( self ) -> Any:
'''simple docstring'''
__lowerCamelCase = self.prepare_config_and_inputs()
((__lowerCamelCase) , (__lowerCamelCase) , (__lowerCamelCase)) = config_and_inputs
__lowerCamelCase = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class __lowerCAmelCase ( __magic_name__ , __magic_name__ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
snake_case_ = {'''feature-extraction''': TFViTMAEModel} if is_tf_available() else {}
snake_case_ = False
snake_case_ = False
snake_case_ = False
snake_case_ = False
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
__lowerCamelCase = TFViTMAEModelTester(self )
__lowerCamelCase = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ , hidden_size=37 )
def lowercase_ ( self ) -> List[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='ViTMAE does not use inputs_embeds' )
def lowercase_ ( self ) -> Dict:
'''simple docstring'''
pass
def lowercase_ ( self ) -> Dict:
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase = model_class(lowerCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
__lowerCamelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase__ , tf.keras.layers.Layer ) )
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase = model_class(lowerCamelCase__ )
__lowerCamelCase = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCamelCase = [*signature.parameters.keys()]
__lowerCamelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , lowerCamelCase__ )
def lowercase_ ( self ) -> int:
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase__ )
def lowercase_ ( self ) -> int:
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*lowerCamelCase__ )
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
np.random.seed(2 )
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCamelCase = int((config.image_size // config.patch_size) ** 2 )
__lowerCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
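        # A fixed noise tensor is reused for both calls below so the random masking is
        # identical and dict vs. keyword inputs can be compared exactly.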
for model_class in self.all_model_classes:
__lowerCamelCase = model_class(lowerCamelCase__ )
__lowerCamelCase = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase = model(lowerCamelCase__ , noise=lowerCamelCase__ )
__lowerCamelCase = copy.deepcopy(self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ ) )
__lowerCamelCase = model(**lowerCamelCase__ , noise=lowerCamelCase__ )
__lowerCamelCase = outputs_dict[0].numpy()
__lowerCamelCase = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1e-6 )
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
np.random.seed(2 )
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCamelCase = int((config.image_size // config.patch_size) ** 2 )
__lowerCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
def prepare_numpy_arrays(lowerCamelCase__ ):
__lowerCamelCase = {}
for k, v in inputs_dict.items():
if tf.is_tensor(lowerCamelCase__ ):
__lowerCamelCase = v.numpy()
else:
__lowerCamelCase = np.array(lowerCamelCase__ )
return inputs_np_dict
for model_class in self.all_model_classes:
__lowerCamelCase = model_class(lowerCamelCase__ )
__lowerCamelCase = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase = prepare_numpy_arrays(lowerCamelCase__ )
__lowerCamelCase = model(lowerCamelCase__ , noise=lowerCamelCase__ )
__lowerCamelCase = model(**lowerCamelCase__ , noise=lowerCamelCase__ )
self.assert_outputs_same(lowerCamelCase__ , lowerCamelCase__ )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> int:
'''simple docstring'''
np.random.seed(2 )
__lowerCamelCase = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
__lowerCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
__lowerCamelCase = tf.constant(lowerCamelCase__ )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
__lowerCamelCase = tf_noise
super().check_pt_tf_models(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def lowercase_ ( self ) -> Any:
'''simple docstring'''
np.random.seed(2 )
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCamelCase = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__ ),)
for module_member_name in dir(lowerCamelCase__ )
if module_member_name.endswith('MainLayer' )
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len('MainLayer' )] == model_class.__name__[: -len('Model' )]
for module_member in (getattr(lowerCamelCase__ , lowerCamelCase__ ),)
if isinstance(lowerCamelCase__ , lowerCamelCase__ )
and tf.keras.layers.Layer in module_member.__bases__
and getattr(lowerCamelCase__ , '_keras_serializable' , lowerCamelCase__ )
}
__lowerCamelCase = int((config.image_size // config.patch_size) ** 2 )
__lowerCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
__lowerCamelCase = tf.convert_to_tensor(lowerCamelCase__ )
inputs_dict.update({'noise': noise} )
for main_layer_class in tf_main_layer_classes:
__lowerCamelCase = main_layer_class(lowerCamelCase__ )
__lowerCamelCase = {
name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
}
__lowerCamelCase = tf.keras.Model(lowerCamelCase__ , outputs=main_layer(lowerCamelCase__ ) )
__lowerCamelCase = model(lowerCamelCase__ )
with tempfile.TemporaryDirectory() as tmpdirname:
__lowerCamelCase = os.path.join(lowerCamelCase__ , 'keras_model.h5' )
model.save(lowerCamelCase__ )
__lowerCamelCase = tf.keras.models.load_model(
lowerCamelCase__ , custom_objects={main_layer_class.__name__: main_layer_class} )
assert isinstance(lowerCamelCase__ , tf.keras.Model )
__lowerCamelCase = model(lowerCamelCase__ )
self.assert_outputs_same(lowerCamelCase__ , lowerCamelCase__ )
@slow
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
np.random.seed(2 )
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCamelCase = int((config.image_size // config.patch_size) ** 2 )
__lowerCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
__lowerCamelCase = model_class(lowerCamelCase__ )
__lowerCamelCase = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase = model(lowerCamelCase__ , noise=lowerCamelCase__ )
if model_class.__name__ == "TFViTMAEModel":
__lowerCamelCase = outputs.last_hidden_state.numpy()
__lowerCamelCase = 0
else:
__lowerCamelCase = outputs.logits.numpy()
__lowerCamelCase = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowerCamelCase__ , saved_model=lowerCamelCase__ )
__lowerCamelCase = model_class.from_pretrained(lowerCamelCase__ )
__lowerCamelCase = model(lowerCamelCase__ , noise=lowerCamelCase__ )
if model_class.__name__ == "TFViTMAEModel":
__lowerCamelCase = after_outputs['last_hidden_state'].numpy()
__lowerCamelCase = 0
else:
__lowerCamelCase = after_outputs['logits'].numpy()
__lowerCamelCase = 0
__lowerCamelCase = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(lowerCamelCase__ , 1e-5 )
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
np.random.seed(2 )
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCamelCase = int((config.image_size // config.patch_size) ** 2 )
__lowerCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
__lowerCamelCase = model_class(lowerCamelCase__ )
__lowerCamelCase = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase = model(lowerCamelCase__ , noise=lowerCamelCase__ )
__lowerCamelCase = model.get_config()
# make sure that returned config is jsonifiable, which is required by keras
json.dumps(lowerCamelCase__ )
__lowerCamelCase = model_class.from_config(model.get_config() )
# make sure it also accepts a normal config
__lowerCamelCase = model_class.from_config(model.config )
__lowerCamelCase = new_model(lowerCamelCase__ ) # Build model
new_model.set_weights(model.get_weights() )
__lowerCamelCase = new_model(lowerCamelCase__ , noise=lowerCamelCase__ )
self.assert_outputs_same(lowerCamelCase__ , lowerCamelCase__ )
@unittest.skip(
        reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.' )
def lowercase_ ( self ) -> Optional[int]:
'''simple docstring'''
pass
@unittest.skip(reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load' )
def lowercase_ ( self ) -> Any:
'''simple docstring'''
pass
@slow
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
__lowerCamelCase = TFViTMAEModel.from_pretrained('google/vit-base-patch16-224' )
self.assertIsNotNone(lowerCamelCase__ )
def lowerCamelCase_ ( ) -> List[Any]:
"""simple docstring"""
__lowerCamelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
return ViTImageProcessor.from_pretrained('facebook/vit-mae-base' ) if is_vision_available() else None
@slow
def lowercase_ ( self ) -> Tuple:
'''simple docstring'''
np.random.seed(2 )
__lowerCamelCase = TFViTMAEForPreTraining.from_pretrained('facebook/vit-mae-base' )
__lowerCamelCase = self.default_image_processor
__lowerCamelCase = prepare_img()
__lowerCamelCase = image_processor(images=lowerCamelCase__ , return_tensors='tf' )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
__lowerCamelCase = ViTMAEConfig()
__lowerCamelCase = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
__lowerCamelCase = np.random.uniform(size=(1, num_patches) )
# forward pass
__lowerCamelCase = model(**lowerCamelCase__ , noise=lowerCamelCase__ )
# verify the logits
__lowerCamelCase = tf.convert_to_tensor([1, 196, 768] )
self.assertEqual(outputs.logits.shape , lowerCamelCase__ )
__lowerCamelCase = tf.convert_to_tensor(
[[-0.05_48, -1.70_23, -0.93_25], [0.37_21, -0.56_70, -0.22_33], [0.82_35, -1.38_78, -0.35_24]] )
tf.debugging.assert_near(outputs.logits[0, :3, :3] , lowerCamelCase__ , atol=1e-4 )
| 369 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
snake_case_ = '''philschmid/bart-large-cnn-samsum'''
snake_case_ = (
'''This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, '''
'''and returns a summary of the text.'''
)
snake_case_ = '''summarizer'''
snake_case_ = AutoTokenizer
snake_case_ = AutoModelForSeqaSeqLM
snake_case_ = ['''text''']
snake_case_ = ['''text''']
def lowercase_ ( self , lowerCamelCase__ ) -> Optional[int]:
'''simple docstring'''
return self.pre_processor(lowerCamelCase__ , return_tensors='pt' , truncation=lowerCamelCase__ )
def lowercase_ ( self , lowerCamelCase__ ) -> Dict:
'''simple docstring'''
return self.model.generate(**lowerCamelCase__ )[0]
def lowercase_ ( self , lowerCamelCase__ ) -> Any:
'''simple docstring'''
return self.pre_processor.decode(lowerCamelCase__ , skip_special_tokens=lowerCamelCase__ , clean_up_tokenization_spaces=lowerCamelCase__ )
| 348 | 0 |
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
__A = TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("", "|", "|"),
datarow=DataRow("", "|", "|"),
padding=1,
with_header_hide=None,
)
__A = []
__A = []
__A = {"type": "section", "text": {"type": "plain_text", "text": "No failed tests! 🤗", "emoji": True}}
__A = [
{
"type": "header",
"text": {
"type": "plain_text",
"text": f'''🤗 Accelerate nightly {os.environ.get('TEST_TYPE', '')} test results''',
"emoji": True,
},
}
]
__A = 0
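# Walk every pytest JSON-report log in the working directory, collecting each failed
# test id, its duration, and the originating log file.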
for log in Path().glob("*.log"):
__A = 0
with open(log, "r") as f:
for line in f:
__A = json.loads(line)
if line.get("nodeid", "") != "":
__A = line["nodeid"]
if line.get("duration", None) is not None:
__A = f'''{line['duration']:.4f}'''
if line.get("outcome", "") == "failed":
section_num_failed += 1
failed.append([test, duration, log.name.split("_")[0]])
total_num_failed += 1
group_info.append([str(log), section_num_failed, failed])
__A = []
log.unlink()
__A = ""
__A = []
if total_num_failed > 0:
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
if num_failed == 1:
message += f"*{name[1:]}: {num_failed} failed test*\n"
else:
message += f"*{name[1:]}: {num_failed} failed tests*\n"
__A = []
__A = {}
for test in failed_tests:
__A = test[0].split("::")
__A = data[0].split("/")[-1]
if data[0] not in filesafailed:
__A = [data[1:]]
else:
filesafailed[data[0]] += [data[1:]]
failed_table.append(data)
__A = [test[0] for test in failed_table]
__A = list(set(files))
# Count number of instances in failed_tests
__A = []
for file in individual_files:
table.append([file, len(filesafailed[file])])
__A = tabulate(
table,
headers=["Test Location", "Num Failed"],
tablefmt=hf_table_format,
stralign="right",
)
message += f"\n```\n{failed_table}\n```"
all_filesafailed.append(filesafailed)
if len(message) > 30_00:
__A = "Too many failed tests, please see the full report in the Action results."
__A = len(err) + 10
__A = message[: 30_00 - offset] + f'''\n...\n```\n{err}'''
print(f'''### {message}''')
else:
__A = "No failed tests! 🤗"
print(f'''## {message}''')
payload.append(no_error_payload)
if os.environ.get("TEST_TYPE", "") != "":
from slack_sdk import WebClient
__A = WebClient(token=os.environ["SLACK_API_TOKEN"])
if message != "No failed tests! 🤗":
__A = {
"type": "section",
"text": {
"type": "mrkdwn",
"text": message,
},
}
payload.append(md_report)
__A = {
"type": "section",
"text": {
"type": "mrkdwn",
"text": "*For more details:*",
},
"accessory": {
"type": "button",
"text": {
"type": "plain_text",
"text": "Check Action results",
"emoji": True,
},
"url": f'''https://github.com/{os.environ['GITHUB_REPOSITORY']}/actions/runs/{os.environ['GITHUB_RUN_ID']}''',
},
}
payload.append(action_button)
__A = {
"type": "context",
"elements": [
{
"type": "plain_text",
"text": f'''Nightly {os.environ.get('TEST_TYPE')} test results for {date.today()}''',
}
],
}
payload.append(date_report)
__A = client.chat_postMessage(channel="#accelerate-ci-daily", text=message, blocks=payload)
__A = response.data["ts"]
for failed_file in all_filesafailed:
for test_location, test_failures in failed_file.items():
# Keep only the first instance of the test name
__A = ""
for i, row in enumerate(test_failures):
if row[0] != test_class:
__A = row[0]
else:
__A = ""
__A = {
"type": "section",
"text": {
"type": "mrkdwn",
"text": f'''Test location: {test_location}\n```\n{tabulate(test_failures, headers=['Class', 'Test'], tablefmt=hf_table_format, stralign='right')}\n```''',
},
}
client.chat_postMessage(
channel="#accelerate-ci-daily",
thread_ts=ts,
blocks=[payload],
)
| 370 |
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , lowerCamelCase__ , lowerCamelCase__=13 , lowerCamelCase__=7 , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=99 , lowerCamelCase__=32 , lowerCamelCase__=5 , lowerCamelCase__=4 , lowerCamelCase__=37 , lowerCamelCase__="gelu" , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=512 , lowerCamelCase__=16 , lowerCamelCase__=2 , lowerCamelCase__=0.02 , lowerCamelCase__=4 , ) -> Optional[Any]:
'''simple docstring'''
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = seq_length
__lowerCamelCase = is_training
__lowerCamelCase = use_attention_mask
__lowerCamelCase = use_token_type_ids
__lowerCamelCase = use_labels
__lowerCamelCase = vocab_size
__lowerCamelCase = hidden_size
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_act
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = type_vocab_size
__lowerCamelCase = type_sequence_label_size
__lowerCamelCase = initializer_range
__lowerCamelCase = num_choices
def lowercase_ ( self ) -> List[Any]:
'''simple docstring'''
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCamelCase = None
if self.use_attention_mask:
__lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCamelCase = None
if self.use_token_type_ids:
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowerCamelCase = RobertaPreLayerNormConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def lowercase_ ( self ) -> str:
'''simple docstring'''
__lowerCamelCase = self.prepare_config_and_inputs()
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = config_and_inputs
__lowerCamelCase = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
return config, inputs_dict
def lowercase_ ( self ) -> List[Any]:
'''simple docstring'''
__lowerCamelCase = self.prepare_config_and_inputs()
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = config_and_inputs
__lowerCamelCase = True
__lowerCamelCase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
# Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = True
snake_case_ = (
(
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowercase_ ( self ) -> str:
'''simple docstring'''
__lowerCamelCase = FlaxRobertaPreLayerNormModelTester(self )
@slow
def lowercase_ ( self ) -> Optional[int]:
'''simple docstring'''
for model_class_name in self.all_model_classes:
__lowerCamelCase = model_class_name.from_pretrained('andreasmadsen/efficient_mlm_m0.40' , from_pt=lowerCamelCase__ )
__lowerCamelCase = model(np.ones((1, 1) ) )
self.assertIsNotNone(lowerCamelCase__ )
@require_flax
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def lowercase_ ( self ) -> Dict:
'''simple docstring'''
__lowerCamelCase = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained('andreasmadsen/efficient_mlm_m0.40' , from_pt=lowerCamelCase__ )
__lowerCamelCase = np.array([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] , dtype=jnp.intaa )
__lowerCamelCase = model(lowerCamelCase__ )[0]
__lowerCamelCase = [1, 11, 50_265]
self.assertEqual(list(output.shape ) , lowerCamelCase__ )
# compare the actual values for a slice.
__lowerCamelCase = np.array(
[[[40.48_80, 18.01_99, -5.23_67], [-1.88_77, -4.08_85, 10.70_85], [-2.26_13, -5.61_10, 7.26_65]]] , dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3] , lowerCamelCase__ , atol=1e-4 ) )
@slow
def lowercase_ ( self ) -> int:
'''simple docstring'''
__lowerCamelCase = FlaxRobertaPreLayerNormModel.from_pretrained('andreasmadsen/efficient_mlm_m0.40' , from_pt=lowerCamelCase__ )
__lowerCamelCase = np.array([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] , dtype=jnp.intaa )
__lowerCamelCase = model(lowerCamelCase__ )[0]
# compare the actual values for a slice.
__lowerCamelCase = np.array(
[[[0.02_08, -0.03_56, 0.02_37], [-0.15_69, -0.04_11, -0.26_26], [0.18_79, 0.01_25, -0.00_89]]] , dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3] , lowerCamelCase__ , atol=1e-4 ) )
| 348 | 0 |
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def lowerCamelCase_ ( UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Union[str, Any]=None ) -> Dict:
"""simple docstring"""
assert torch_layer.weight.shape == weight.shape, F"""{torch_layer} layer.weight does not match"""
__lowerCamelCase = nn.Parameter(UpperCamelCase__ )
if bias is not None:
assert torch_layer.bias.shape == bias.shape, F"""{torch_layer} layer.bias does not match"""
__lowerCamelCase = nn.Parameter(UpperCamelCase__ )
def lowerCamelCase_ ( UpperCamelCase__ : List[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Dict ) -> Union[str, Any]:
"""simple docstring"""
__lowerCamelCase = np.asarray(weights[0] )
__lowerCamelCase = np.asarray(weights[1] )
__lowerCamelCase = np.asarray(weights[2] )
set_param(
torch_layer.self_attention.query_key , torch.tensor(UpperCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , UpperCamelCase__ ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(UpperCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , UpperCamelCase__ ) , )
set_param(
torch_layer.output.dense , torch.tensor(UpperCamelCase__ ).view(-1 , UpperCamelCase__ ).contiguous().transpose(0 , 1 ) , )
def lowerCamelCase_ ( UpperCamelCase__ : List[str] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Union[str, Any] ) -> Dict:
"""simple docstring"""
__lowerCamelCase = np.asarray(weights[0] )
__lowerCamelCase = np.asarray(weights[1] )
__lowerCamelCase = np.asarray(weights[2] )
__lowerCamelCase = np.asarray(weights[3] )
set_param(
torch_layer.self_attention.query , torch.tensor(UpperCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , UpperCamelCase__ ) , )
set_param(
torch_layer.self_attention.key , torch.tensor(UpperCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , UpperCamelCase__ ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(UpperCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , UpperCamelCase__ ) , )
set_param(
torch_layer.output.dense , torch.tensor(UpperCamelCase__ ).view(-1 , UpperCamelCase__ ).contiguous().transpose(0 , 1 ) , )
def lowerCamelCase_ ( UpperCamelCase__ : str , UpperCamelCase__ : Tuple , UpperCamelCase__ : List[str] ) -> str:
"""simple docstring"""
__lowerCamelCase = weights[0][0][0]
__lowerCamelCase = np.asarray(layer_norm_a[0] )
__lowerCamelCase = np.asarray(layer_norm_a[1] )
set_param(
torch_block.attention.layer_norm , torch.tensor(UpperCamelCase__ ) , torch.tensor(UpperCamelCase__ ) , )
# lsh weights + output
__lowerCamelCase = weights[0][1]
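    # LSH self-attention shares one query/key projection, so it stores 3 weight groups
    # (query_key, value, output); local attention keeps q, k, v, output, i.e. 4 groups.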
if len(UpperCamelCase__ ) < 4:
set_layer_weights_in_torch_lsh(UpperCamelCase__ , torch_block.attention , UpperCamelCase__ )
else:
set_layer_weights_in_torch_local(UpperCamelCase__ , torch_block.attention , UpperCamelCase__ )
# intermediate weighs
__lowerCamelCase = weights[2][0][1][2]
# Chunked Feed Forward
if len(UpperCamelCase__ ) == 4:
__lowerCamelCase = intermediate_weights[2]
# layernorm 2
__lowerCamelCase = np.asarray(intermediate_weights[0][0] )
__lowerCamelCase = np.asarray(intermediate_weights[0][1] )
set_param(
torch_block.feed_forward.layer_norm , torch.tensor(UpperCamelCase__ ) , torch.tensor(UpperCamelCase__ ) , )
# intermediate dense
__lowerCamelCase = np.asarray(intermediate_weights[1][0] )
__lowerCamelCase = np.asarray(intermediate_weights[1][1] )
set_param(
torch_block.feed_forward.dense.dense , torch.tensor(UpperCamelCase__ ).transpose(0 , 1 ).contiguous() , torch.tensor(UpperCamelCase__ ) , )
# intermediate out
__lowerCamelCase = np.asarray(intermediate_weights[4][0] )
__lowerCamelCase = np.asarray(intermediate_weights[4][1] )
set_param(
torch_block.feed_forward.output.dense , torch.tensor(UpperCamelCase__ ).transpose(0 , 1 ).contiguous() , torch.tensor(UpperCamelCase__ ) , )
def lowerCamelCase_ ( UpperCamelCase__ : Dict , UpperCamelCase__ : List[Any] , UpperCamelCase__ : int ) -> Tuple:
"""simple docstring"""
__lowerCamelCase = torch_model.reformer
# word embeds
__lowerCamelCase = np.asarray(weights[1] )
set_param(
torch_model_reformer.embeddings.word_embeddings , torch.tensor(UpperCamelCase__ ) , )
if isinstance(weights[3] , UpperCamelCase__ ):
__lowerCamelCase = torch_model_reformer.embeddings.position_embeddings
for emb_idx in range(len(position_embeddings.weights ) ):
__lowerCamelCase = np.asarray(weights[3][emb_idx][0] )
assert (
position_embeddings.weights[emb_idx].shape == emb_weights.shape
), F"""{position_embeddings[emb_idx]} emb does not match"""
__lowerCamelCase = nn.Parameter(torch.tensor(UpperCamelCase__ ) )
__lowerCamelCase = weights[5]
assert len(torch_model_reformer.encoder.layers ) * 4 == len(
UpperCamelCase__ ), "HF and trax model do not have the same number of layers"
for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ):
__lowerCamelCase = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
set_block_weights_in_torch(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# output layer norm
__lowerCamelCase = np.asarray(weights[7][0] )
__lowerCamelCase = np.asarray(weights[7][1] )
set_param(
torch_model_reformer.encoder.layer_norm , torch.tensor(UpperCamelCase__ ) , torch.tensor(UpperCamelCase__ ) , )
# output embeddings
__lowerCamelCase = np.asarray(weights[9][0] )
__lowerCamelCase = np.asarray(weights[9][1] )
set_param(
torch_model.lm_head.decoder , torch.tensor(UpperCamelCase__ ).transpose(0 , 1 ).contiguous() , torch.tensor(UpperCamelCase__ ) , )
def lowerCamelCase_ ( UpperCamelCase__ : Dict , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[str] ) -> Optional[Any]:
"""simple docstring"""
__lowerCamelCase = ReformerConfig.from_json_file(UpperCamelCase__ )
print(F"""Building PyTorch model from configuration: {config}""" )
__lowerCamelCase = ReformerModelWithLMHead(UpperCamelCase__ )
with open(UpperCamelCase__ , 'rb' ) as f:
__lowerCamelCase = pickle.load(UpperCamelCase__ )['weights']
set_model_weights_in_torch(UpperCamelCase__ , UpperCamelCase__ , config.hidden_size )
# Save pytorch-model
print(F"""Save PyTorch model to {pytorch_dump_path}""" )
torch.save(model.state_dict() , UpperCamelCase__ )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--trax_model_pkl_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained Reformer model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
__A = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
| 371 |
from ..utils import DummyObject, requires_backends
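# Import-time placeholders: each class below raises a helpful error on instantiation
# when the `sentencepiece` backend is not installed.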
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Any:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> int:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> str:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> List[str]:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Any:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Any:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Dict:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Dict:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Tuple:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Dict:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> str:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Dict:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Tuple:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Dict:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Any:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Dict:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> List[Any]:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> List[Any]:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Any:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> str:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> int:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> int:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
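# ---------------------------------------------------------------------------
# Illustrative sketch (not the library's real implementation; helper names are
# hypothetical): every dummy class above follows the same pattern, where the
# backend check raises immediately on instantiation if sentencepiece is absent,
# instead of failing later with a confusing error far from the real cause.
import importlib.util

def requires_backends_sketch(obj, backends):
    # Collect every backend that cannot be imported in this environment.
    missing = [name for name in backends if importlib.util.find_spec(name) is None]
    if missing:
        raise ImportError(f"{type(obj).__name__} requires the missing backend(s): {missing}")

class DummySentencePieceTokenizerSketch:
    def __init__(self, *args, **kwargs):
        requires_backends_sketch(self, ["sentencepiece"])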
| 348 | 0 |
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowercase_ ( self ) -> Any:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
__lowerCamelCase = 1
__lowerCamelCase = 3
__lowerCamelCase = (32, 32)
__lowerCamelCase = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(lowerCamelCase__ )
return image
@property
def lowercase_ ( self ) -> Optional[int]:
'''simple docstring'''
torch.manual_seed(0 )
__lowerCamelCase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
return model
@property
def lowercase_ ( self ) -> Optional[int]:
'''simple docstring'''
torch.manual_seed(0 )
__lowerCamelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
return model
@property
def lowercase_ ( self ) -> int:
'''simple docstring'''
torch.manual_seed(0 )
__lowerCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModel(lowerCamelCase__ )
@property
def lowercase_ ( self ) -> Any:
'''simple docstring'''
def extract(*lowerCamelCase__ , **lowerCamelCase__ ):
class __lowerCAmelCase :
"""simple docstring"""
def __init__( self ) -> Tuple:
'''simple docstring'''
__lowerCamelCase = torch.ones([0] )
def lowercase_ ( self , lowerCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
self.pixel_values.to(lowerCamelCase__ )
return self
return Out()
return extract
def lowercase_ ( self ) -> int:
'''simple docstring'''
__lowerCamelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator
__lowerCamelCase = self.dummy_cond_unet
__lowerCamelCase = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' , clip_sample=lowerCamelCase__ , set_alpha_to_one=lowerCamelCase__ , )
__lowerCamelCase = self.dummy_vae
__lowerCamelCase = self.dummy_text_encoder
__lowerCamelCase = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
# assemble the pipeline around the DDIM scheduler configured above
__lowerCamelCase = StableDiffusionPipeline(
unet=lowerCamelCase__ , scheduler=lowerCamelCase__ , vae=lowerCamelCase__ , text_encoder=lowerCamelCase__ , tokenizer=lowerCamelCase__ , safety_checker=lowerCamelCase__ , feature_extractor=self.dummy_extractor , )
__lowerCamelCase = sd_pipe.to(lowerCamelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__lowerCamelCase = 'A painting of a squirrel eating a burger'
__lowerCamelCase = torch.Generator(device=lowerCamelCase__ ).manual_seed(0 )
__lowerCamelCase = sd_pipe([prompt] , generator=lowerCamelCase__ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' )
__lowerCamelCase = output.images
__lowerCamelCase = torch.Generator(device=lowerCamelCase__ ).manual_seed(0 )
__lowerCamelCase = sd_pipe(
[prompt] , generator=lowerCamelCase__ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , return_dict=lowerCamelCase__ , )[0]
__lowerCamelCase = image[0, -3:, -3:, -1]
__lowerCamelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__lowerCamelCase = np.array([0.57_56, 0.61_18, 0.50_05, 0.50_41, 0.54_71, 0.47_26, 0.49_76, 0.48_65, 0.48_64] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def lowercase_ ( self ) -> Any:
'''simple docstring'''
__lowerCamelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator
__lowerCamelCase = self.dummy_cond_unet
__lowerCamelCase = PNDMScheduler(skip_prk_steps=lowerCamelCase__ )
__lowerCamelCase = self.dummy_vae
__lowerCamelCase = self.dummy_text_encoder
__lowerCamelCase = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
# make sure here that pndm scheduler skips prk
__lowerCamelCase = StableDiffusionPipeline(
unet=lowerCamelCase__ , scheduler=lowerCamelCase__ , vae=lowerCamelCase__ , text_encoder=lowerCamelCase__ , tokenizer=lowerCamelCase__ , safety_checker=lowerCamelCase__ , feature_extractor=self.dummy_extractor , )
__lowerCamelCase = sd_pipe.to(lowerCamelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__lowerCamelCase = 'A painting of a squirrel eating a burger'
__lowerCamelCase = torch.Generator(device=lowerCamelCase__ ).manual_seed(0 )
__lowerCamelCase = sd_pipe([prompt] , generator=lowerCamelCase__ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' )
__lowerCamelCase = output.images
__lowerCamelCase = torch.Generator(device=lowerCamelCase__ ).manual_seed(0 )
__lowerCamelCase = sd_pipe(
[prompt] , generator=lowerCamelCase__ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , return_dict=lowerCamelCase__ , )[0]
__lowerCamelCase = image[0, -3:, -3:, -1]
__lowerCamelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__lowerCamelCase = np.array([0.51_25, 0.57_16, 0.48_28, 0.50_60, 0.56_50, 0.47_68, 0.51_85, 0.48_95, 0.49_93] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
__lowerCamelCase = StableDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-lms-pipe' , safety_checker=lowerCamelCase__ )
assert isinstance(lowerCamelCase__ , lowerCamelCase__ )
assert isinstance(pipe.scheduler , lowerCamelCase__ )
assert pipe.safety_checker is None
__lowerCamelCase = pipe('example prompt' , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowerCamelCase__ )
__lowerCamelCase = StableDiffusionPipeline.from_pretrained(lowerCamelCase__ )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
__lowerCamelCase = pipe('example prompt' , num_inference_steps=2 ).images[0]
assert image is not None
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
__lowerCamelCase = self.dummy_cond_unet
__lowerCamelCase = PNDMScheduler(skip_prk_steps=lowerCamelCase__ )
__lowerCamelCase = self.dummy_vae
__lowerCamelCase = self.dummy_text_encoder
__lowerCamelCase = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
# put models in fp16
__lowerCamelCase = unet.half()
__lowerCamelCase = vae.half()
__lowerCamelCase = bert.half()
# make sure here that pndm scheduler skips prk
__lowerCamelCase = StableDiffusionPipeline(
unet=lowerCamelCase__ , scheduler=lowerCamelCase__ , vae=lowerCamelCase__ , text_encoder=lowerCamelCase__ , tokenizer=lowerCamelCase__ , safety_checker=lowerCamelCase__ , feature_extractor=self.dummy_extractor , )
__lowerCamelCase = sd_pipe.to(lowerCamelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__lowerCamelCase = 'A painting of a squirrel eating a burger'
__lowerCamelCase = sd_pipe([prompt] , num_inference_steps=2 , output_type='np' ).images
assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowercase_ ( self ) -> Tuple:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase_ ( self ) -> List[Any]:
'''simple docstring'''
__lowerCamelCase = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' , safety_checker=lowerCamelCase__ )
__lowerCamelCase = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
__lowerCamelCase = sd_pipe.to(lowerCamelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__lowerCamelCase = (
'portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle'
' coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with'
' anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and'
' children from bahnhof zoo, detailed '
)
__lowerCamelCase = 4_003_660_346
__lowerCamelCase = 7
# without safety guidance (sld_guidance_scale = 0)
__lowerCamelCase = torch.manual_seed(lowerCamelCase__ )
__lowerCamelCase = sd_pipe(
[prompt] , generator=lowerCamelCase__ , guidance_scale=lowerCamelCase__ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=0 , )
__lowerCamelCase = output.images
__lowerCamelCase = image[0, -3:, -3:, -1]
__lowerCamelCase = [0.22_78, 0.22_31, 0.22_49, 0.23_33, 0.23_03, 0.18_85, 0.22_73, 0.21_44, 0.21_76]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
# with safety guidance (strong configuration)
__lowerCamelCase = torch.manual_seed(lowerCamelCase__ )
__lowerCamelCase = sd_pipe(
[prompt] , generator=lowerCamelCase__ , guidance_scale=lowerCamelCase__ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=2_000 , sld_warmup_steps=7 , sld_threshold=0.0_25 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
__lowerCamelCase = output.images
__lowerCamelCase = image[0, -3:, -3:, -1]
__lowerCamelCase = [0.23_83, 0.22_76, 0.2_36, 0.21_92, 0.21_86, 0.20_53, 0.19_71, 0.19_01, 0.17_19]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
__lowerCamelCase = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' , safety_checker=lowerCamelCase__ )
__lowerCamelCase = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
__lowerCamelCase = sd_pipe.to(lowerCamelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__lowerCamelCase = 'padme amidala taking a bath artwork, safe for work, no nudity'
__lowerCamelCase = 2_734_971_755
__lowerCamelCase = 7
__lowerCamelCase = torch.manual_seed(lowerCamelCase__ )
__lowerCamelCase = sd_pipe(
[prompt] , generator=lowerCamelCase__ , guidance_scale=lowerCamelCase__ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=0 , )
__lowerCamelCase = output.images
__lowerCamelCase = image[0, -3:, -3:, -1]
__lowerCamelCase = [0.35_02, 0.36_22, 0.33_96, 0.36_42, 0.34_78, 0.33_18, 0.35, 0.33_48, 0.32_97]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
__lowerCamelCase = torch.manual_seed(lowerCamelCase__ )
__lowerCamelCase = sd_pipe(
[prompt] , generator=lowerCamelCase__ , guidance_scale=lowerCamelCase__ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=2_000 , sld_warmup_steps=7 , sld_threshold=0.0_25 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
__lowerCamelCase = output.images
__lowerCamelCase = image[0, -3:, -3:, -1]
__lowerCamelCase = [0.55_31, 0.52_06, 0.48_95, 0.51_56, 0.51_82, 0.47_51, 0.48_02, 0.48_03, 0.44_43]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowercase_ ( self ) -> Dict:
'''simple docstring'''
__lowerCamelCase = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' )
__lowerCamelCase = sd_pipe.to(lowerCamelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__lowerCamelCase = (
'the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c.'
' leyendecker'
)
__lowerCamelCase = 1_044_355_234
__lowerCamelCase = 12
__lowerCamelCase = torch.manual_seed(lowerCamelCase__ )
__lowerCamelCase = sd_pipe(
[prompt] , generator=lowerCamelCase__ , guidance_scale=lowerCamelCase__ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=0 , )
__lowerCamelCase = output.images
__lowerCamelCase = image[0, -3:, -3:, -1]
__lowerCamelCase = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-7
__lowerCamelCase = torch.manual_seed(lowerCamelCase__ )
__lowerCamelCase = sd_pipe(
[prompt] , generator=lowerCamelCase__ , guidance_scale=lowerCamelCase__ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=2_000 , sld_warmup_steps=7 , sld_threshold=0.0_25 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
__lowerCamelCase = output.images
__lowerCamelCase = image[0, -3:, -3:, -1]
__lowerCamelCase = np.array([0.58_18, 0.62_85, 0.68_35, 0.60_19, 0.6_25, 0.67_54, 0.60_96, 0.63_34, 0.65_61] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
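# Illustrative numpy-only sketch of the regression pattern used throughout these
# tests: compare a small corner slice of the output image against a frozen
# reference within an absolute tolerance (the values below are placeholders).
import numpy as np

fake_output = np.zeros((1, 64, 64, 3), dtype=np.float32)   # stand-in for pipeline output
image_slice = fake_output[0, -3:, -3:, -1]                 # bottom-right 3x3 of the last channel
expected_slice = np.zeros(9, dtype=np.float32)             # frozen reference values in a real test
assert fake_output.shape == (1, 64, 64, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2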
| 350 |
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = '''hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'''
def lowercase_ ( self , lowerCamelCase__=0 ) -> int:
'''simple docstring'''
__lowerCamelCase = floats_tensor((1, 3, 128, 128) , rng=random.Random(lowerCamelCase__ ) )
__lowerCamelCase = np.random.RandomState(lowerCamelCase__ )
__lowerCamelCase = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 3,
'strength': 0.75,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
__lowerCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__lowerCamelCase = self.get_dummy_inputs()
__lowerCamelCase = pipe(**lowerCamelCase__ ).images
__lowerCamelCase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 128, 128, 3)
__lowerCamelCase = np.array([0.6_96_43, 0.5_84_84, 0.5_03_14, 0.5_87_60, 0.5_53_68, 0.5_96_43, 0.5_15_29, 0.4_12_17, 0.4_90_87] )
assert np.abs(image_slice - expected_slice ).max() < 1e-1
def lowercase_ ( self ) -> Tuple:
'''simple docstring'''
__lowerCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
__lowerCamelCase = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__lowerCamelCase = self.get_dummy_inputs()
__lowerCamelCase = pipe(**lowerCamelCase__ ).images
__lowerCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowerCamelCase = np.array([0.6_17_37, 0.5_46_42, 0.5_31_83, 0.5_44_65, 0.5_27_42, 0.6_05_25, 0.4_99_69, 0.4_06_55, 0.4_81_54] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
__lowerCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
__lowerCamelCase = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
# warmup pass to apply optimizations
__lowerCamelCase = pipe(**self.get_dummy_inputs() )
__lowerCamelCase = self.get_dummy_inputs()
__lowerCamelCase = pipe(**lowerCamelCase__ ).images
__lowerCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowerCamelCase = np.array([0.5_27_61, 0.5_99_77, 0.4_90_33, 0.4_96_19, 0.5_42_82, 0.5_03_11, 0.4_76_00, 0.4_09_18, 0.4_52_03] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def lowercase_ ( self ) -> str:
'''simple docstring'''
__lowerCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
__lowerCamelCase = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__lowerCamelCase = self.get_dummy_inputs()
__lowerCamelCase = pipe(**lowerCamelCase__ ).images
__lowerCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowerCamelCase = np.array([0.5_29_11, 0.6_00_04, 0.4_92_29, 0.4_98_05, 0.5_45_02, 0.5_06_80, 0.4_77_77, 0.4_10_28, 0.4_53_04] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
__lowerCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
__lowerCamelCase = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__lowerCamelCase = self.get_dummy_inputs()
__lowerCamelCase = pipe(**lowerCamelCase__ ).images
__lowerCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowerCamelCase = np.array([0.5_29_11, 0.6_00_04, 0.4_92_29, 0.4_98_05, 0.5_45_02, 0.5_06_80, 0.4_77_77, 0.4_10_28, 0.4_53_04] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def lowercase_ ( self ) -> List[Any]:
'''simple docstring'''
__lowerCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
__lowerCamelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__lowerCamelCase = self.get_dummy_inputs()
__lowerCamelCase = pipe(**lowerCamelCase__ ).images
__lowerCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowerCamelCase = np.array([0.6_53_31, 0.5_82_77, 0.4_82_04, 0.5_60_59, 0.5_36_65, 0.5_62_35, 0.5_09_69, 0.4_00_09, 0.4_65_52] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@property
def lowercase_ ( self ) -> int:
'''simple docstring'''
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def lowercase_ ( self ) -> Tuple:
'''simple docstring'''
__lowerCamelCase = ort.SessionOptions()
__lowerCamelCase = False
return options
def lowercase_ ( self ) -> Any:
'''simple docstring'''
__lowerCamelCase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
__lowerCamelCase = init_image.resize((768, 512) )
# using the PNDM scheduler by default
__lowerCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='onnx' , safety_checker=lowerCamelCase__ , feature_extractor=lowerCamelCase__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__lowerCamelCase = 'A fantasy landscape, trending on artstation'
__lowerCamelCase = np.random.RandomState(0 )
__lowerCamelCase = pipe(
prompt=lowerCamelCase__ , image=lowerCamelCase__ , strength=0.75 , guidance_scale=7.5 , num_inference_steps=10 , generator=lowerCamelCase__ , output_type='np' , )
__lowerCamelCase = output.images
__lowerCamelCase = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
__lowerCamelCase = np.array([0.49_09, 0.50_59, 0.53_72, 0.46_23, 0.48_76, 0.50_49, 0.48_20, 0.49_56, 0.50_19] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
def lowercase_ ( self ) -> int:
'''simple docstring'''
__lowerCamelCase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
__lowerCamelCase = init_image.resize((768, 512) )
__lowerCamelCase = LMSDiscreteScheduler.from_pretrained(
'runwayml/stable-diffusion-v1-5' , subfolder='scheduler' , revision='onnx' )
__lowerCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , revision='onnx' , scheduler=lowerCamelCase__ , safety_checker=lowerCamelCase__ , feature_extractor=lowerCamelCase__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__lowerCamelCase = 'A fantasy landscape, trending on artstation'
__lowerCamelCase = np.random.RandomState(0 )
__lowerCamelCase = pipe(
prompt=lowerCamelCase__ , image=lowerCamelCase__ , strength=0.75 , guidance_scale=7.5 , num_inference_steps=20 , generator=lowerCamelCase__ , output_type='np' , )
__lowerCamelCase = output.images
__lowerCamelCase = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
__lowerCamelCase = np.array([0.80_43, 0.9_26, 0.95_81, 0.81_19, 0.89_54, 0.9_13, 0.72_09, 0.74_63, 0.74_31] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
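# Illustrative sketch: these ONNX tests pass np.random.RandomState(seed) as the
# generator, so two runs with the same seed draw identical latents and the output
# slices stay comparable against frozen references.
import numpy as np

rng_a = np.random.RandomState(0)
rng_b = np.random.RandomState(0)
assert np.array_equal(rng_a.standard_normal((2, 4)), rng_b.standard_normal((2, 4)))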
| 348 | 0 |
from __future__ import annotations
def extended_euclid(a: int, b: int) -> tuple[int, int]:
    """Return (x, y) such that a * x + b * y == gcd(a, b)."""
    if b == 0:
        return (1, 0)
    (x, y) = extended_euclid(b, a % b)
    k = a // b
    return (y, x - k * y)


def chinese_remainder_theorem(n_a: int, r_a: int, n_b: int, r_b: int) -> int:
    """Return the unique x in [0, n_a * n_b) with x % n_a == r_a and x % n_b == r_b (n_a, n_b coprime)."""
    (x, y) = extended_euclid(n_a, n_b)
    m = n_a * n_b
    n = r_b * x * n_a + r_a * y * n_b
    return (n % m + m) % m


def invert_modulo(a: int, n: int) -> int:
    """Return the multiplicative inverse of a modulo n."""
    (b, x) = extended_euclid(a, n)
    if b < 0:
        b = (b % n + n) % n
    return b


def chinese_remainder_theorem2(n_a: int, r_a: int, n_b: int, r_b: int) -> int:
    """Same result as chinese_remainder_theorem, computed via modular inverses."""
    x, y = invert_modulo(n_a, n_b), invert_modulo(n_b, n_a)
    m = n_a * n_b
    n = r_b * x * n_a + r_a * y * n_b
    return (n % m + m) % m
if __name__ == "__main__":
from doctest import testmod
testmod(name="chinese_remainder_theorem", verbose=True)
testmod(name="chinese_remainder_theorem2", verbose=True)
testmod(name="invert_modulo", verbose=True)
testmod(name="extended_euclid", verbose=True)
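# Worked example (illustrative): x ≡ 1 (mod 5) and x ≡ 3 (mod 7) has the unique
# solution x = 31 in [0, 35); both CRT variants above agree on it.
assert chinese_remainder_theorem(5, 1, 7, 3) == 31
assert chinese_remainder_theorem2(5, 1, 7, 3) == 31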
| 351 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
__A = logging.get_logger(__name__)
__A = {
"openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json",
}
# fmt: off
__A = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 3_57, 3_66, 4_38, 5_32, 6_85,
7_05, 7_96, 9_30, 10_58, 12_20, 12_67, 12_79, 13_03, 13_43, 13_77,
13_91, 16_35, 17_82, 18_75, 21_62, 23_61, 24_88, 34_67, 40_08, 42_11,
46_00, 48_08, 52_99, 58_55, 63_29, 72_03, 96_09, 99_59, 1_05_63, 1_07_86,
1_14_20, 1_17_09, 1_19_07, 1_31_63, 1_36_97, 1_37_00, 1_48_08, 1_53_06, 1_64_10, 1_67_91,
1_79_92, 1_92_03, 1_95_10, 2_07_24, 2_23_05, 2_29_35, 2_70_07, 3_01_09, 3_04_20, 3_34_09,
3_49_49, 4_02_83, 4_04_93, 4_05_49, 4_72_82, 4_91_46, 5_02_57, 5_03_59, 5_03_60, 5_03_61
]
__A = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 3_59, 5_03, 5_22, 5_42, 8_73,
8_93, 9_02, 9_18, 9_22, 9_31, 13_50, 18_53, 19_82, 24_60, 26_27,
32_46, 32_53, 32_68, 35_36, 38_46, 39_61, 41_83, 46_67, 65_85, 66_47,
72_73, 90_61, 93_83, 1_04_28, 1_09_29, 1_19_38, 1_20_33, 1_23_31, 1_25_62, 1_37_93,
1_41_57, 1_46_35, 1_52_65, 1_56_18, 1_65_53, 1_66_04, 1_83_62, 1_89_56, 2_00_75, 2_16_75,
2_25_20, 2_61_30, 2_61_61, 2_64_35, 2_82_79, 2_94_64, 3_16_50, 3_23_02, 3_24_70, 3_68_65,
4_28_63, 4_74_25, 4_98_70, 5_02_54, 5_02_58, 5_03_60, 5_03_61, 5_03_62
]
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
snake_case_ = '''whisper'''
snake_case_ = ['''past_key_values''']
snake_case_ = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self , lowerCamelCase__=51_865 , lowerCamelCase__=80 , lowerCamelCase__=6 , lowerCamelCase__=4 , lowerCamelCase__=6 , lowerCamelCase__=4 , lowerCamelCase__=1_536 , lowerCamelCase__=1_536 , lowerCamelCase__=0.0 , lowerCamelCase__=0.0 , lowerCamelCase__=50_257 , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__="gelu" , lowerCamelCase__=256 , lowerCamelCase__=0.0 , lowerCamelCase__=0.0 , lowerCamelCase__=0.0 , lowerCamelCase__=0.02 , lowerCamelCase__=False , lowerCamelCase__=1_500 , lowerCamelCase__=448 , lowerCamelCase__=50_256 , lowerCamelCase__=50_256 , lowerCamelCase__=50_256 , lowerCamelCase__=None , lowerCamelCase__=[220, 50_256] , lowerCamelCase__=False , lowerCamelCase__=256 , lowerCamelCase__=False , lowerCamelCase__=0.05 , lowerCamelCase__=10 , lowerCamelCase__=2 , lowerCamelCase__=0.0 , lowerCamelCase__=10 , lowerCamelCase__=0 , lowerCamelCase__=7 , **lowerCamelCase__ , ) -> str:
'''simple docstring'''
__lowerCamelCase = vocab_size
__lowerCamelCase = num_mel_bins
__lowerCamelCase = d_model
__lowerCamelCase = encoder_layers
__lowerCamelCase = encoder_attention_heads
__lowerCamelCase = decoder_layers
__lowerCamelCase = decoder_attention_heads
__lowerCamelCase = decoder_ffn_dim
__lowerCamelCase = encoder_ffn_dim
__lowerCamelCase = dropout
__lowerCamelCase = attention_dropout
__lowerCamelCase = activation_dropout
__lowerCamelCase = activation_function
__lowerCamelCase = init_std
__lowerCamelCase = encoder_layerdrop
__lowerCamelCase = decoder_layerdrop
__lowerCamelCase = use_cache
__lowerCamelCase = encoder_layers
__lowerCamelCase = scale_embedding # scale factor will be sqrt(d_model) if True
__lowerCamelCase = max_source_positions
__lowerCamelCase = max_target_positions
# Audio Classification-specific parameters. Feel free to ignore for other classes.
__lowerCamelCase = classifier_proj_size
__lowerCamelCase = use_weighted_layer_sum
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__lowerCamelCase = apply_spec_augment
__lowerCamelCase = mask_time_prob
__lowerCamelCase = mask_time_length
__lowerCamelCase = mask_time_min_masks
__lowerCamelCase = mask_feature_prob
__lowerCamelCase = mask_feature_length
__lowerCamelCase = mask_feature_min_masks
__lowerCamelCase = median_filter_width
super().__init__(
pad_token_id=lowerCamelCase__ , bos_token_id=lowerCamelCase__ , eos_token_id=lowerCamelCase__ , is_encoder_decoder=lowerCamelCase__ , decoder_start_token_id=lowerCamelCase__ , suppress_tokens=lowerCamelCase__ , begin_suppress_tokens=lowerCamelCase__ , **lowerCamelCase__ , )
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
@property
def lowercase_ ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
__lowerCamelCase = OrderedDict(
[
('input_features', {0: 'batch', 1: 'feature_size', 2: 'encoder_sequence'}),
] )
if self.use_past:
__lowerCamelCase = {0: 'batch'}
else:
__lowerCamelCase = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(lowerCamelCase__ , direction='inputs' )
return common_inputs
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ = -1 , lowerCamelCase__ = -1 , lowerCamelCase__ = False , lowerCamelCase__ = None , lowerCamelCase__ = 22_050 , lowerCamelCase__ = 5.0 , lowerCamelCase__ = 220 , ) -> Mapping[str, Any]:
'''simple docstring'''
__lowerCamelCase = OrderedDict()
__lowerCamelCase = OnnxConfig.generate_dummy_inputs(
self , preprocessor=preprocessor.feature_extractor , batch_size=lowerCamelCase__ , framework=lowerCamelCase__ , sampling_rate=lowerCamelCase__ , time_duration=lowerCamelCase__ , frequency=lowerCamelCase__ , )
__lowerCamelCase = encoder_inputs['input_features'].shape[2]
__lowerCamelCase = encoder_sequence_length // 2 if self.use_past else seq_length
__lowerCamelCase = super().generate_dummy_inputs(
preprocessor.tokenizer , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase = encoder_inputs.pop('input_features' )
__lowerCamelCase = decoder_inputs.pop('decoder_input_ids' )
if "past_key_values" in decoder_inputs:
__lowerCamelCase = decoder_inputs.pop('past_key_values' )
return dummy_inputs
@property
def lowercase_ ( self ) -> float:
'''simple docstring'''
return 1e-3
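# Illustrative sketch of the dynamic-axes bookkeeping this OnnxConfig performs
# (function name is hypothetical): each ONNX input maps tensor dimensions to
# symbolic axis names, and enabling the KV cache drops the decoder sequence axis
# because only one new token is fed per step.
from collections import OrderedDict

def dynamic_axes_sketch(use_past: bool):
    axes = OrderedDict([('input_features', {0: 'batch', 1: 'feature_size', 2: 'encoder_sequence'})])
    if use_past:
        axes['decoder_input_ids'] = {0: 'batch'}
    else:
        axes['decoder_input_ids'] = {0: 'batch', 1: 'decoder_sequence'}
    return axes

assert 'decoder_sequence' not in dynamic_axes_sketch(True)['decoder_input_ids'].values()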
| 348 | 0 |
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"):
__A = {
"linear": PIL.Image.Resampling.BILINEAR,
"bilinear": PIL.Image.Resampling.BILINEAR,
"bicubic": PIL.Image.Resampling.BICUBIC,
"lanczos": PIL.Image.Resampling.LANCZOS,
"nearest": PIL.Image.Resampling.NEAREST,
}
else:
__A = {
"linear": PIL.Image.LINEAR,
"bilinear": PIL.Image.BILINEAR,
"bicubic": PIL.Image.BICUBIC,
"lanczos": PIL.Image.LANCZOS,
"nearest": PIL.Image.NEAREST,
}
def pt_to_pil(images):
    """Convert a batch of torch image tensors in [-1, 1] to a list of PIL images."""
    images = (images / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    return numpy_to_pil(images)


def numpy_to_pil(images):
    """Convert a numpy image batch of shape (N, H, W, C) in [0, 1] to a list of PIL images."""
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype('uint8')
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode='L') for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]
    return pil_images
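# Usage sketch (assumes numpy and Pillow are installed): a float batch in [0, 1]
# becomes a list of 8-bit PIL images.
import numpy as np

batch = np.random.rand(2, 8, 8, 3).astype('float32')
pil_batch = numpy_to_pil(batch)
assert len(pil_batch) == 2 and pil_batch[0].size == (8, 8)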
| 352 |
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class __lowerCAmelCase :
"""simple docstring"""
def __init__( self , lowerCamelCase__ , lowerCamelCase__=14 , lowerCamelCase__=7 , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=False , lowerCamelCase__=True , lowerCamelCase__=99 , lowerCamelCase__=32 , lowerCamelCase__=4 , lowerCamelCase__=4 , lowerCamelCase__=4 , lowerCamelCase__=37 , lowerCamelCase__="gelu" , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=512 , lowerCamelCase__=0.02 , ) -> int:
'''simple docstring'''
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = seq_length
__lowerCamelCase = is_training
__lowerCamelCase = use_input_mask
__lowerCamelCase = use_token_type_ids
__lowerCamelCase = use_labels
__lowerCamelCase = vocab_size
__lowerCamelCase = hidden_size
__lowerCamelCase = rotary_dim
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_act
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = initializer_range
__lowerCamelCase = None
__lowerCamelCase = vocab_size - 1
__lowerCamelCase = vocab_size - 1
__lowerCamelCase = vocab_size - 1
def lowercase_ ( self ) -> List[Any]:
'''simple docstring'''
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCamelCase = None
if self.use_input_mask:
__lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCamelCase = GPTJConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=lowerCamelCase__ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , )
return (config, input_ids, input_mask)
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
__lowerCamelCase = self.prepare_config_and_inputs()
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = config_and_inputs
__lowerCamelCase = {'input_ids': input_ids, 'attention_mask': attention_mask}
return config, inputs_dict
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Any:
'''simple docstring'''
__lowerCamelCase = 20
__lowerCamelCase = model_class_name(lowerCamelCase__ )
__lowerCamelCase = model.init_cache(input_ids.shape[0] , lowerCamelCase__ )
__lowerCamelCase = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype='i4' )
__lowerCamelCase = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
__lowerCamelCase = model(
input_ids[:, :-1] , attention_mask=lowerCamelCase__ , past_key_values=lowerCamelCase__ , position_ids=lowerCamelCase__ , )
__lowerCamelCase = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='i4' )
__lowerCamelCase = model(
input_ids[:, -1:] , attention_mask=lowerCamelCase__ , past_key_values=outputs_cache.past_key_values , position_ids=lowerCamelCase__ , )
__lowerCamelCase = model(lowerCamelCase__ )
__lowerCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> int:
'''simple docstring'''
__lowerCamelCase = 20
__lowerCamelCase = model_class_name(lowerCamelCase__ )
__lowerCamelCase = jnp.concatenate(
[attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , )
__lowerCamelCase = model.init_cache(input_ids.shape[0] , lowerCamelCase__ )
__lowerCamelCase = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
__lowerCamelCase = model(
input_ids[:, :-1] , attention_mask=lowerCamelCase__ , past_key_values=lowerCamelCase__ , position_ids=lowerCamelCase__ , )
__lowerCamelCase = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='i4' )
__lowerCamelCase = model(
input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=lowerCamelCase__ , position_ids=lowerCamelCase__ , )
__lowerCamelCase = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ )
__lowerCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )
@require_flax
class __lowerCAmelCase ( __magic_name__ , __magic_name__ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
snake_case_ = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
__lowerCamelCase = FlaxGPTJModelTester(self )
def lowercase_ ( self ) -> str:
'''simple docstring'''
for model_class_name in self.all_model_classes:
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def lowercase_ ( self ) -> Optional[int]:
'''simple docstring'''
for model_class_name in self.all_model_classes:
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward_with_attn_mask(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
@tooslow
def lowercase_ ( self ) -> str:
'''simple docstring'''
__lowerCamelCase = GPTaTokenizer.from_pretrained('gpt2' , pad_token='<|endoftext|>' , padding_side='left' )
__lowerCamelCase = tokenizer(['Hello this is a long string', 'Hey'] , return_tensors='np' , padding=lowerCamelCase__ , truncation=lowerCamelCase__ )
__lowerCamelCase = FlaxGPTJForCausalLM.from_pretrained('EleutherAI/gpt-j-6B' )
__lowerCamelCase = False
__lowerCamelCase = model.config.eos_token_id
__lowerCamelCase = jax.jit(model.generate )
__lowerCamelCase = jit_generate(
inputs['input_ids'] , attention_mask=inputs['attention_mask'] , pad_token_id=tokenizer.pad_token_id ).sequences
__lowerCamelCase = tokenizer.batch_decode(lowerCamelCase__ , skip_special_tokens=lowerCamelCase__ )
__lowerCamelCase = [
'Hello this is a long string of text.\n\nI\'m trying to get the text of the',
'Hey, I\'m a little late to the party. I\'m going to',
]
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
@is_pt_flax_cross_test
def lowercase_ ( self ) -> Any:
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
__lowerCamelCase = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
__lowerCamelCase = model_class.__name__[4:] # Skip the "Flax" at the beginning
__lowerCamelCase = getattr(lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase , __lowerCamelCase = pt_inputs['input_ids'].shape
__lowerCamelCase = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(lowerCamelCase__ ):
__lowerCamelCase = 0
__lowerCamelCase = 1
__lowerCamelCase = 0
__lowerCamelCase = 1
__lowerCamelCase = pt_model_class(lowerCamelCase__ ).eval()
__lowerCamelCase = model_class(lowerCamelCase__ , dtype=jnp.floataa )
__lowerCamelCase = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , lowerCamelCase__ )
__lowerCamelCase = fx_state
with torch.no_grad():
__lowerCamelCase = pt_model(**lowerCamelCase__ ).to_tuple()
__lowerCamelCase = fx_model(**lowerCamelCase__ ).to_tuple()
self.assertEqual(len(lowerCamelCase__ ) , len(lowerCamelCase__ ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(lowerCamelCase__ )
__lowerCamelCase = model_class.from_pretrained(lowerCamelCase__ , from_pt=lowerCamelCase__ )
__lowerCamelCase = fx_model_loaded(**lowerCamelCase__ ).to_tuple()
self.assertEqual(
len(lowerCamelCase__ ) , len(lowerCamelCase__ ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output_loaded, pt_output in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
@is_pt_flax_cross_test
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
__lowerCamelCase = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
__lowerCamelCase = model_class.__name__[4:] # Skip the "Flax" at the beginning
__lowerCamelCase = getattr(lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase = pt_model_class(lowerCamelCase__ ).eval()
__lowerCamelCase = model_class(lowerCamelCase__ , dtype=jnp.floataa )
__lowerCamelCase = load_flax_weights_in_pytorch_model(lowerCamelCase__ , fx_model.params )
__lowerCamelCase , __lowerCamelCase = pt_inputs['input_ids'].shape
__lowerCamelCase = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(lowerCamelCase__ ):
__lowerCamelCase = 0
__lowerCamelCase = 1
__lowerCamelCase = 0
__lowerCamelCase = 1
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
__lowerCamelCase = pt_model(**lowerCamelCase__ ).to_tuple()
__lowerCamelCase = fx_model(**lowerCamelCase__ ).to_tuple()
self.assertEqual(len(lowerCamelCase__ ) , len(lowerCamelCase__ ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(lowerCamelCase__ )
__lowerCamelCase = pt_model_class.from_pretrained(lowerCamelCase__ , from_flax=lowerCamelCase__ )
with torch.no_grad():
__lowerCamelCase = pt_model_loaded(**lowerCamelCase__ ).to_tuple()
self.assertEqual(
len(lowerCamelCase__ ) , len(lowerCamelCase__ ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
@tooslow
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
for model_class_name in self.all_model_classes:
__lowerCamelCase = model_class_name.from_pretrained('EleutherAI/gpt-j-6B' )
__lowerCamelCase = model(np.ones((1, 1) ) )
self.assertIsNotNone(lowerCamelCase__ )
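# Illustrative numpy sketch of the position-ids construction used in the cache
# tests above: broadcast the range 0..L-2 across the batch so every prefix token
# receives its absolute position.
import numpy as np

batch_size, seq_length = 2, 5
position_ids = np.broadcast_to(np.arange(seq_length - 1)[None, :], (batch_size, seq_length - 1))
assert position_ids.shape == (2, 4) and position_ids[1, -1] == 3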
| 348 | 0 |
def solution(n: int = 10) -> str:
    """Return the last n digits of the non-Mersenne prime 28433 * 2**7830457 + 1 (Project Euler 97)."""
    if not isinstance(n, int) or n < 0:
        raise ValueError('Invalid input')
    modulus = 10**n
    number = 28_433 * pow(2, 7_830_457, modulus) + 1
    return str(number % modulus)
if __name__ == "__main__":
from doctest import testmod
testmod()
print(f'''{solution(10) = }''')
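# Small-scale sanity sketch of the same modular-exponentiation trick: the last two
# digits of 28433 * 2**10 + 1, via pow(2, 10, 100) == 24 and (28433 * 24 + 1) % 100 == 93.
assert (28_433 * pow(2, 10, 100) + 1) % 100 == 93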
| 353 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
__A = False
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def lowercase_ ( self ) -> int:
'''simple docstring'''
return 12
@property
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
return 12
@property
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
return 32
@property
def lowercase_ ( self ) -> Dict:
'''simple docstring'''
torch.manual_seed(0 )
__lowerCamelCase = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , )
return model
@property
def lowercase_ ( self ) -> List[Any]:
'''simple docstring'''
__lowerCamelCase = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
torch.manual_seed(0 )
__lowerCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModel(lowerCamelCase__ )
@property
def lowercase_ ( self ) -> str:
'''simple docstring'''
torch.manual_seed(0 )
__lowerCamelCase = 12
__lowerCamelCase = 12
__lowerCamelCase = {
'attention_bias': True,
'cross_attention_dim': 32,
'attention_head_dim': height * width,
'num_attention_heads': 1,
'num_vector_embeds': self.num_embed,
'num_embeds_ada_norm': self.num_embeds_ada_norm,
'norm_num_groups': 32,
'sample_size': width,
'activation_fn': 'geglu-approximate',
}
__lowerCamelCase = TransformeraDModel(**lowerCamelCase__ )
return model
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
__lowerCamelCase = 'cpu'
__lowerCamelCase = self.dummy_vqvae
__lowerCamelCase = self.dummy_text_encoder
__lowerCamelCase = self.dummy_tokenizer
__lowerCamelCase = self.dummy_transformer
__lowerCamelCase = VQDiffusionScheduler(self.num_embed )
__lowerCamelCase = LearnedClassifierFreeSamplingEmbeddings(learnable=lowerCamelCase__ )
__lowerCamelCase = VQDiffusionPipeline(
vqvae=lowerCamelCase__ , text_encoder=lowerCamelCase__ , tokenizer=lowerCamelCase__ , transformer=lowerCamelCase__ , scheduler=lowerCamelCase__ , learned_classifier_free_sampling_embeddings=lowerCamelCase__ , )
__lowerCamelCase = pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__lowerCamelCase = 'teddy bear playing in the pool'
__lowerCamelCase = torch.Generator(device=lowerCamelCase__ ).manual_seed(0 )
__lowerCamelCase = pipe([prompt] , generator=lowerCamelCase__ , num_inference_steps=2 , output_type='np' )
__lowerCamelCase = output.images
__lowerCamelCase = torch.Generator(device=lowerCamelCase__ ).manual_seed(0 )
__lowerCamelCase = pipe(
[prompt] , generator=lowerCamelCase__ , output_type='np' , return_dict=lowerCamelCase__ , num_inference_steps=2 )[0]
__lowerCamelCase = image[0, -3:, -3:, -1]
__lowerCamelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
__lowerCamelCase = np.array([0.65_51, 0.61_68, 0.50_08, 0.56_76, 0.56_59, 0.42_95, 0.60_73, 0.55_99, 0.49_92] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def lowercase_ ( self ) -> Tuple:
'''simple docstring'''
__lowerCamelCase = 'cpu'
__lowerCamelCase = self.dummy_vqvae
__lowerCamelCase = self.dummy_text_encoder
__lowerCamelCase = self.dummy_tokenizer
__lowerCamelCase = self.dummy_transformer
__lowerCamelCase = VQDiffusionScheduler(self.num_embed )
__lowerCamelCase = LearnedClassifierFreeSamplingEmbeddings(
learnable=lowerCamelCase__ , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length )
__lowerCamelCase = VQDiffusionPipeline(
vqvae=lowerCamelCase__ , text_encoder=lowerCamelCase__ , tokenizer=lowerCamelCase__ , transformer=lowerCamelCase__ , scheduler=lowerCamelCase__ , learned_classifier_free_sampling_embeddings=lowerCamelCase__ , )
__lowerCamelCase = pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__lowerCamelCase = 'teddy bear playing in the pool'
__lowerCamelCase = torch.Generator(device=lowerCamelCase__ ).manual_seed(0 )
__lowerCamelCase = pipe([prompt] , generator=lowerCamelCase__ , num_inference_steps=2 , output_type='np' )
__lowerCamelCase = output.images
__lowerCamelCase = torch.Generator(device=lowerCamelCase__ ).manual_seed(0 )
__lowerCamelCase = pipe(
[prompt] , generator=lowerCamelCase__ , output_type='np' , return_dict=lowerCamelCase__ , num_inference_steps=2 )[0]
__lowerCamelCase = image[0, -3:, -3:, -1]
__lowerCamelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
__lowerCamelCase = np.array([0.66_93, 0.60_75, 0.49_59, 0.57_01, 0.55_83, 0.43_33, 0.61_71, 0.56_84, 0.49_88] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
__lowerCamelCase = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy' )
__lowerCamelCase = VQDiffusionPipeline.from_pretrained('microsoft/vq-diffusion-ithq' )
__lowerCamelCase = pipeline.to(lowerCamelCase__ )
pipeline.set_progress_bar_config(disable=lowerCamelCase__ )
# requires GPU generator for gumbel softmax
# don't use GPU generator in tests though
__lowerCamelCase = torch.Generator(device=lowerCamelCase__ ).manual_seed(0 )
__lowerCamelCase = pipeline(
'teddy bear playing in the pool' , num_images_per_prompt=1 , generator=lowerCamelCase__ , output_type='np' , )
__lowerCamelCase = output.images[0]
assert image.shape == (256, 256, 3)
assert np.abs(expected_image - image ).max() < 2.0
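# Illustrative sketch: the pipelines above rely on torch.Generator.manual_seed for
# reproducibility, so identically seeded generators draw identical tensors.
import torch

gen_a = torch.Generator(device='cpu').manual_seed(0)
gen_b = torch.Generator(device='cpu').manual_seed(0)
assert torch.equal(torch.randn(2, 3, generator=gen_a), torch.randn(2, 3, generator=gen_b))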
| 348 | 0 |
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
__A = "\n@inproceedings{xu-etal-2016-optimizing,\n title = {Optimizing Statistical Machine Translation for Text Simplification},\n authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},\n journal = {Transactions of the Association for Computational Linguistics},\n volume = {4},\n year={2016},\n url = {https://www.aclweb.org/anthology/Q16-1029},\n pages = {401--415\n},\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
__A = "\\nWIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU\nIt can be used to evaluate the quality of machine-generated texts.\n"
__A = "\nCalculates sari score (between 0 and 100) given a list of source and predicted\nsentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.\nArgs:\n sources: list of source sentences where each sentence should be a string.\n predictions: list of predicted sentences where each sentence should be a string.\n references: list of lists of reference sentences where each sentence should be a string.\nReturns:\n sari: sari score\n sacrebleu: sacrebleu score\n exact: exact score\n\nExamples:\n >>> sources=[\"About 95 species are currently accepted .\"]\n >>> predictions=[\"About 95 you now get in .\"]\n >>> references=[[\"About 95 species are currently known .\"]]\n >>> wiki_split = datasets.load_metric(\"wiki_split\")\n >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)\n >>> print(results)\n {'sari': 21.805555555555557, 'sacrebleu': 14.535768424205482, 'exact': 0.0}\n"
def lowerCamelCase_ ( UpperCamelCase__ : List[str] ) -> Dict:
"""simple docstring"""
def remove_articles(UpperCamelCase__ : Any ):
__lowerCamelCase = re.compile(R'\b(a|an|the)\b' , re.UNICODE )
return re.sub(UpperCamelCase__ , ' ' , UpperCamelCase__ )
def white_space_fix(UpperCamelCase__ : Optional[int] ):
return " ".join(text.split() )
def remove_punc(UpperCamelCase__ : List[Any] ):
__lowerCamelCase = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(UpperCamelCase__ : Tuple ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(UpperCamelCase__ ) ) ) )
def lowerCamelCase_ ( UpperCamelCase__ : str , UpperCamelCase__ : Dict ) -> Union[str, Any]:
"""simple docstring"""
return int(normalize_answer(UpperCamelCase__ ) == normalize_answer(UpperCamelCase__ ) )
def lowerCamelCase_ ( UpperCamelCase__ : str , UpperCamelCase__ : str ) -> int:
"""simple docstring"""
__lowerCamelCase = [any(compute_exact(UpperCamelCase__ , UpperCamelCase__ ) for ref in refs ) for pred, refs in zip(UpperCamelCase__ , UpperCamelCase__ )]
return (sum(UpperCamelCase__ ) / len(UpperCamelCase__ )) * 100
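# Illustrative re-implementation sketch of the normalization pipeline above
# (helper name is hypothetical): lowercase, strip punctuation, drop articles,
# then squash whitespace, so exact match ignores superficial differences.
def normalize_for_match_sketch(text: str) -> str:
    text = text.lower()
    text = ''.join(ch for ch in text if ch not in set(string.punctuation))
    text = re.sub(r'\b(a|an|the)\b', ' ', text)
    return ' '.join(text.split())

assert normalize_for_match_sketch('The  cat!') == normalize_for_match_sketch('cat')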
def SARIngram(sgrams, cgrams, rgramslist, numref):
    """simple docstring"""
    rgramsall = [rgram for rgrams in rgramslist for rgram in rgrams]
    rgramcounter = Counter(rgramsall)

    sgramcounter = Counter(sgrams)
    sgramcounter_rep = Counter()
    for sgram, scount in sgramcounter.items():
        sgramcounter_rep[sgram] = scount * numref

    cgramcounter = Counter(cgrams)
    cgramcounter_rep = Counter()
    for cgram, ccount in cgramcounter.items():
        cgramcounter_rep[cgram] = ccount * numref

    # KEEP
    keepgramcounter_rep = sgramcounter_rep & cgramcounter_rep
    keepgramcountergood_rep = keepgramcounter_rep & rgramcounter
    keepgramcounterall_rep = sgramcounter_rep & rgramcounter

    keeptmpscore1 = 0
    keeptmpscore2 = 0
    for keepgram in keepgramcountergood_rep:
        keeptmpscore1 += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
        # Fix an alleged bug [2] in the keep score computation.
        # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
        keeptmpscore2 += keepgramcountergood_rep[keepgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    keepscore_precision = 1
    keepscore_recall = 1
    if len(keepgramcounter_rep) > 0:
        keepscore_precision = keeptmpscore1 / len(keepgramcounter_rep)
    if len(keepgramcounterall_rep) > 0:
        # Fix an alleged bug [2] in the keep score computation.
        # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
        keepscore_recall = keeptmpscore2 / sum(keepgramcounterall_rep.values())
    keepscore = 0
    if keepscore_precision > 0 or keepscore_recall > 0:
        keepscore = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)

    # DELETION
    delgramcounter_rep = sgramcounter_rep - cgramcounter_rep
    delgramcountergood_rep = delgramcounter_rep - rgramcounter
    delgramcounterall_rep = sgramcounter_rep - rgramcounter
    deltmpscore1 = 0
    deltmpscore2 = 0
    for delgram in delgramcountergood_rep:
        deltmpscore1 += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
        deltmpscore2 += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    delscore_precision = 1
    if len(delgramcounter_rep) > 0:
        delscore_precision = deltmpscore1 / len(delgramcounter_rep)

    # ADDITION
    addgramcounter = set(cgrams) - set(sgrams)
    addgramcountergood = set(addgramcounter) & set(rgramsall)
    addgramcounterall = set(rgramsall) - set(sgrams)
    addtmpscore = 0
    for addgram in addgramcountergood:
        addtmpscore += 1
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    addscore_precision = 1
    addscore_recall = 1
    if len(addgramcounter) > 0:
        addscore_precision = addtmpscore / len(addgramcounter)
    if len(addgramcounterall) > 0:
        addscore_recall = addtmpscore / len(addgramcounterall)
    addscore = 0
    if addscore_precision > 0 or addscore_recall > 0:
        addscore = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)
    return (keepscore, delscore_precision, addscore)
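# A hypothetical smoke test for SARIngram above (toy unigrams, one reference):
# an identical source/candidate/reference gives a perfect keep F1, while the
# empty delete/add sets fall back to the 0/0 = 1 convention.
def _sari_ngram_smoke_test():
    assert SARIngram(['the', 'cat'], ['the', 'cat'], [['the', 'cat']], 1) == (1.0, 1, 1.0)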
def SARIsent(ssent, csent, rsents):
    """simple docstring"""
    numref = len(rsents)

    s1grams = ssent.split(' ')
    c1grams = csent.split(' ')
    s2grams = []
    c2grams = []
    s3grams = []
    c3grams = []
    s4grams = []
    c4grams = []

    r1gramslist = []
    r2gramslist = []
    r3gramslist = []
    r4gramslist = []
    for rsent in rsents:
        r1grams = rsent.split(' ')
        r2grams = []
        r3grams = []
        r4grams = []
        r1gramslist.append(r1grams)
        for i in range(0, len(r1grams) - 1):
            if i < len(r1grams) - 1:
                r2gram = r1grams[i] + ' ' + r1grams[i + 1]
                r2grams.append(r2gram)
            if i < len(r1grams) - 2:
                r3gram = r1grams[i] + ' ' + r1grams[i + 1] + ' ' + r1grams[i + 2]
                r3grams.append(r3gram)
            if i < len(r1grams) - 3:
                r4gram = r1grams[i] + ' ' + r1grams[i + 1] + ' ' + r1grams[i + 2] + ' ' + r1grams[i + 3]
                r4grams.append(r4gram)
        r2gramslist.append(r2grams)
        r3gramslist.append(r3grams)
        r4gramslist.append(r4grams)

    for i in range(0, len(s1grams) - 1):
        if i < len(s1grams) - 1:
            s2gram = s1grams[i] + ' ' + s1grams[i + 1]
            s2grams.append(s2gram)
        if i < len(s1grams) - 2:
            s3gram = s1grams[i] + ' ' + s1grams[i + 1] + ' ' + s1grams[i + 2]
            s3grams.append(s3gram)
        if i < len(s1grams) - 3:
            s4gram = s1grams[i] + ' ' + s1grams[i + 1] + ' ' + s1grams[i + 2] + ' ' + s1grams[i + 3]
            s4grams.append(s4gram)

    for i in range(0, len(c1grams) - 1):
        if i < len(c1grams) - 1:
            c2gram = c1grams[i] + ' ' + c1grams[i + 1]
            c2grams.append(c2gram)
        if i < len(c1grams) - 2:
            c3gram = c1grams[i] + ' ' + c1grams[i + 1] + ' ' + c1grams[i + 2]
            c3grams.append(c3gram)
        if i < len(c1grams) - 3:
            c4gram = c1grams[i] + ' ' + c1grams[i + 1] + ' ' + c1grams[i + 2] + ' ' + c1grams[i + 3]
            c4grams.append(c4gram)

    (keep1score, del1score, add1score) = SARIngram(s1grams, c1grams, r1gramslist, numref)
    (keep2score, del2score, add2score) = SARIngram(s2grams, c2grams, r2gramslist, numref)
    (keep3score, del3score, add3score) = SARIngram(s3grams, c3grams, r3gramslist, numref)
    (keep4score, del4score, add4score) = SARIngram(s4grams, c4grams, r4gramslist, numref)
    avgkeepscore = sum([keep1score, keep2score, keep3score, keep4score]) / 4
    avgdelscore = sum([del1score, del2score, del3score, del4score]) / 4
    avgaddscore = sum([add1score, add2score, add3score, add4score]) / 4
    finalscore = (avgkeepscore + avgdelscore + avgaddscore) / 3
    return finalscore
def normalize(sentence, lowercase: bool = True, tokenizer: str = "13a", return_str: bool = True):
    """simple docstring"""
    if lowercase:
        sentence = sentence.lower()
    if tokenizer in ["13a", "intl"]:
        if version.parse(sacrebleu.__version__).major >= 2:
            normalized_sent = sacrebleu.metrics.bleu._get_tokenizer(tokenizer)()(sentence)
        else:
            normalized_sent = sacrebleu.TOKENIZERS[tokenizer]()(sentence)
    elif tokenizer == "moses":
        normalized_sent = sacremoses.MosesTokenizer().tokenize(sentence, return_str=True, escape=False)
    elif tokenizer == "penn":
        normalized_sent = sacremoses.MosesTokenizer().penn_tokenize(sentence, return_str=True)
    else:
        normalized_sent = sentence
    if not return_str:
        normalized_sent = normalized_sent.split()
    return normalized_sent
def compute_sari(sources, predictions, references):
    """simple docstring"""
    if not (len(sources) == len(predictions) == len(references)):
        raise ValueError('Sources length must match predictions and references lengths.')
    sari_score = 0
    for src, pred, refs in zip(sources, predictions, references):
        sari_score += SARIsent(normalize(src), normalize(pred), [normalize(sent) for sent in refs])
    sari_score = sari_score / len(predictions)
    return 100 * sari_score
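# Illustrative end-to-end call of compute_sari above (toy data; the exact
# value depends on the '13a' tokenization applied in normalize, so only the
# 0-100 range is asserted here):
def _sari_smoke_test():
    score = compute_sari(
        sources=['About 95 species are currently accepted.'],
        predictions=['About 95 you now get in.'],
        references=[['About 95 species are currently known.']],
    )
    assert 0.0 <= score <= 100.0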
def compute_sacrebleu(
    predictions,
    references,
    smooth_method='exp',
    smooth_value=None,
    force=False,
    lowercase=False,
    use_effective_order=False,
):
    """simple docstring"""
    references_per_prediction = len(references[0])
    if any(len(refs) != references_per_prediction for refs in references):
        raise ValueError('Sacrebleu requires the same number of references for each prediction')
    transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
    output = sacrebleu.corpus_bleu(
        predictions,
        transformed_references,
        smooth_method=smooth_method,
        smooth_value=smooth_value,
        force=force,
        lowercase=lowercase,
        use_effective_order=use_effective_order,
    )
    return output.score
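# Hedged sanity check for compute_sacrebleu above (requires the sacrebleu
# package; a verbatim single-reference match scores 100):
def _sacrebleu_smoke_test():
    preds = ['the quick brown fox jumps over the lazy dog']
    refs = [['the quick brown fox jumps over the lazy dog']]
    assert round(compute_sacrebleu(predictions=preds, references=refs)) == 100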
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCAmelCase ( datasets.Metric ):
"""simple docstring"""
    def _info(self):
        '''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Sequence(datasets.Value('string' , id='sequence' ) , id='references' ),
} ) , codebase_urls=[
'https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py',
'https://github.com/cocoxu/simplification/blob/master/SARI.py',
'https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py',
'https://github.com/mjpost/sacreBLEU',
] , reference_urls=[
'https://www.aclweb.org/anthology/Q16-1029.pdf',
'https://github.com/mjpost/sacreBLEU',
'https://en.wikipedia.org/wiki/BLEU',
'https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213',
] , )
    def _compute(self, sources, predictions, references):
        '''simple docstring'''
        result = {}
        result.update({'sari': compute_sari(sources=sources, predictions=predictions, references=references)})
        result.update({'sacrebleu': compute_sacrebleu(predictions=predictions, references=references)})
        result.update({'exact': compute_em(predictions=predictions, references=references)})
        return result
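    # Sketch of driving this combined metric through the `datasets` API (the
    # script path below is a placeholder for wherever this file lives):
    #
    #     metric = datasets.load_metric('path/to/this_metric.py')
    #     metric.compute(sources=srcs, predictions=preds, references=refs)
    #     # -> {'sari': ..., 'sacrebleu': ..., 'exact': ...}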
| 354 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class __lowerCAmelCase :
"""simple docstring"""
def __init__( self , lowerCamelCase__ , lowerCamelCase__=2 , lowerCamelCase__=True , lowerCamelCase__=False , lowerCamelCase__=10 , lowerCamelCase__=3 , lowerCamelCase__=32 * 8 , lowerCamelCase__=32 * 8 , lowerCamelCase__=4 , lowerCamelCase__=64 , ) -> Optional[Any]:
'''simple docstring'''
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = is_training
__lowerCamelCase = use_auxiliary_loss
__lowerCamelCase = num_queries
__lowerCamelCase = num_channels
__lowerCamelCase = min_size
__lowerCamelCase = max_size
__lowerCamelCase = num_labels
__lowerCamelCase = hidden_dim
__lowerCamelCase = hidden_dim
def lowercase_ ( self ) -> Optional[int]:
'''simple docstring'''
__lowerCamelCase = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
lowerCamelCase__ )
__lowerCamelCase = torch.ones([self.batch_size, self.min_size, self.max_size] , device=lowerCamelCase__ )
__lowerCamelCase = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=lowerCamelCase__ ) > 0.5
).float()
__lowerCamelCase = (torch.rand((self.batch_size, self.num_labels) , device=lowerCamelCase__ ) > 0.5).long()
__lowerCamelCase = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
__lowerCamelCase = MaskaFormerConfig(
hidden_size=self.hidden_dim , )
__lowerCamelCase = self.num_queries
__lowerCamelCase = self.num_labels
__lowerCamelCase = [1, 1, 1, 1]
__lowerCamelCase = self.num_channels
__lowerCamelCase = 64
__lowerCamelCase = 128
__lowerCamelCase = self.hidden_dim
__lowerCamelCase = self.hidden_dim
__lowerCamelCase = self.hidden_dim
return config
def lowercase_ ( self ) -> str:
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = self.prepare_config_and_inputs()
__lowerCamelCase = {'pixel_values': pixel_values, 'pixel_mask': pixel_mask}
return config, inputs_dict
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ ) -> Any:
'''simple docstring'''
__lowerCamelCase = output.encoder_hidden_states
__lowerCamelCase = output.pixel_decoder_hidden_states
__lowerCamelCase = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(lowerCamelCase__ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(lowerCamelCase__ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(lowerCamelCase__ ) , config.decoder_layers )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=False ) -> Tuple:
'''simple docstring'''
with torch.no_grad():
__lowerCamelCase = MaskaFormerModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
__lowerCamelCase = model(pixel_values=lowerCamelCase__ , pixel_mask=lowerCamelCase__ )
__lowerCamelCase = model(lowerCamelCase__ , output_hidden_states=lowerCamelCase__ )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
        # let's ensure the other two hidden states exist
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(lowerCamelCase__ , lowerCamelCase__ )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Tuple:
'''simple docstring'''
__lowerCamelCase = MaskaFormerForUniversalSegmentation(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
def comm_check_on_output(lowerCamelCase__ ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
__lowerCamelCase = model(pixel_values=lowerCamelCase__ , pixel_mask=lowerCamelCase__ )
__lowerCamelCase = model(lowerCamelCase__ )
comm_check_on_output(lowerCamelCase__ )
__lowerCamelCase = model(
pixel_values=lowerCamelCase__ , pixel_mask=lowerCamelCase__ , mask_labels=lowerCamelCase__ , class_labels=lowerCamelCase__ )
comm_check_on_output(lowerCamelCase__ )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
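    # Shape arithmetic behind the assertions above (illustrative numbers):
    # with a 256 x 256 input and 10 queries, the mask logits come out at 1/4
    # of the input resolution and the class logits carry one extra
    # "no object" slot:
    #
    #     batch, queries, labels, size = 2, 10, 4, 256
    #     mask_shape = (batch, queries, size // 4, size // 4)   # (2, 10, 64, 64)
    #     class_shape = (batch, queries, labels + 1)            # (2, 10, 5)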
@require_torch
class __lowerCAmelCase ( __magic_name__ , __magic_name__ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
snake_case_ = {'''feature-extraction''': MaskaFormerModel} if is_torch_available() else {}
snake_case_ = False
snake_case_ = False
snake_case_ = False
snake_case_ = False
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
__lowerCamelCase = MaskaFormerModelTester(self )
__lowerCamelCase = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ )
def lowercase_ ( self ) -> Tuple:
'''simple docstring'''
self.config_tester.run_common_tests()
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(lowerCamelCase__ , **lowerCamelCase__ , output_hidden_states=lowerCamelCase__ )
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*lowerCamelCase__ )
@unittest.skip(reason='Mask2Former does not use inputs_embeds' )
def lowercase_ ( self ) -> Any:
'''simple docstring'''
pass
@unittest.skip(reason='Mask2Former does not have a get_input_embeddings method' )
def lowercase_ ( self ) -> Tuple:
'''simple docstring'''
pass
@unittest.skip(reason='Mask2Former is not a generative model' )
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
pass
@unittest.skip(reason='Mask2Former does not use token embeddings' )
def lowercase_ ( self ) -> Optional[int]:
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(
reason='Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def lowercase_ ( self ) -> Dict:
'''simple docstring'''
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
pass
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase = model_class(lowerCamelCase__ )
__lowerCamelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCamelCase = [*signature.parameters.keys()]
__lowerCamelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , lowerCamelCase__ )
@slow
def lowercase_ ( self ) -> int:
'''simple docstring'''
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
__lowerCamelCase = MaskaFormerModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
__lowerCamelCase = (self.model_tester.min_size,) * 2
__lowerCamelCase = {
'pixel_values': torch.randn((2, 3, *size) , device=lowerCamelCase__ ),
'mask_labels': torch.randn((2, 10, *size) , device=lowerCamelCase__ ),
'class_labels': torch.zeros(2 , 10 , device=lowerCamelCase__ ).long(),
}
__lowerCamelCase = self.model_tester.get_config()
__lowerCamelCase = MaskaFormerForUniversalSegmentation(lowerCamelCase__ ).to(lowerCamelCase__ )
__lowerCamelCase = model(**lowerCamelCase__ )
self.assertTrue(outputs.loss is not None )
def lowercase_ ( self ) -> Dict:
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(lowerCamelCase__ , **lowerCamelCase__ , output_hidden_states=lowerCamelCase__ )
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase = model_class(lowerCamelCase__ ).to(lowerCamelCase__ )
__lowerCamelCase = model(**lowerCamelCase__ , output_attentions=lowerCamelCase__ )
self.assertTrue(outputs.attentions is not None )
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
if not self.model_tester.is_training:
return
__lowerCamelCase = self.all_model_classes[1]
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs()
__lowerCamelCase = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.train()
__lowerCamelCase = model(lowerCamelCase__ , mask_labels=lowerCamelCase__ , class_labels=lowerCamelCase__ ).loss
loss.backward()
def lowercase_ ( self ) -> Dict:
'''simple docstring'''
__lowerCamelCase = self.all_model_classes[1]
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs()
__lowerCamelCase = True
__lowerCamelCase = True
__lowerCamelCase = model_class(lowerCamelCase__ ).to(lowerCamelCase__ )
model.train()
__lowerCamelCase = model(lowerCamelCase__ , mask_labels=lowerCamelCase__ , class_labels=lowerCamelCase__ )
__lowerCamelCase = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
__lowerCamelCase = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
__lowerCamelCase = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
__lowerCamelCase = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=lowerCamelCase__ )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
__A = 1e-4
def lowerCamelCase_ ( ) -> List[Any]:
"""simple docstring"""
__lowerCamelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_vision
@slow
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def lowercase_ ( self ) -> Dict:
'''simple docstring'''
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def lowercase_ ( self ) -> Any:
'''simple docstring'''
__lowerCamelCase = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(lowerCamelCase__ )
__lowerCamelCase = self.default_image_processor
__lowerCamelCase = prepare_img()
__lowerCamelCase = image_processor(lowerCamelCase__ , return_tensors='pt' ).to(lowerCamelCase__ )
__lowerCamelCase = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(lowerCamelCase__ , (1, 3, 384, 384) )
with torch.no_grad():
__lowerCamelCase = model(**lowerCamelCase__ )
__lowerCamelCase = torch.tensor(
[[-0.27_90, -1.07_17, -1.16_68], [-0.51_28, -0.31_28, -0.49_87], [-0.58_32, 0.19_71, -0.01_97]] ).to(lowerCamelCase__ )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
__lowerCamelCase = torch.tensor(
[[0.89_73, 1.18_47, 1.17_76], [1.19_34, 1.50_40, 1.51_28], [1.11_53, 1.44_86, 1.49_51]] ).to(lowerCamelCase__ )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
__lowerCamelCase = torch.tensor(
[[2.11_52, 1.70_00, -0.86_03], [1.58_08, 1.80_04, -0.93_53], [1.60_43, 1.74_95, -0.59_99]] ).to(lowerCamelCase__ )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
__lowerCamelCase = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(lowerCamelCase__ ).eval()
__lowerCamelCase = self.default_image_processor
__lowerCamelCase = prepare_img()
__lowerCamelCase = image_processor(lowerCamelCase__ , return_tensors='pt' ).to(lowerCamelCase__ )
__lowerCamelCase = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(lowerCamelCase__ , (1, 3, 384, 384) )
with torch.no_grad():
__lowerCamelCase = model(**lowerCamelCase__ )
# masks_queries_logits
__lowerCamelCase = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
__lowerCamelCase = [
[-8.78_39, -9.00_56, -8.81_21],
[-7.41_04, -7.03_13, -6.54_01],
[-6.61_05, -6.34_27, -6.46_75],
]
__lowerCamelCase = torch.tensor(lowerCamelCase__ ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
# class_queries_logits
__lowerCamelCase = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
__lowerCamelCase = torch.tensor(
[
[1.83_24, -8.08_35, -4.19_22],
[0.84_50, -9.00_50, -3.60_53],
[0.30_45, -7.72_93, -3.02_75],
] ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
def lowercase_ ( self ) -> str:
'''simple docstring'''
__lowerCamelCase = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(lowerCamelCase__ ).eval()
__lowerCamelCase = self.default_image_processor
__lowerCamelCase = image_processor(
[np.zeros((3, 800, 1_333) ), np.zeros((3, 800, 1_333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors='pt' , )
__lowerCamelCase = inputs['pixel_values'].to(lowerCamelCase__ )
__lowerCamelCase = [el.to(lowerCamelCase__ ) for el in inputs['mask_labels']]
__lowerCamelCase = [el.to(lowerCamelCase__ ) for el in inputs['class_labels']]
with torch.no_grad():
__lowerCamelCase = model(**lowerCamelCase__ )
self.assertTrue(outputs.loss is not None )
| 348 | 0 |
import argparse
import torch
from torch import nn
from transformers import SpeechaTextConfig, SpeechaTextForConditionalGeneration
def remove_ignore_keys_(state_dict):
    """simple docstring"""
    ignore_keys = [
'encoder.version',
'decoder.version',
'model.encoder.version',
'model.decoder.version',
'decoder.output_projection.weight',
'_float_tensor',
'encoder.embed_positions._float_tensor',
'decoder.embed_positions._float_tensor',
]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_keys(s_dict):
    """simple docstring"""
    keys = list(s_dict.keys())
    for key in keys:
        if "transformer_layers" in key:
            s_dict[key.replace("transformer_layers", "layers")] = s_dict.pop(key)
        elif "subsample" in key:
            s_dict[key.replace("subsample", "conv")] = s_dict.pop(key)
def make_linear_from_emb(emb):
    """simple docstring"""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
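# A hypothetical tying check for make_linear_from_emb above (toy sizes; reuses
# the torch/nn imports at the top of this script). Assigning `.data` shares
# storage, so the returned head projects hidden states to vocabulary logits
# without duplicating the embedding weights.
def _tied_head_smoke_test():
    emb = nn.Embedding(10, 4)
    lm_head = make_linear_from_emb(emb)
    assert lm_head.weight.data_ptr() == emb.weight.data_ptr()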
def lowerCamelCase_ ( UpperCamelCase__ : Dict , UpperCamelCase__ : List[Any] ) -> Tuple:
"""simple docstring"""
__lowerCamelCase = torch.load(UpperCamelCase__ , map_location='cpu' )
__lowerCamelCase = mam_aaa['args']
__lowerCamelCase = mam_aaa['model']
__lowerCamelCase = state_dict['decoder.output_projection.weight']
remove_ignore_keys_(UpperCamelCase__ )
rename_keys(UpperCamelCase__ )
__lowerCamelCase = state_dict['decoder.embed_tokens.weight'].shape[0]
__lowerCamelCase = args.share_decoder_input_output_embed
__lowerCamelCase = [int(UpperCamelCase__ ) for i in args.conv_kernel_sizes.split(',' )]
__lowerCamelCase = SpeechaTextConfig(
vocab_size=UpperCamelCase__ , max_source_positions=args.max_source_positions , max_target_positions=args.max_target_positions , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='relu' , num_conv_layers=len(UpperCamelCase__ ) , conv_channels=args.conv_channels , conv_kernel_sizes=UpperCamelCase__ , input_feat_per_channel=args.input_feat_per_channel , input_channels=args.input_channels , tie_word_embeddings=UpperCamelCase__ , num_beams=5 , max_length=200 , use_cache=UpperCamelCase__ , decoder_start_token_id=2 , early_stopping=UpperCamelCase__ , )
__lowerCamelCase = SpeechaTextForConditionalGeneration(UpperCamelCase__ )
__lowerCamelCase , __lowerCamelCase = model.model.load_state_dict(UpperCamelCase__ , strict=UpperCamelCase__ )
if len(UpperCamelCase__ ) > 0 and not set(UpperCamelCase__ ) <= {
"encoder.embed_positions.weights",
"decoder.embed_positions.weights",
}:
raise ValueError(
'Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,'
F""" but all the following weights are missing {missing}""" )
if tie_embeds:
__lowerCamelCase = make_linear_from_emb(model.model.decoder.embed_tokens )
else:
__lowerCamelCase = lm_head_weights
model.save_pretrained(UpperCamelCase__ )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--fairseq_path", type=str, help="Path to the fairseq model (.pt) file.")
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
__A = parser.parse_args()
convert_fairseq_sat_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
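# Example invocation (script name and paths are placeholders):
#
#     python convert_speech_to_text_checkpoint.py \
#         --fairseq_path /path/to/fairseq_model.pt \
#         --pytorch_dump_folder_path ./s2t_converted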
| 355 |
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__A = {
"facebook/mask2former-swin-small-coco-instance": (
"https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"
)
# See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
__A = logging.get_logger(__name__)
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
snake_case_ = '''mask2former'''
snake_case_ = ['''swin''']
snake_case_ = {'''hidden_size''': '''hidden_dim'''}
def __init__( self , lowerCamelCase__ = None , lowerCamelCase__ = 256 , lowerCamelCase__ = 256 , lowerCamelCase__ = 256 , lowerCamelCase__ = 1_024 , lowerCamelCase__ = "relu" , lowerCamelCase__ = 6 , lowerCamelCase__ = 10 , lowerCamelCase__ = 8 , lowerCamelCase__ = 0.0 , lowerCamelCase__ = 2_048 , lowerCamelCase__ = False , lowerCamelCase__ = False , lowerCamelCase__ = 4 , lowerCamelCase__ = 255 , lowerCamelCase__ = 100 , lowerCamelCase__ = 0.1 , lowerCamelCase__ = 2.0 , lowerCamelCase__ = 5.0 , lowerCamelCase__ = 5.0 , lowerCamelCase__ = 12_544 , lowerCamelCase__ = 3.0 , lowerCamelCase__ = 0.75 , lowerCamelCase__ = 0.02 , lowerCamelCase__ = 1.0 , lowerCamelCase__ = True , lowerCamelCase__ = [4, 8, 16, 32] , lowerCamelCase__ = None , **lowerCamelCase__ , ) -> Tuple:
'''simple docstring'''
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.' )
__lowerCamelCase = CONFIG_MAPPING['swin'](
image_size=224 , in_channels=3 , patch_size=4 , embed_dim=96 , depths=[2, 2, 18, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=lowerCamelCase__ , out_features=['stage1', 'stage2', 'stage3', 'stage4'] , )
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
__lowerCamelCase = backbone_config.pop('model_type' )
__lowerCamelCase = CONFIG_MAPPING[backbone_model_type]
__lowerCamelCase = config_class.from_dict(lowerCamelCase__ )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
f"""Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. """
f"""Supported model types: {','.join(self.backbones_supported )}""" )
__lowerCamelCase = backbone_config
__lowerCamelCase = feature_size
__lowerCamelCase = mask_feature_size
__lowerCamelCase = hidden_dim
__lowerCamelCase = encoder_feedforward_dim
__lowerCamelCase = activation_function
__lowerCamelCase = encoder_layers
__lowerCamelCase = decoder_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = dropout
__lowerCamelCase = dim_feedforward
__lowerCamelCase = pre_norm
__lowerCamelCase = enforce_input_projection
__lowerCamelCase = common_stride
__lowerCamelCase = ignore_value
__lowerCamelCase = num_queries
__lowerCamelCase = no_object_weight
__lowerCamelCase = class_weight
__lowerCamelCase = mask_weight
__lowerCamelCase = dice_weight
__lowerCamelCase = train_num_points
__lowerCamelCase = oversample_ratio
__lowerCamelCase = importance_sample_ratio
__lowerCamelCase = init_std
__lowerCamelCase = init_xavier_std
__lowerCamelCase = use_auxiliary_loss
__lowerCamelCase = feature_strides
__lowerCamelCase = output_auxiliary_logits
__lowerCamelCase = decoder_layers
super().__init__(**lowerCamelCase__ )
@classmethod
def lowercase_ ( cls , lowerCamelCase__ , **lowerCamelCase__ ) -> Any:
'''simple docstring'''
return cls(
backbone_config=lowerCamelCase__ , **lowerCamelCase__ , )
def lowercase_ ( self ) -> Dict[str, any]:
'''simple docstring'''
__lowerCamelCase = copy.deepcopy(self.__dict__ )
__lowerCamelCase = self.backbone_config.to_dict()
__lowerCamelCase = self.__class__.model_type
return output
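    # Hypothetical round-trip sketch for the config above: to_dict() nests the
    # backbone config as a plain dict, which from_dict-style loading reverses
    # (class name is a placeholder for the obfuscated config class):
    #
    #     cfg = <this config class>()                       # defaults to Swin
    #     data = cfg.to_dict()
    #     assert data['backbone_config']['model_type'] == 'swin'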
| 348 | 0 |
"""simple docstring"""
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class __lowerCAmelCase :
"""simple docstring"""
def __init__( self , lowerCamelCase__ , lowerCamelCase__=99 , lowerCamelCase__=13 , lowerCamelCase__=7 , lowerCamelCase__=9 , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=False , lowerCamelCase__=32 , lowerCamelCase__=5 , lowerCamelCase__=4 , lowerCamelCase__=37 , lowerCamelCase__=8 , lowerCamelCase__=0.1 , lowerCamelCase__=0.0_02 , lowerCamelCase__=1 , lowerCamelCase__=0 , lowerCamelCase__=0 , lowerCamelCase__=None , lowerCamelCase__=None , ) -> Optional[int]:
'''simple docstring'''
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = encoder_seq_length
__lowerCamelCase = decoder_seq_length
# For common tests
__lowerCamelCase = self.decoder_seq_length
__lowerCamelCase = is_training
__lowerCamelCase = use_attention_mask
__lowerCamelCase = use_labels
__lowerCamelCase = vocab_size
__lowerCamelCase = hidden_size
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = d_ff
__lowerCamelCase = relative_attention_num_buckets
__lowerCamelCase = dropout_rate
__lowerCamelCase = initializer_factor
__lowerCamelCase = eos_token_id
__lowerCamelCase = pad_token_id
__lowerCamelCase = decoder_start_token_id
__lowerCamelCase = None
__lowerCamelCase = decoder_layers
def lowercase_ ( self ) -> str:
'''simple docstring'''
return TaConfig.from_pretrained('google/umt5-base' )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=None , ) -> Dict:
'''simple docstring'''
if attention_mask is None:
__lowerCamelCase = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
__lowerCamelCase = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
__lowerCamelCase = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=lowerCamelCase__ )
if decoder_head_mask is None:
__lowerCamelCase = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=lowerCamelCase__ )
if cross_attn_head_mask is None:
__lowerCamelCase = torch.ones(
config.num_decoder_layers , config.num_attention_heads , device=lowerCamelCase__ )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
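    # The default masks built above follow the usual "non-pad means attend"
    # rule; a toy illustration (assuming pad_token_id == 0):
    #
    #     ids = torch.tensor([[5, 6, 0, 0]])
    #     ids.ne(0)  # tensor([[ True,  True, False, False]])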
def lowercase_ ( self ) -> Dict:
'''simple docstring'''
__lowerCamelCase = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size )
__lowerCamelCase = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_length and which in turn results in
# position_ids being off by num_pad_tokens in past input
__lowerCamelCase = input_ids.clamp(self.pad_token_id + 1 )
__lowerCamelCase = decoder_input_ids.clamp(self.pad_token_id + 1 )
__lowerCamelCase = self.get_config()
__lowerCamelCase = config.num_attention_heads
__lowerCamelCase = self.prepare_inputs_dict(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
return config, input_dict
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase = self.prepare_config_and_inputs()
return config, inputs_dict
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
return TaConfig(
vocab_size=166 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , ) -> List[str]:
'''simple docstring'''
__lowerCamelCase = UMTaModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
__lowerCamelCase = model(
input_ids=lowerCamelCase__ , decoder_input_ids=lowerCamelCase__ , attention_mask=lowerCamelCase__ , decoder_attention_mask=lowerCamelCase__ , )
__lowerCamelCase = model(input_ids=lowerCamelCase__ , decoder_input_ids=lowerCamelCase__ )
__lowerCamelCase = result.last_hidden_state
__lowerCamelCase = result.past_key_values
__lowerCamelCase = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) )
self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) )
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(lowerCamelCase__ ) , config.num_layers )
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0] ) , 4 )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , ) -> List[Any]:
'''simple docstring'''
__lowerCamelCase = UMTaModel(config=lowerCamelCase__ ).get_decoder().to(lowerCamelCase__ ).eval()
# first forward pass
__lowerCamelCase = model(lowerCamelCase__ , use_cache=lowerCamelCase__ )
__lowerCamelCase = model(lowerCamelCase__ )
__lowerCamelCase = model(lowerCamelCase__ , use_cache=lowerCamelCase__ )
self.parent.assertTrue(len(lowerCamelCase__ ) == len(lowerCamelCase__ ) )
self.parent.assertTrue(len(lowerCamelCase__ ) == len(lowerCamelCase__ ) + 1 )
__lowerCamelCase , __lowerCamelCase = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
__lowerCamelCase = ids_tensor((self.batch_size, 1) , config.vocab_size )
# append to next input_ids and
__lowerCamelCase = torch.cat([input_ids, next_tokens] , dim=-1 )
__lowerCamelCase = model(lowerCamelCase__ )['last_hidden_state']
__lowerCamelCase = model(lowerCamelCase__ , past_key_values=lowerCamelCase__ )['last_hidden_state']
# select random slice
__lowerCamelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__lowerCamelCase = output_from_no_past[:, -1, random_slice_idx].detach()
__lowerCamelCase = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3 ) )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , ) -> List[str]:
'''simple docstring'''
__lowerCamelCase = UMTaModel(config=lowerCamelCase__ ).to(lowerCamelCase__ ).half().eval()
__lowerCamelCase = model(**lowerCamelCase__ )['last_hidden_state']
self.parent.assertFalse(torch.isnan(lowerCamelCase__ ).any().item() )
@require_torch
class __lowerCAmelCase ( __magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = (
(UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
)
snake_case_ = (UMTaForConditionalGeneration,) if is_torch_available() else ()
snake_case_ = (
{
'''conversational''': UMTaForConditionalGeneration,
'''feature-extraction''': UMTaModel,
'''summarization''': UMTaForConditionalGeneration,
'''text2text-generation''': UMTaForConditionalGeneration,
'''translation''': UMTaForConditionalGeneration,
'''question-answering''': UMTaForQuestionAnswering,
}
if is_torch_available()
else {}
)
snake_case_ = True
snake_case_ = False
snake_case_ = False
snake_case_ = True
snake_case_ = True
# The small UMT5 model needs higher percentages for CPU/MP tests
snake_case_ = [0.8, 0.9]
def lowercase_ ( self ) -> Any:
'''simple docstring'''
__lowerCamelCase = UMTaModelTester(self )
@unittest.skip('Test has a segmentation fault on torch 1.8.0' )
def lowercase_ ( self ) -> Tuple:
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
__lowerCamelCase = UMTaModel(config_and_inputs[0] ).to(lowerCamelCase__ )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
lowerCamelCase__ , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , f"""{tmpdirname}/t5_test.onnx""" , export_params=lowerCamelCase__ , opset_version=9 , input_names=['input_ids', 'decoder_input_ids'] , )
@unittest.skipIf(torch_device == 'cpu' , 'Cant do half precision' )
def lowercase_ ( self ) -> Dict:
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*lowerCamelCase__ )
def lowercase_ ( self ) -> Tuple:
'''simple docstring'''
__lowerCamelCase = ['encoder_attentions', 'decoder_attentions', 'cross_attentions']
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
__lowerCamelCase = config_and_inputs[0]
__lowerCamelCase = UMTaForConditionalGeneration(lowerCamelCase__ ).eval()
model.to(lowerCamelCase__ )
__lowerCamelCase = {
'head_mask': torch.zeros(config.num_layers , config.num_heads , device=lowerCamelCase__ ),
'decoder_head_mask': torch.zeros(config.num_decoder_layers , config.num_heads , device=lowerCamelCase__ ),
'cross_attn_head_mask': torch.zeros(config.num_decoder_layers , config.num_heads , device=lowerCamelCase__ ),
}
for attn_name, (name, mask) in zip(lowerCamelCase__ , head_masking.items() ):
__lowerCamelCase = {name: mask}
# Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
if name == "head_mask":
__lowerCamelCase = torch.ones(
config.num_decoder_layers , config.num_heads , device=lowerCamelCase__ )
__lowerCamelCase = model.generate(
config_and_inputs[1]['input_ids'] , num_beams=1 , max_length=3 , output_attentions=lowerCamelCase__ , return_dict_in_generate=lowerCamelCase__ , **lowerCamelCase__ , )
# We check the state of decoder_attentions and cross_attentions just from the last step
__lowerCamelCase = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 )
@unittest.skip('Does not work on the tiny model as we keep hitting edge cases.' )
def lowercase_ ( self ) -> str:
'''simple docstring'''
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
@unittest.skip(
'Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged' )
def lowercase_ ( self ) -> Dict:
'''simple docstring'''
__lowerCamelCase = UMTaForConditionalGeneration.from_pretrained('google/umt5-small' , return_dict=lowerCamelCase__ ).to(lowerCamelCase__ )
__lowerCamelCase = AutoTokenizer.from_pretrained('google/umt5-small' , use_fast=lowerCamelCase__ , legacy=lowerCamelCase__ )
__lowerCamelCase = [
'Bonjour monsieur <extra_id_0> bien <extra_id_1>.',
'No se como puedo <extra_id_0>.',
'This is the reason why we <extra_id_0> them.',
'The <extra_id_0> walks in <extra_id_1>, seats',
'A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.',
]
__lowerCamelCase = tokenizer(lowerCamelCase__ , return_tensors='pt' , padding=lowerCamelCase__ ).input_ids
# fmt: off
__lowerCamelCase = torch.tensor(
[
[ 38_530, 210_703, 256_299, 1_410, 256_298, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 826, 321, 671, 25_922, 256_299, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 1_460, 339, 312, 19_014, 10_620, 758, 256_299, 2_355,274, 1, 0, 0, 0, 0, 0, 0,0, 0],
[ 517, 256_299, 14_869, 281, 301, 256_298, 275, 119_983,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 320, 256_299, 14_869, 281, 2_234, 289, 2_275, 333,61_391, 289, 256_298, 543, 256_297, 168_714, 329, 256_296,274, 1],
] )
# fmt: on
torch.testing.assert_allclose(lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase = model.generate(input_ids.to(lowerCamelCase__ ) )
__lowerCamelCase = [
'<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>',
'<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
]
__lowerCamelCase = tokenizer.batch_decode(lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
| 356 |
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
snake_case_ = 42
class __lowerCAmelCase ( __magic_name__ , __magic_name__ ):
"""simple docstring"""
@register_to_config
def __init__( self , lowerCamelCase__ = 32 , lowerCamelCase__ = 64 , lowerCamelCase__ = 20 , lowerCamelCase__ = 768 , lowerCamelCase__=77 , lowerCamelCase__=4 , lowerCamelCase__ = 0.0 , lowerCamelCase__ = "silu" , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = "linear" , lowerCamelCase__ = "prd" , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , ) -> Tuple:
'''simple docstring'''
super().__init__()
__lowerCamelCase = num_attention_heads
__lowerCamelCase = attention_head_dim
__lowerCamelCase = num_attention_heads * attention_head_dim
__lowerCamelCase = additional_embeddings
__lowerCamelCase = time_embed_dim or inner_dim
__lowerCamelCase = embedding_proj_dim or embedding_dim
__lowerCamelCase = clip_embed_dim or embedding_dim
__lowerCamelCase = Timesteps(lowerCamelCase__ , lowerCamelCase__ , 0 )
__lowerCamelCase = TimestepEmbedding(lowerCamelCase__ , lowerCamelCase__ , out_dim=lowerCamelCase__ , act_fn=lowerCamelCase__ )
__lowerCamelCase = nn.Linear(lowerCamelCase__ , lowerCamelCase__ )
if embedding_proj_norm_type is None:
__lowerCamelCase = None
elif embedding_proj_norm_type == "layer":
__lowerCamelCase = nn.LayerNorm(lowerCamelCase__ )
else:
raise ValueError(f"""unsupported embedding_proj_norm_type: {embedding_proj_norm_type}""" )
__lowerCamelCase = nn.Linear(lowerCamelCase__ , lowerCamelCase__ )
if encoder_hid_proj_type is None:
__lowerCamelCase = None
elif encoder_hid_proj_type == "linear":
__lowerCamelCase = nn.Linear(lowerCamelCase__ , lowerCamelCase__ )
else:
raise ValueError(f"""unsupported encoder_hid_proj_type: {encoder_hid_proj_type}""" )
__lowerCamelCase = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , lowerCamelCase__ ) )
if added_emb_type == "prd":
__lowerCamelCase = nn.Parameter(torch.zeros(1 , 1 , lowerCamelCase__ ) )
elif added_emb_type is None:
__lowerCamelCase = None
else:
raise ValueError(
f"""`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`.""" )
__lowerCamelCase = nn.ModuleList(
[
BasicTransformerBlock(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , dropout=lowerCamelCase__ , activation_fn='gelu' , attention_bias=lowerCamelCase__ , )
for d in range(lowerCamelCase__ )
] )
if norm_in_type == "layer":
__lowerCamelCase = nn.LayerNorm(lowerCamelCase__ )
elif norm_in_type is None:
__lowerCamelCase = None
else:
raise ValueError(f"""Unsupported norm_in_type: {norm_in_type}.""" )
__lowerCamelCase = nn.LayerNorm(lowerCamelCase__ )
__lowerCamelCase = nn.Linear(lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase = torch.full(
[num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -1_00_00.0 )
causal_attention_mask.triu_(1 )
__lowerCamelCase = causal_attention_mask[None, ...]
self.register_buffer('causal_attention_mask' , lowerCamelCase__ , persistent=lowerCamelCase__ )
__lowerCamelCase = nn.Parameter(torch.zeros(1 , lowerCamelCase__ ) )
__lowerCamelCase = nn.Parameter(torch.zeros(1 , lowerCamelCase__ ) )
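    # Worked example of the causal mask registered above: torch.full() paints
    # the whole square with -10000.0 and triu_(1) keeps only the strict upper
    # triangle, so adding it to attention scores blocks future positions:
    #
    #     m = torch.full([3, 3], -1_00_00.0)
    #     m.triu_(1)
    #     # tensor([[     0., -10000., -10000.],
    #     #         [     0.,      0., -10000.],
    #     #         [     0.,      0.,      0.]])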
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def lowercase_ ( self ) -> Dict[str, AttentionProcessor]:
'''simple docstring'''
__lowerCamelCase = {}
def fn_recursive_add_processors(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
if hasattr(lowerCamelCase__ , 'set_processor' ):
__lowerCamelCase = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(f"""{name}.{sub_name}""" , lowerCamelCase__ , lowerCamelCase__ )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
return processors
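    # The recursive walk above flattens nested processors into a dict keyed by
    # dotted module paths, e.g. (illustrative keys for the transformer blocks
    # of this model):
    #
    #     {'transformer_blocks.0.attn1.processor': AttnProcessor(),
    #      'transformer_blocks.0.attn2.processor': AttnProcessor(), ...}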
def lowercase_ ( self , lowerCamelCase__ ) -> Tuple:
'''simple docstring'''
__lowerCamelCase = len(self.attn_processors.keys() )
if isinstance(lowerCamelCase__ , lowerCamelCase__ ) and len(lowerCamelCase__ ) != count:
raise ValueError(
f"""A dict of processors was passed, but the number of processors {len(lowerCamelCase__ )} does not match the"""
f""" number of attention layers: {count}. Please make sure to pass {count} processor classes.""" )
def fn_recursive_attn_processor(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
if hasattr(lowerCamelCase__ , 'set_processor' ):
if not isinstance(lowerCamelCase__ , lowerCamelCase__ ):
module.set_processor(lowerCamelCase__ )
else:
module.set_processor(processor.pop(f"""{name}.processor""" ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f"""{name}.{sub_name}""" , lowerCamelCase__ , lowerCamelCase__ )
for name, module in self.named_children():
fn_recursive_attn_processor(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def lowercase_ ( self ) -> Optional[int]:
'''simple docstring'''
self.set_attn_processor(AttnProcessor() )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = True , ) -> int:
'''simple docstring'''
__lowerCamelCase = hidden_states.shape[0]
__lowerCamelCase = timestep
if not torch.is_tensor(lowerCamelCase__ ):
__lowerCamelCase = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device )
elif torch.is_tensor(lowerCamelCase__ ) and len(timesteps.shape ) == 0:
__lowerCamelCase = timesteps[None].to(hidden_states.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
__lowerCamelCase = timesteps * torch.ones(lowerCamelCase__ , dtype=timesteps.dtype , device=timesteps.device )
__lowerCamelCase = self.time_proj(lowerCamelCase__ )
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
__lowerCamelCase = timesteps_projected.to(dtype=self.dtype )
__lowerCamelCase = self.time_embedding(lowerCamelCase__ )
if self.embedding_proj_norm is not None:
__lowerCamelCase = self.embedding_proj_norm(lowerCamelCase__ )
__lowerCamelCase = self.embedding_proj(lowerCamelCase__ )
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
__lowerCamelCase = self.encoder_hidden_states_proj(lowerCamelCase__ )
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError('`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set' )
__lowerCamelCase = self.proj_in(lowerCamelCase__ )
__lowerCamelCase = self.positional_embedding.to(hidden_states.dtype )
__lowerCamelCase = []
__lowerCamelCase = 0
if encoder_hidden_states is not None:
additional_embeds.append(lowerCamelCase__ )
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape ) == 2:
__lowerCamelCase = proj_embeddings[:, None, :]
if len(hidden_states.shape ) == 2:
__lowerCamelCase = hidden_states[:, None, :]
__lowerCamelCase = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
__lowerCamelCase = self.prd_embedding.to(hidden_states.dtype ).expand(lowerCamelCase__ , -1 , -1 )
additional_embeds.append(lowerCamelCase__ )
__lowerCamelCase = torch.cat(
lowerCamelCase__ , dim=1 , )
        # Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
__lowerCamelCase = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
__lowerCamelCase = F.pad(
lowerCamelCase__ , (
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
) , value=0.0 , )
__lowerCamelCase = hidden_states + positional_embeddings
if attention_mask is not None:
__lowerCamelCase = (1 - attention_mask.to(hidden_states.dtype )) * -1_00_00.0
__lowerCamelCase = F.pad(lowerCamelCase__ , (0, self.additional_embeddings) , value=0.0 )
__lowerCamelCase = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
__lowerCamelCase = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 )
if self.norm_in is not None:
__lowerCamelCase = self.norm_in(lowerCamelCase__ )
for block in self.transformer_blocks:
__lowerCamelCase = block(lowerCamelCase__ , attention_mask=lowerCamelCase__ )
__lowerCamelCase = self.norm_out(lowerCamelCase__ )
if self.prd_embedding is not None:
__lowerCamelCase = hidden_states[:, -1]
else:
__lowerCamelCase = hidden_states[:, additional_embeddings_len:]
__lowerCamelCase = self.proj_to_clip_embeddings(lowerCamelCase__ )
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=lowerCamelCase__ )
def lowercase_ ( self , lowerCamelCase__ ) -> List[Any]:
'''simple docstring'''
__lowerCamelCase = (prior_latents * self.clip_std) + self.clip_mean
return prior_latents
| 348 | 0 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
__A = logging.get_logger(__name__)
def lowerCamelCase_ ( UpperCamelCase__ : Optional[Any] ) -> List[str]:
"""simple docstring"""
__lowerCamelCase = SwinConfig(
embed_dim=192 , depths=(2, 2, 18, 2) , num_heads=(6, 12, 24, 48) , window_size=12 , out_features=['stage2', 'stage3', 'stage4'] , )
__lowerCamelCase = DetaConfig(
backbone_config=UpperCamelCase__ , num_queries=900 , encoder_ffn_dim=2048 , decoder_ffn_dim=2048 , num_feature_levels=5 , assign_first_stage=UpperCamelCase__ , with_box_refine=UpperCamelCase__ , two_stage=UpperCamelCase__ , )
# set labels
__lowerCamelCase = 'huggingface/label-files'
if "o365" in model_name:
__lowerCamelCase = 366
__lowerCamelCase = 'object365-id2label.json'
else:
__lowerCamelCase = 91
__lowerCamelCase = 'coco-detection-id2label.json'
__lowerCamelCase = num_labels
__lowerCamelCase = json.load(open(cached_download(hf_hub_url(UpperCamelCase__ , UpperCamelCase__ , repo_type='dataset' ) ) , 'r' ) )
__lowerCamelCase = {int(UpperCamelCase__ ): v for k, v in idalabel.items()}
__lowerCamelCase = idalabel
__lowerCamelCase = {v: k for k, v in idalabel.items()}
return config
def lowerCamelCase_ ( UpperCamelCase__ : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
__lowerCamelCase = []
# stem
# fmt: off
rename_keys.append(('backbone.0.body.patch_embed.proj.weight', 'model.backbone.model.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('backbone.0.body.patch_embed.proj.bias', 'model.backbone.model.embeddings.patch_embeddings.projection.bias') )
rename_keys.append(('backbone.0.body.patch_embed.norm.weight', 'model.backbone.model.embeddings.norm.weight') )
rename_keys.append(('backbone.0.body.patch_embed.norm.bias', 'model.backbone.model.embeddings.norm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.norm1.weight""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.norm1.bias""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.norm2.weight""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.norm2.bias""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((F"""backbone.0.body.layers.{i}.downsample.reduction.weight""", F"""model.backbone.model.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((F"""backbone.0.body.layers.{i}.downsample.norm.weight""", F"""model.backbone.model.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((F"""backbone.0.body.layers.{i}.downsample.norm.bias""", F"""model.backbone.model.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append(('backbone.0.body.norm1.weight', 'model.backbone.model.hidden_states_norms.stage2.weight') )
rename_keys.append(('backbone.0.body.norm1.bias', 'model.backbone.model.hidden_states_norms.stage2.bias') )
rename_keys.append(('backbone.0.body.norm2.weight', 'model.backbone.model.hidden_states_norms.stage3.weight') )
rename_keys.append(('backbone.0.body.norm2.bias', 'model.backbone.model.hidden_states_norms.stage3.bias') )
rename_keys.append(('backbone.0.body.norm3.weight', 'model.backbone.model.hidden_states_norms.stage4.weight') )
rename_keys.append(('backbone.0.body.norm3.bias', 'model.backbone.model.hidden_states_norms.stage4.bias') )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((F"""transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight""", F"""model.encoder.layers.{i}.self_attn.sampling_offsets.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias""", F"""model.encoder.layers.{i}.self_attn.sampling_offsets.bias""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.self_attn.attention_weights.weight""", F"""model.encoder.layers.{i}.self_attn.attention_weights.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.self_attn.attention_weights.bias""", F"""model.encoder.layers.{i}.self_attn.attention_weights.bias""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.self_attn.value_proj.weight""", F"""model.encoder.layers.{i}.self_attn.value_proj.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.self_attn.value_proj.bias""", F"""model.encoder.layers.{i}.self_attn.value_proj.bias""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.self_attn.output_proj.weight""", F"""model.encoder.layers.{i}.self_attn.output_proj.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.self_attn.output_proj.bias""", F"""model.encoder.layers.{i}.self_attn.output_proj.bias""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.norm1.weight""", F"""model.encoder.layers.{i}.self_attn_layer_norm.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.norm1.bias""", F"""model.encoder.layers.{i}.self_attn_layer_norm.bias""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.weight""", F"""model.encoder.layers.{i}.fc1.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.bias""", F"""model.encoder.layers.{i}.fc1.bias""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.weight""", F"""model.encoder.layers.{i}.fc2.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.bias""", F"""model.encoder.layers.{i}.fc2.bias""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.weight""", F"""model.encoder.layers.{i}.final_layer_norm.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.bias""", F"""model.encoder.layers.{i}.final_layer_norm.bias""") )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((F"""transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight""", F"""model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias""", F"""model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.cross_attn.attention_weights.weight""", F"""model.decoder.layers.{i}.encoder_attn.attention_weights.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.cross_attn.attention_weights.bias""", F"""model.decoder.layers.{i}.encoder_attn.attention_weights.bias""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.cross_attn.value_proj.weight""", F"""model.decoder.layers.{i}.encoder_attn.value_proj.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.cross_attn.value_proj.bias""", F"""model.decoder.layers.{i}.encoder_attn.value_proj.bias""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.cross_attn.output_proj.weight""", F"""model.decoder.layers.{i}.encoder_attn.output_proj.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.cross_attn.output_proj.bias""", F"""model.decoder.layers.{i}.encoder_attn.output_proj.bias""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.norm1.weight""", F"""model.decoder.layers.{i}.encoder_attn_layer_norm.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.norm1.bias""", F"""model.decoder.layers.{i}.encoder_attn_layer_norm.bias""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", F"""model.decoder.layers.{i}.self_attn.out_proj.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", F"""model.decoder.layers.{i}.self_attn.out_proj.bias""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.norm2.weight""", F"""model.decoder.layers.{i}.self_attn_layer_norm.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.norm2.bias""", F"""model.decoder.layers.{i}.self_attn_layer_norm.bias""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.weight""", F"""model.decoder.layers.{i}.fc1.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.bias""", F"""model.decoder.layers.{i}.fc1.bias""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.weight""", F"""model.decoder.layers.{i}.fc2.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.bias""", F"""model.decoder.layers.{i}.fc2.bias""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.weight""", F"""model.decoder.layers.{i}.final_layer_norm.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.bias""", F"""model.decoder.layers.{i}.final_layer_norm.bias""") )
# fmt: on
return rename_keys
def lowerCamelCase_ ( UpperCamelCase__ : Dict , UpperCamelCase__ : Tuple , UpperCamelCase__ : Dict ) -> Optional[int]:
"""simple docstring"""
__lowerCamelCase = dct.pop(UpperCamelCase__ )
__lowerCamelCase = val
def lowerCamelCase_ ( UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[int] ) -> int:
"""simple docstring"""
__lowerCamelCase = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
__lowerCamelCase = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
__lowerCamelCase = state_dict.pop(F"""backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight""" )
__lowerCamelCase = state_dict.pop(F"""backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
__lowerCamelCase = in_proj_weight[:dim, :]
__lowerCamelCase = in_proj_bias[: dim]
__lowerCamelCase = in_proj_weight[
dim : dim * 2, :
]
__lowerCamelCase = in_proj_bias[
dim : dim * 2
]
__lowerCamelCase = in_proj_weight[
-dim :, :
]
__lowerCamelCase = in_proj_bias[-dim :]
# fmt: on
def lowerCamelCase_ ( UpperCamelCase__ : Tuple , UpperCamelCase__ : Any ) -> int:
"""simple docstring"""
__lowerCamelCase = config.d_model
for i in range(config.decoder_layers ):
# read in weights + bias of input projection layer of self-attention
__lowerCamelCase = state_dict.pop(F"""transformer.decoder.layers.{i}.self_attn.in_proj_weight""" )
__lowerCamelCase = state_dict.pop(F"""transformer.decoder.layers.{i}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
__lowerCamelCase = in_proj_weight[:hidden_size, :]
__lowerCamelCase = in_proj_bias[:hidden_size]
__lowerCamelCase = in_proj_weight[
hidden_size : hidden_size * 2, :
]
__lowerCamelCase = in_proj_bias[hidden_size : hidden_size * 2]
__lowerCamelCase = in_proj_weight[-hidden_size:, :]
__lowerCamelCase = in_proj_bias[-hidden_size:]
def lowerCamelCase_ ( ) -> str:
"""simple docstring"""
__lowerCamelCase = 'http://images.cocodataset.org/val2017/000000039769.jpg'
__lowerCamelCase = Image.open(requests.get(UpperCamelCase__ , stream=UpperCamelCase__ ).raw )
return im
@torch.no_grad()
def lowerCamelCase_ ( UpperCamelCase__ : Dict , UpperCamelCase__ : Any , UpperCamelCase__ : Tuple ) -> Dict:
"""simple docstring"""
__lowerCamelCase = get_deta_config(UpperCamelCase__ )
# load original state dict
if model_name == "deta-swin-large":
__lowerCamelCase = hf_hub_download(repo_id='nielsr/deta-checkpoints' , filename='adet_swin_ft.pth' )
elif model_name == "deta-swin-large-o365":
__lowerCamelCase = hf_hub_download(repo_id='jozhang97/deta-swin-l-o365' , filename='deta_swin_pt_o365.pth' )
else:
raise ValueError(F"""Model name {model_name} not supported""" )
__lowerCamelCase = torch.load(UpperCamelCase__ , map_location='cpu' )['model']
# original state dict
for name, param in state_dict.items():
print(UpperCamelCase__ , param.shape )
# rename keys
__lowerCamelCase = create_rename_keys(UpperCamelCase__ )
for src, dest in rename_keys:
rename_key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
read_in_swin_q_k_v(UpperCamelCase__ , config.backbone_config )
read_in_decoder_q_k_v(UpperCamelCase__ , UpperCamelCase__ )
# fix some prefixes
for key in state_dict.copy().keys():
if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
__lowerCamelCase = state_dict.pop(UpperCamelCase__ )
__lowerCamelCase = val
if "input_proj" in key:
__lowerCamelCase = state_dict.pop(UpperCamelCase__ )
__lowerCamelCase = val
if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
__lowerCamelCase = state_dict.pop(UpperCamelCase__ )
__lowerCamelCase = val
# finally, create HuggingFace model and load state dict
__lowerCamelCase = DetaForObjectDetection(UpperCamelCase__ )
model.load_state_dict(UpperCamelCase__ )
model.eval()
__lowerCamelCase = 'cuda' if torch.cuda.is_available() else 'cpu'
model.to(UpperCamelCase__ )
# load image processor
__lowerCamelCase = DetaImageProcessor(format='coco_detection' )
# verify our conversion on image
__lowerCamelCase = prepare_img()
__lowerCamelCase = processor(images=UpperCamelCase__ , return_tensors='pt' )
__lowerCamelCase = encoding['pixel_values']
__lowerCamelCase = model(pixel_values.to(UpperCamelCase__ ) )
# verify logits
print('Logits:' , outputs.logits[0, :3, :3] )
print('Boxes:' , outputs.pred_boxes[0, :3, :3] )
if model_name == "deta-swin-large":
__lowerCamelCase = torch.tensor(
[[-7.63_08, -2.84_85, -5.37_37], [-7.20_37, -4.55_05, -4.80_27], [-7.29_43, -4.26_11, -4.66_17]] )
__lowerCamelCase = torch.tensor([[0.49_87, 0.49_69, 0.99_99], [0.25_49, 0.54_98, 0.48_05], [0.54_98, 0.27_57, 0.05_69]] )
elif model_name == "deta-swin-large-o365":
__lowerCamelCase = torch.tensor(
[[-8.01_22, -3.57_20, -4.97_17], [-8.15_47, -3.68_86, -4.63_89], [-7.66_10, -3.61_94, -5.01_34]] )
__lowerCamelCase = torch.tensor([[0.25_23, 0.55_49, 0.48_81], [0.77_15, 0.41_49, 0.46_01], [0.55_03, 0.27_53, 0.05_75]] )
assert torch.allclose(outputs.logits[0, :3, :3] , expected_logits.to(UpperCamelCase__ ) , atol=1E-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_boxes.to(UpperCamelCase__ ) , atol=1E-4 )
print('Everything ok!' )
if pytorch_dump_folder_path:
# Save model and processor
logger.info(F"""Saving PyTorch model and processor to {pytorch_dump_folder_path}...""" )
Path(UpperCamelCase__ ).mkdir(exist_ok=UpperCamelCase__ )
model.save_pretrained(UpperCamelCase__ )
processor.save_pretrained(UpperCamelCase__ )
# Push to hub
if push_to_hub:
print('Pushing model and processor to hub...' )
model.push_to_hub(F"""jozhang97/{model_name}""" )
processor.push_to_hub(F"""jozhang97/{model_name}""" )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
type=str,
default="deta-swin-large",
choices=["deta-swin-large", "deta-swin-large-o365"],
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
help="Path to the folder to output PyTorch model.",
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
__A = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 357 |
import sys
from collections import defaultdict
class __lowerCAmelCase :
"""simple docstring"""
def __init__( self ) -> Union[str, Any]:
'''simple docstring'''
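# node_position[vertex] tracks where each vertex currently sits inside the heap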
__lowerCamelCase = []
def lowercase_ ( self , lowerCamelCase__ ) -> List[str]:
'''simple docstring'''
return self.node_position[vertex]
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ ) -> int:
'''simple docstring'''
__lowerCamelCase = pos
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
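# sift-down: restore the min-heap property from index start towards the leaves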
if start > size // 2 - 1:
return
else:
if 2 * start + 2 >= size:
__lowerCamelCase = 2 * start + 1
else:
if heap[2 * start + 1] < heap[2 * start + 2]:
__lowerCamelCase = 2 * start + 1
else:
__lowerCamelCase = 2 * start + 2
if heap[smallest_child] < heap[start]:
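# swap the parent with its smaller child, updating both the heap entries and the position bookkeeping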
__lowerCamelCase , __lowerCamelCase = heap[smallest_child], positions[smallest_child]
__lowerCamelCase , __lowerCamelCase = (
heap[start],
positions[start],
)
__lowerCamelCase , __lowerCamelCase = temp, tempa
__lowerCamelCase = self.get_position(positions[smallest_child] )
self.set_position(
positions[smallest_child] , self.get_position(positions[start] ) )
self.set_position(positions[start] , lowerCamelCase__ )
self.top_to_bottom(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> int:
'''simple docstring'''
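# sift-up: bubble the value at index towards the root while it is smaller than its parent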
__lowerCamelCase = position[index]
while index != 0:
__lowerCamelCase = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
if val < heap[parent]:
__lowerCamelCase = heap[parent]
__lowerCamelCase = position[parent]
self.set_position(position[parent] , lowerCamelCase__ )
else:
__lowerCamelCase = val
__lowerCamelCase = temp
self.set_position(lowerCamelCase__ , lowerCamelCase__ )
break
__lowerCamelCase = parent
else:
__lowerCamelCase = val
__lowerCamelCase = temp
self.set_position(lowerCamelCase__ , 0 )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ ) -> str:
'''simple docstring'''
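# build the heap bottom-up, starting from the last internal node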
__lowerCamelCase = len(lowerCamelCase__ ) // 2 - 1
for i in range(lowerCamelCase__ , -1 , -1 ):
self.top_to_bottom(lowerCamelCase__ , lowerCamelCase__ , len(lowerCamelCase__ ) , lowerCamelCase__ )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
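# pop the root (the minimum), replace it with a sys.maxsize sentinel and sift down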
__lowerCamelCase = positions[0]
__lowerCamelCase = sys.maxsize
self.top_to_bottom(lowerCamelCase__ , 0 , len(lowerCamelCase__ ) , lowerCamelCase__ )
return temp
def lowerCamelCase_ ( UpperCamelCase__ : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__lowerCamelCase = Heap()
__lowerCamelCase = [0] * len(UpperCamelCase__ )
__lowerCamelCase = [-1] * len(UpperCamelCase__ ) # Neighboring Tree Vertex of selected vertex
# Minimum distance from each vertex to its nearest neighbor in the partial
# tree built so far
__lowerCamelCase = [] # Heap of Distance of vertices from their neighboring vertex
__lowerCamelCase = []
for vertex in range(len(UpperCamelCase__ ) ):
distance_tv.append(sys.maxsize )
positions.append(UpperCamelCase__ )
heap.node_position.append(UpperCamelCase__ )
__lowerCamelCase = []
__lowerCamelCase = 1
__lowerCamelCase = sys.maxsize
for neighbor, distance in adjacency_list[0]:
__lowerCamelCase = 0
__lowerCamelCase = distance
heap.heapify(UpperCamelCase__ , UpperCamelCase__ )
for _ in range(1 , len(UpperCamelCase__ ) ):
__lowerCamelCase = heap.delete_minimum(UpperCamelCase__ , UpperCamelCase__ )
if visited[vertex] == 0:
tree_edges.append((nbr_tv[vertex], vertex) )
__lowerCamelCase = 1
for neighbor, distance in adjacency_list[vertex]:
if (
visited[neighbor] == 0
and distance < distance_tv[heap.get_position(UpperCamelCase__ )]
):
__lowerCamelCase = distance
heap.bottom_to_top(
UpperCamelCase__ , heap.get_position(UpperCamelCase__ ) , UpperCamelCase__ , UpperCamelCase__ )
__lowerCamelCase = vertex
return tree_edges
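# Worked example (hand-checked against the intended, de-obfuscated logic): for the
# triangle graph with weighted edges 0-1 (1), 1-2 (2) and 0-2 (3), growing from
# vertex 0 the function returns the MST edges [(0, 1), (1, 2)].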
if __name__ == "__main__": # pragma: no cover
# < --------- Prim's Algorithm --------- >
__A = int(input("Enter number of edges: ").strip())
__A = defaultdict(list)
for _ in range(edges_number):
__A = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
| 348 | 0 |
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__A = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = XGLMTokenizer
snake_case_ = XGLMTokenizerFast
snake_case_ = True
snake_case_ = True
def lowercase_ ( self ) -> Tuple:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
__lowerCamelCase = XGLMTokenizer(lowerCamelCase__ , keep_accents=lowerCamelCase__ )
tokenizer.save_pretrained(self.tmpdirname )
def lowercase_ ( self ) -> Any:
'''simple docstring'''
__lowerCamelCase = '<pad>'
__lowerCamelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase__ ) , lowerCamelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase__ ) , lowerCamelCase__ )
def lowercase_ ( self ) -> Any:
'''simple docstring'''
__lowerCamelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<s>' )
self.assertEqual(vocab_keys[1] , '<pad>' )
self.assertEqual(len(lowerCamelCase__ ) , 1_008 )
def lowercase_ ( self ) -> Optional[int]:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1_008 )
def lowercase_ ( self ) -> Dict:
'''simple docstring'''
__lowerCamelCase = XGLMTokenizer(lowerCamelCase__ , keep_accents=lowerCamelCase__ )
__lowerCamelCase = tokenizer.tokenize('This is a test' )
self.assertListEqual(lowerCamelCase__ , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
__lowerCamelCase = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
lowerCamelCase__ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
__lowerCamelCase = tokenizer.convert_tokens_to_ids(lowerCamelCase__ )
self.assertListEqual(
lowerCamelCase__ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
__lowerCamelCase = tokenizer.convert_ids_to_tokens(lowerCamelCase__ )
self.assertListEqual(
lowerCamelCase__ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
@cached_property
def lowercase_ ( self ) -> Optional[int]:
'''simple docstring'''
return XGLMTokenizer.from_pretrained('facebook/xglm-564M' )
def lowercase_ ( self ) -> int:
'''simple docstring'''
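# the tokenizer must survive a pickle round-trip (needed e.g. by multiprocessing data loaders)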
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(lowerCamelCase__ , f.name )
__lowerCamelCase = XGLMTokenizer(f.name , keep_accents=lowerCamelCase__ )
__lowerCamelCase = pickle.dumps(lowerCamelCase__ )
pickle.loads(lowerCamelCase__ )
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
__lowerCamelCase = self.get_tokenizer()
__lowerCamelCase = self.get_rust_tokenizer()
__lowerCamelCase = 'I was born in 92000, and this is falsé.'
__lowerCamelCase = tokenizer.tokenize(lowerCamelCase__ )
__lowerCamelCase = rust_tokenizer.tokenize(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase = tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
__lowerCamelCase = rust_tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase = self.get_rust_tokenizer()
__lowerCamelCase = tokenizer.encode(lowerCamelCase__ )
__lowerCamelCase = rust_tokenizer.encode(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
@slow
def lowercase_ ( self ) -> Any:
'''simple docstring'''
__lowerCamelCase = 'Hello World!'
__lowerCamelCase = [2, 31_227, 4_447, 35]
self.assertListEqual(lowerCamelCase__ , self.big_tokenizer.encode(lowerCamelCase__ ) )
@slow
def lowercase_ ( self ) -> Optional[int]:
'''simple docstring'''
__lowerCamelCase = (
'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
' add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth'
)
# fmt: off
__lowerCamelCase = [2, 1_018, 67, 11, 1_988, 2_617, 5_631, 278, 11, 3_407, 48, 71_630, 28_085, 4, 3_234, 157, 13, 6, 5, 6, 4, 3_526, 768, 15, 659, 57, 298, 3_983, 864, 129, 21, 6, 5, 13_675, 377, 652, 7_580, 10_341, 155, 2_817, 422, 1_666, 7, 1_674, 53, 113, 202_277, 17_892, 33, 60, 87, 4, 3_234, 157, 61, 2_667, 52_376, 19, 88, 23, 735]
# fmt: on
self.assertListEqual(lowerCamelCase__ , self.big_tokenizer.encode(lowerCamelCase__ ) )
@slow
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
# fmt: off
__lowerCamelCase = {
'input_ids': [[2, 108_825, 1_163, 15, 88_010, 473, 15_898, 157, 13_672, 1_857, 312, 8, 238_021, 1_163, 53, 13_672, 1_857, 312, 8, 53_283, 182_396, 8, 18_566, 16, 36_733, 4_101, 8, 230, 244_017, 122_553, 7, 15, 132_597, 4, 293, 12_511, 7_610, 4, 3_414, 132_597, 9, 4, 32_361, 362, 4, 734, 28_512, 32_569, 18, 4, 32_361, 26_096, 14_982, 73, 18_715, 21_433, 235_261, 15, 492, 12_427, 16, 53, 18_715, 21_433, 65_454, 15, 23_659, 563, 16, 278, 597, 2_843, 595, 7_931, 182_396, 64_186, 22, 886, 595, 132_981, 53, 25_540, 3_449, 43_982, 39_901, 5_951, 878, 330, 4, 27_694, 80_269, 312, 53, 6_517, 11_780, 611, 20_408, 5], [2, 6, 132_597, 67, 42_897, 33, 592, 8, 163_729, 25_540, 361, 136_997, 109_514, 173_230, 7, 501, 60, 102_913, 196, 5_631, 235, 63_243, 473, 6, 231_757, 74, 5_277, 7_905, 53, 3_095, 37_317, 22, 454, 183_874, 5], [2, 268, 31_298, 46_530, 6, 132_935, 43_831, 7, 597, 32, 24, 3_688, 9_865, 5]],
'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCamelCase__ , model_name='facebook/xglm-564M' , padding=lowerCamelCase__ , )
| 358 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=__magic_name__ )
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
snake_case_ = field(default='''question-answering-extractive''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
snake_case_ = Features({'''question''': Value('''string''' ), '''context''': Value('''string''' )} )
snake_case_ = Features(
{
'''answers''': Sequence(
{
'''text''': Value('''string''' ),
'''answer_start''': Value('''int32''' ),
} )
} )
snake_case_ = "question"
snake_case_ = "context"
snake_case_ = "answers"
@property
def lowercase_ ( self ) -> Dict[str, str]:
'''simple docstring'''
return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
| 348 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
"tanreinama/GPTSAN-2.8B-spout_is_uniform": (
"https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json"
),
}
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
snake_case_ = '''gptsan-japanese'''
snake_case_ = [
'''past_key_values''',
]
snake_case_ = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self , lowerCamelCase__=36_000 , lowerCamelCase__=1_280 , lowerCamelCase__=1_024 , lowerCamelCase__=8_192 , lowerCamelCase__=4_096 , lowerCamelCase__=128 , lowerCamelCase__=10 , lowerCamelCase__=0 , lowerCamelCase__=16 , lowerCamelCase__=16 , lowerCamelCase__=128 , lowerCamelCase__=0.0 , lowerCamelCase__=1e-5 , lowerCamelCase__=False , lowerCamelCase__=0.0 , lowerCamelCase__="float32" , lowerCamelCase__=False , lowerCamelCase__=False , lowerCamelCase__=False , lowerCamelCase__=0.0_02 , lowerCamelCase__=False , lowerCamelCase__=True , lowerCamelCase__=35_998 , lowerCamelCase__=35_995 , lowerCamelCase__=35_999 , **lowerCamelCase__ , ) -> Optional[Any]:
'''simple docstring'''
__lowerCamelCase = vocab_size
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = d_model
__lowerCamelCase = d_ff
__lowerCamelCase = d_ext
__lowerCamelCase = d_spout
__lowerCamelCase = num_switch_layers
__lowerCamelCase = num_ext_layers
__lowerCamelCase = num_switch_layers + num_ext_layers
__lowerCamelCase = num_heads
__lowerCamelCase = num_experts
__lowerCamelCase = expert_capacity
__lowerCamelCase = dropout_rate
__lowerCamelCase = layer_norm_epsilon
__lowerCamelCase = router_bias
__lowerCamelCase = router_jitter_noise
__lowerCamelCase = router_dtype
__lowerCamelCase = router_ignore_padding_tokens
__lowerCamelCase = output_hidden_states
__lowerCamelCase = output_attentions
__lowerCamelCase = initializer_factor
__lowerCamelCase = output_router_logits
__lowerCamelCase = use_cache
super().__init__(
separator_token_id=lowerCamelCase__ , pad_token_id=lowerCamelCase__ , eos_token_id=lowerCamelCase__ , **lowerCamelCase__ , )
| 359 |
import requests
__A = "https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey="
def lowerCamelCase_ ( UpperCamelCase__ : str ) -> None:
"""simple docstring"""
__lowerCamelCase = requests.get(_NEWS_API + bbc_news_api_key ).json()
# each article in the list is a dict
for i, article in enumerate(bbc_news_page['articles'] , 1 ):
print(F"""{i}.) {article['title']}""" )
if __name__ == "__main__":
fetch_bbc_news(bbc_news_api_key="<Your BBC News API key goes here>")
| 348 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
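# lazy import structure: each optional backend below only registers its objects when its dependencies are installed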
__A = {
"configuration_clip": [
"CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"CLIPConfig",
"CLIPOnnxConfig",
"CLIPTextConfig",
"CLIPVisionConfig",
],
"processing_clip": ["CLIPProcessor"],
"tokenization_clip": ["CLIPTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ["CLIPTokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ["CLIPFeatureExtractor"]
__A = ["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
"CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"CLIPModel",
"CLIPPreTrainedModel",
"CLIPTextModel",
"CLIPTextModelWithProjection",
"CLIPVisionModel",
"CLIPVisionModelWithProjection",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
"TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCLIPModel",
"TFCLIPPreTrainedModel",
"TFCLIPTextModel",
"TFCLIPVisionModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
"FlaxCLIPModel",
"FlaxCLIPPreTrainedModel",
"FlaxCLIPTextModel",
"FlaxCLIPTextPreTrainedModel",
"FlaxCLIPVisionModel",
"FlaxCLIPVisionPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
__A = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 360 |
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
__A = logging.get_logger(__name__)
__A = TypeVar("DatasetType", Dataset, IterableDataset)
def lowerCamelCase_ ( UpperCamelCase__ : List[DatasetType] , UpperCamelCase__ : Optional[List[float]] = None , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : Optional[DatasetInfo] = None , UpperCamelCase__ : Optional[NamedSplit] = None , UpperCamelCase__ : Literal["first_exhausted", "all_exhausted"] = "first_exhausted" , ) -> DatasetType:
"""simple docstring"""
from .arrow_dataset import Dataset
from .iterable_dataset import IterableDataset
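# validate inputs: every element must be a Dataset or an IterableDataset, and all of the same kind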
if not datasets:
raise ValueError('Unable to interleave an empty list of datasets.' )
for i, dataset in enumerate(UpperCamelCase__ ):
if not isinstance(UpperCamelCase__ , (Dataset, IterableDataset) ):
if isinstance(UpperCamelCase__ , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """
'is an empty dataset dictionary.' )
raise ValueError(
F"""Dataset at position {i} has at least one split: {list(UpperCamelCase__ )}\n"""
F"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(UpperCamelCase__ ) )}']""" )
raise ValueError(
F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(UpperCamelCase__ ).__name__}.""" )
if i == 0:
__lowerCamelCase , __lowerCamelCase = (
(Dataset, IterableDataset) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else (IterableDataset, Dataset)
)
elif not isinstance(UpperCamelCase__ , UpperCamelCase__ ):
raise ValueError(
F"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.""" )
if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
raise ValueError(F"""{stopping_strategy} is not supported. Please enter a valid stopping_strategy.""" )
if dataset_type is Dataset:
return _interleave_map_style_datasets(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , info=UpperCamelCase__ , split=UpperCamelCase__ , stopping_strategy=UpperCamelCase__ )
else:
return _interleave_iterable_datasets(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , info=UpperCamelCase__ , split=UpperCamelCase__ , stopping_strategy=UpperCamelCase__ )
def lowerCamelCase_ ( UpperCamelCase__ : List[DatasetType] , UpperCamelCase__ : Optional[DatasetInfo] = None , UpperCamelCase__ : Optional[NamedSplit] = None , UpperCamelCase__ : int = 0 , ) -> DatasetType:
"""simple docstring"""
if not dsets:
raise ValueError('Unable to concatenate an empty list of datasets.' )
for i, dataset in enumerate(UpperCamelCase__ ):
if not isinstance(UpperCamelCase__ , (Dataset, IterableDataset) ):
if isinstance(UpperCamelCase__ , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """
'is an empty dataset dictionary.' )
raise ValueError(
F"""Dataset at position {i} has at least one split: {list(UpperCamelCase__ )}\n"""
F"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(UpperCamelCase__ ) )}']""" )
raise ValueError(
F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(UpperCamelCase__ ).__name__}.""" )
if i == 0:
__lowerCamelCase , __lowerCamelCase = (
(Dataset, IterableDataset) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else (IterableDataset, Dataset)
)
elif not isinstance(UpperCamelCase__ , UpperCamelCase__ ):
raise ValueError(
F"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.""" )
if dataset_type is Dataset:
return _concatenate_map_style_datasets(UpperCamelCase__ , info=UpperCamelCase__ , split=UpperCamelCase__ , axis=UpperCamelCase__ )
else:
return _concatenate_iterable_datasets(UpperCamelCase__ , info=UpperCamelCase__ , split=UpperCamelCase__ , axis=UpperCamelCase__ )
| 348 | 0 |
"""simple docstring"""
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
__A = logging.get_logger(__name__)
__A = TypeVar("DatasetType", Dataset, IterableDataset)
def lowerCamelCase_ ( UpperCamelCase__ : List[DatasetType] , UpperCamelCase__ : Optional[List[float]] = None , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : Optional[DatasetInfo] = None , UpperCamelCase__ : Optional[NamedSplit] = None , UpperCamelCase__ : Literal["first_exhausted", "all_exhausted"] = "first_exhausted" , ) -> DatasetType:
"""simple docstring"""
from .arrow_dataset import Dataset
from .iterable_dataset import IterableDataset
if not datasets:
raise ValueError('Unable to interleave an empty list of datasets.' )
for i, dataset in enumerate(UpperCamelCase__ ):
if not isinstance(UpperCamelCase__ , (Dataset, IterableDataset) ):
if isinstance(UpperCamelCase__ , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """
'is an empty dataset dictionary.' )
raise ValueError(
F"""Dataset at position {i} has at least one split: {list(UpperCamelCase__ )}\n"""
F"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(UpperCamelCase__ ) )}']""" )
raise ValueError(
F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(UpperCamelCase__ ).__name__}.""" )
if i == 0:
__lowerCamelCase , __lowerCamelCase = (
(Dataset, IterableDataset) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else (IterableDataset, Dataset)
)
elif not isinstance(UpperCamelCase__ , UpperCamelCase__ ):
raise ValueError(
F"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.""" )
if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
raise ValueError(F"""{stopping_strategy} is not supported. Please enter a valid stopping_strategy.""" )
if dataset_type is Dataset:
return _interleave_map_style_datasets(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , info=UpperCamelCase__ , split=UpperCamelCase__ , stopping_strategy=UpperCamelCase__ )
else:
return _interleave_iterable_datasets(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , info=UpperCamelCase__ , split=UpperCamelCase__ , stopping_strategy=UpperCamelCase__ )
def lowerCamelCase_ ( UpperCamelCase__ : List[DatasetType] , UpperCamelCase__ : Optional[DatasetInfo] = None , UpperCamelCase__ : Optional[NamedSplit] = None , UpperCamelCase__ : int = 0 , ) -> DatasetType:
"""simple docstring"""
if not dsets:
raise ValueError('Unable to concatenate an empty list of datasets.' )
for i, dataset in enumerate(UpperCamelCase__ ):
if not isinstance(UpperCamelCase__ , (Dataset, IterableDataset) ):
if isinstance(UpperCamelCase__ , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """
'is an empty dataset dictionary.' )
raise ValueError(
F"""Dataset at position {i} has at least one split: {list(UpperCamelCase__ )}\n"""
F"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(UpperCamelCase__ ) )}']""" )
raise ValueError(
F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(UpperCamelCase__ ).__name__}.""" )
if i == 0:
__lowerCamelCase , __lowerCamelCase = (
(Dataset, IterableDataset) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else (IterableDataset, Dataset)
)
elif not isinstance(UpperCamelCase__ , UpperCamelCase__ ):
raise ValueError(
F"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.""" )
if dataset_type is Dataset:
return _concatenate_map_style_datasets(UpperCamelCase__ , info=UpperCamelCase__ , split=UpperCamelCase__ , axis=UpperCamelCase__ )
else:
return _concatenate_iterable_datasets(UpperCamelCase__ , info=UpperCamelCase__ , split=UpperCamelCase__ , axis=UpperCamelCase__ )
| 361 |
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
__A = logging.get_logger(__name__)
__A = ["model.decoder.embed_positions.weights"]
def lowerCamelCase_ ( UpperCamelCase__ : List[Any] ) -> List[Any]:
"""simple docstring"""
if "emb" in name:
__lowerCamelCase = name.replace('emb' , 'model.decoder.embed_tokens' )
if "transformer" in name:
__lowerCamelCase = name.replace('transformer' , 'model.decoder' )
if "cross_attention" in name:
__lowerCamelCase = name.replace('cross_attention' , 'encoder_attn' )
if "linear1" in name:
__lowerCamelCase = name.replace('linear1' , 'fc1' )
if "linear2" in name:
__lowerCamelCase = name.replace('linear2' , 'fc2' )
if "norm1" in name:
__lowerCamelCase = name.replace('norm1' , 'self_attn_layer_norm' )
if "norm_cross" in name:
__lowerCamelCase = name.replace('norm_cross' , 'encoder_attn_layer_norm' )
if "norm2" in name:
__lowerCamelCase = name.replace('norm2' , 'final_layer_norm' )
if "out_norm" in name:
__lowerCamelCase = name.replace('out_norm' , 'model.decoder.layer_norm' )
if "linears" in name:
__lowerCamelCase = name.replace('linears' , 'lm_heads' )
if "condition_provider.conditioners.description.output_proj" in name:
__lowerCamelCase = name.replace('condition_provider.conditioners.description.output_proj' , 'enc_to_dec_proj' )
return name
def lowerCamelCase_ ( UpperCamelCase__ : OrderedDict , UpperCamelCase__ : int ) -> Tuple[Dict, Dict]:
"""simple docstring"""
__lowerCamelCase = list(state_dict.keys() )
__lowerCamelCase = {}
for key in keys:
__lowerCamelCase = state_dict.pop(UpperCamelCase__ )
__lowerCamelCase = rename_keys(UpperCamelCase__ )
if "in_proj_weight" in key:
# split fused qkv proj
__lowerCamelCase = val[:hidden_size, :]
__lowerCamelCase = val[hidden_size : 2 * hidden_size, :]
__lowerCamelCase = val[-hidden_size:, :]
elif "enc_to_dec_proj" in key:
__lowerCamelCase = val
else:
__lowerCamelCase = val
return state_dict, enc_dec_proj_state_dict
def lowerCamelCase_ ( UpperCamelCase__ : str ) -> MusicgenDecoderConfig:
"""simple docstring"""
if checkpoint == "small":
# default config values
__lowerCamelCase = 1024
__lowerCamelCase = 24
__lowerCamelCase = 16
elif checkpoint == "medium":
__lowerCamelCase = 1536
__lowerCamelCase = 48
__lowerCamelCase = 24
elif checkpoint == "large":
__lowerCamelCase = 2048
__lowerCamelCase = 48
__lowerCamelCase = 32
else:
raise ValueError(F"""Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.""" )
__lowerCamelCase = MusicgenDecoderConfig(
hidden_size=UpperCamelCase__ , ffn_dim=hidden_size * 4 , num_hidden_layers=UpperCamelCase__ , num_attention_heads=UpperCamelCase__ , )
return config
@torch.no_grad()
def lowerCamelCase_ ( UpperCamelCase__ : Any , UpperCamelCase__ : Dict=None , UpperCamelCase__ : Dict=None , UpperCamelCase__ : Optional[int]="cpu" ) -> List[Any]:
"""simple docstring"""
__lowerCamelCase = MusicGen.get_pretrained(UpperCamelCase__ , device=UpperCamelCase__ )
__lowerCamelCase = decoder_config_from_checkpoint(UpperCamelCase__ )
__lowerCamelCase = fairseq_model.lm.state_dict()
__lowerCamelCase , __lowerCamelCase = rename_state_dict(
UpperCamelCase__ , hidden_size=decoder_config.hidden_size )
__lowerCamelCase = TaEncoderModel.from_pretrained('t5-base' )
__lowerCamelCase = EncodecModel.from_pretrained('facebook/encodec_32khz' )
__lowerCamelCase = MusicgenForCausalLM(UpperCamelCase__ ).eval()
# load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
__lowerCamelCase , __lowerCamelCase = decoder.load_state_dict(UpperCamelCase__ , strict=UpperCamelCase__ )
for key in missing_keys.copy():
if key.startswith(('text_encoder', 'audio_encoder') ) or key in EXPECTED_MISSING_KEYS:
missing_keys.remove(UpperCamelCase__ )
if len(UpperCamelCase__ ) > 0:
raise ValueError(F"""Missing key(s) in state_dict: {missing_keys}""" )
if len(UpperCamelCase__ ) > 0:
raise ValueError(F"""Unexpected key(s) in state_dict: {unexpected_keys}""" )
# init the composite model
__lowerCamelCase = MusicgenForConditionalGeneration(text_encoder=UpperCamelCase__ , audio_encoder=UpperCamelCase__ , decoder=UpperCamelCase__ )
# load the pre-trained enc-dec projection (from the decoder state dict)
model.enc_to_dec_proj.load_state_dict(UpperCamelCase__ )
# check we can do a forward pass
__lowerCamelCase = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
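# fold the 4 codebooks into the batch dimension: the decoder expects (batch * num_codebooks, seq_len)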
__lowerCamelCase = input_ids.reshape(2 * 4 , -1 )
with torch.no_grad():
__lowerCamelCase = model(input_ids=UpperCamelCase__ , decoder_input_ids=UpperCamelCase__ ).logits
if logits.shape != (8, 1, 2048):
raise ValueError('Incorrect shape for logits' )
# now construct the processor
__lowerCamelCase = AutoTokenizer.from_pretrained('t5-base' )
__lowerCamelCase = AutoFeatureExtractor.from_pretrained('facebook/encodec_32khz' , padding_side='left' )
__lowerCamelCase = MusicgenProcessor(feature_extractor=UpperCamelCase__ , tokenizer=UpperCamelCase__ )
# set the appropriate bos/pad token ids
__lowerCamelCase = 2048
__lowerCamelCase = 2048
# set other default generation config params
__lowerCamelCase = int(30 * audio_encoder.config.frame_rate )
__lowerCamelCase = True
__lowerCamelCase = 3.0
if pytorch_dump_folder is not None:
Path(UpperCamelCase__ ).mkdir(exist_ok=UpperCamelCase__ )
logger.info(F"""Saving model {checkpoint} to {pytorch_dump_folder}""" )
model.save_pretrained(UpperCamelCase__ )
processor.save_pretrained(UpperCamelCase__ )
if repo_id:
logger.info(F"""Pushing model {checkpoint} to {repo_id}""" )
model.push_to_hub(UpperCamelCase__ )
processor.push_to_hub(UpperCamelCase__ )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint",
default="small",
type=str,
help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.",
)
parser.add_argument(
"--pytorch_dump_folder",
required=True,
default=None,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
parser.add_argument(
"--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
)
__A = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
| 348 | 0 |
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
__A = logging.getLogger(__name__)
__A = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
__A = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class __lowerCAmelCase :
"""simple docstring"""
snake_case_ = field(
default=__magic_name__ , metadata={
'''help''': (
'''The model checkpoint for weights initialization.Don\'t set if you want to train a model from scratch.'''
)
} , )
snake_case_ = field(
default=__magic_name__ , metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(__magic_name__ )} , )
snake_case_ = field(
default=__magic_name__ , metadata={
'''help''': (
'''Override some existing default config settings when a model is trained from scratch. Example: '''
'''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'''
)
} , )
snake_case_ = field(
default=__magic_name__ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
snake_case_ = field(
default=__magic_name__ , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
snake_case_ = field(
default=__magic_name__ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
snake_case_ = field(
default=__magic_name__ , metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''} , )
snake_case_ = field(
default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
snake_case_ = field(
default=__magic_name__ , metadata={
'''help''': (
'''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
'''with private models).'''
)
} , )
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
raise ValueError(
'--config_overrides can\'t be used in combination with --config_name or --model_name_or_path' )
@dataclass
class __lowerCAmelCase :
"""simple docstring"""
snake_case_ = field(
default=__magic_name__ , metadata={'''help''': '''The name of the dataset to use (via the datasets library).'''} )
snake_case_ = field(
default=__magic_name__ , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
snake_case_ = field(default=__magic_name__ , metadata={'''help''': '''The input training data file (a text file).'''} )
snake_case_ = field(
default=__magic_name__ , metadata={'''help''': '''An optional input evaluation data file to evaluate the perplexity on (a text file).'''} , )
snake_case_ = field(
default=__magic_name__ , metadata={'''help''': '''An optional input train ref data file for whole word masking in Chinese.'''} , )
snake_case_ = field(
default=__magic_name__ , metadata={'''help''': '''An optional input validation ref data file for whole word masking in Chinese.'''} , )
snake_case_ = field(
default=__magic_name__ , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
snake_case_ = field(
default=5 , metadata={
'''help''': '''The percentage of the train set used as validation set in case there\'s no validation split'''
} , )
snake_case_ = field(
default=__magic_name__ , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated. Default to the max input length of the model.'''
)
} , )
snake_case_ = field(
default=__magic_name__ , metadata={'''help''': '''The number of processes to use for the preprocessing.'''} , )
snake_case_ = field(
default=0.1_5 , metadata={'''help''': '''Ratio of tokens to mask for masked language modeling loss'''} )
snake_case_ = field(
default=__magic_name__ , metadata={
'''help''': (
'''Whether to pad all samples to `max_seq_length`. '''
'''If False, will pad the samples dynamically when batching to the maximum length in the batch.'''
)
} , )
def lowercase_ ( self ) -> Tuple:
'''simple docstring'''
if self.train_file is not None:
__lowerCamelCase = self.train_file.split('.' )[-1]
assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
if self.validation_file is not None:
__lowerCamelCase = self.validation_file.split('.' )[-1]
assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def lowerCamelCase_ ( UpperCamelCase__ : List[Any] , UpperCamelCase__ : Dict ) -> List[Any]:
"""simple docstring"""
with open(UpperCamelCase__ , 'r' , encoding='utf-8' ) as f:
__lowerCamelCase = [json.loads(UpperCamelCase__ ) for line in f.read().splitlines() if (len(UpperCamelCase__ ) > 0 and not line.isspace())]
assert len(UpperCamelCase__ ) == len(UpperCamelCase__ )
__lowerCamelCase = {c: dataset[c] for c in dataset.column_names}
__lowerCamelCase = refs
return Dataset.from_dict(UpperCamelCase__ )
def lowerCamelCase_ ( ) -> List[str]:
"""simple docstring"""
__lowerCamelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
__lowerCamelCase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__lowerCamelCase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('Training/evaluation parameters %s' , UpperCamelCase__ )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
__lowerCamelCase = load_dataset(data_args.dataset_name , data_args.dataset_config_name )
if "validation" not in datasets.keys():
__lowerCamelCase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"""train[:{data_args.validation_split_percentage}%]""" , )
__lowerCamelCase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"""train[{data_args.validation_split_percentage}%:]""" , )
else:
__lowerCamelCase = {}
if data_args.train_file is not None:
__lowerCamelCase = data_args.train_file
if data_args.validation_file is not None:
__lowerCamelCase = data_args.validation_file
__lowerCamelCase = data_args.train_file.split('.' )[-1]
if extension == "txt":
__lowerCamelCase = 'text'
__lowerCamelCase = load_dataset(UpperCamelCase__ , data_files=UpperCamelCase__ )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__lowerCamelCase = {
'cache_dir': model_args.cache_dir,
'revision': model_args.model_revision,
'use_auth_token': True if model_args.use_auth_token else None,
}
if model_args.config_name:
__lowerCamelCase = AutoConfig.from_pretrained(model_args.config_name , **UpperCamelCase__ )
elif model_args.model_name_or_path:
__lowerCamelCase = AutoConfig.from_pretrained(model_args.model_name_or_path , **UpperCamelCase__ )
else:
__lowerCamelCase = CONFIG_MAPPING[model_args.model_type]()
logger.warning('You are instantiating a new config instance from scratch.' )
if model_args.config_overrides is not None:
logger.info(F"""Overriding config: {model_args.config_overrides}""" )
config.update_from_string(model_args.config_overrides )
logger.info(F"""New config: {config}""" )
__lowerCamelCase = {
'cache_dir': model_args.cache_dir,
'use_fast': model_args.use_fast_tokenizer,
'revision': model_args.model_revision,
'use_auth_token': True if model_args.use_auth_token else None,
}
if model_args.tokenizer_name:
__lowerCamelCase = AutoTokenizer.from_pretrained(model_args.tokenizer_name , **UpperCamelCase__ )
elif model_args.model_name_or_path:
__lowerCamelCase = AutoTokenizer.from_pretrained(model_args.model_name_or_path , **UpperCamelCase__ )
else:
raise ValueError(
            'You are instantiating a new tokenizer from scratch. This is not supported by this script. '
            'You can do it from another script, save it, and load it from here, using --tokenizer_name.' )
if model_args.model_name_or_path:
__lowerCamelCase = AutoModelForMaskedLM.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=UpperCamelCase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info('Training new model from scratch' )
__lowerCamelCase = AutoModelForMaskedLM.from_config(UpperCamelCase__ )
model.resize_token_embeddings(len(UpperCamelCase__ ) )
# Preprocessing the datasets.
# First we tokenize all the texts.
if training_args.do_train:
__lowerCamelCase = datasets['train'].column_names
else:
__lowerCamelCase = datasets['validation'].column_names
__lowerCamelCase = 'text' if 'text' in column_names else column_names[0]
__lowerCamelCase = 'max_length' if data_args.pad_to_max_length else False
def tokenize_function(UpperCamelCase__ : Tuple ):
# Remove empty lines
__lowerCamelCase = [line for line in examples['text'] if len(UpperCamelCase__ ) > 0 and not line.isspace()]
return tokenizer(examples['text'] , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , max_length=data_args.max_seq_length )
__lowerCamelCase = datasets.map(
UpperCamelCase__ , batched=UpperCamelCase__ , num_proc=data_args.preprocessing_num_workers , remove_columns=[text_column_name] , load_from_cache_file=not data_args.overwrite_cache , )
    # Add the Chinese references if provided
if data_args.train_ref_file is not None:
__lowerCamelCase = add_chinese_references(tokenized_datasets['train'] , data_args.train_ref_file )
if data_args.validation_ref_file is not None:
__lowerCamelCase = add_chinese_references(
tokenized_datasets['validation'] , data_args.validation_ref_file )
    # If we have ref files, we need to keep the Trainer from removing them
__lowerCamelCase = data_args.train_ref_file or data_args.validation_ref_file
if has_ref:
__lowerCamelCase = False
# Data collator
# This one will take care of randomly masking the tokens.
__lowerCamelCase = DataCollatorForWholeWordMask(tokenizer=UpperCamelCase__ , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
__lowerCamelCase = Trainer(
model=UpperCamelCase__ , args=UpperCamelCase__ , train_dataset=tokenized_datasets['train'] if training_args.do_train else None , eval_dataset=tokenized_datasets['validation'] if training_args.do_eval else None , tokenizer=UpperCamelCase__ , data_collator=UpperCamelCase__ , )
# Training
if training_args.do_train:
if last_checkpoint is not None:
__lowerCamelCase = last_checkpoint
elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ):
__lowerCamelCase = model_args.model_name_or_path
else:
__lowerCamelCase = None
__lowerCamelCase = trainer.train(resume_from_checkpoint=UpperCamelCase__ )
trainer.save_model() # Saves the tokenizer too for easy upload
__lowerCamelCase = os.path.join(training_args.output_dir , 'train_results.txt' )
if trainer.is_world_process_zero():
with open(UpperCamelCase__ , 'w' ) as writer:
logger.info('***** Train results *****' )
for key, value in sorted(train_result.metrics.items() ):
logger.info(F""" {key} = {value}""" )
writer.write(F"""{key} = {value}\n""" )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , 'trainer_state.json' ) )
# Evaluation
__lowerCamelCase = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
__lowerCamelCase = trainer.evaluate()
__lowerCamelCase = math.exp(eval_output['eval_loss'] )
__lowerCamelCase = perplexity
__lowerCamelCase = os.path.join(training_args.output_dir , 'eval_results_mlm_wwm.txt' )
if trainer.is_world_process_zero():
with open(UpperCamelCase__ , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key, value in sorted(results.items() ):
logger.info(F""" {key} = {value}""" )
writer.write(F"""{key} = {value}\n""" )
return results
def lowerCamelCase_ ( UpperCamelCase__ : Optional[int] ) -> str:
"""simple docstring"""
main()
if __name__ == "__main__":
main()
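# Hedged aside, not part of the script above: a minimal standalone sketch of
# driving DataCollatorForWholeWordMask directly. The checkpoint name and the
# 0.15 probability are illustrative assumptions, not values this script fixes.
from transformers import AutoTokenizer, DataCollatorForWholeWordMask
tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased' )
collator = DataCollatorForWholeWordMask(tokenizer=tokenizer , mlm_probability=0.15 )
encoding = tokenizer('whole word masking groups subword pieces together' )
batch = collator([{'input_ids': encoding['input_ids']}] )
# batch['input_ids'] masks every subword of each chosen word at once, and
# batch['labels'] keeps the original ids at the masked positions (-100 elsewhere).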
| 362 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
"asapp/sew-d-tiny-100k": "https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json",
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
snake_case_ = '''sew-d'''
def __init__( self , lowerCamelCase__=32 , lowerCamelCase__=768 , lowerCamelCase__=12 , lowerCamelCase__=12 , lowerCamelCase__=3_072 , lowerCamelCase__=2 , lowerCamelCase__=512 , lowerCamelCase__=256 , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=("p2c", "c2p") , lowerCamelCase__="layer_norm" , lowerCamelCase__="gelu_python" , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=0.0 , lowerCamelCase__=0.1 , lowerCamelCase__=0.02 , lowerCamelCase__=1e-7 , lowerCamelCase__=1e-5 , lowerCamelCase__="group" , lowerCamelCase__="gelu" , lowerCamelCase__=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , lowerCamelCase__=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , lowerCamelCase__=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , lowerCamelCase__=False , lowerCamelCase__=128 , lowerCamelCase__=16 , lowerCamelCase__=True , lowerCamelCase__=0.05 , lowerCamelCase__=10 , lowerCamelCase__=2 , lowerCamelCase__=0.0 , lowerCamelCase__=10 , lowerCamelCase__=0 , lowerCamelCase__="mean" , lowerCamelCase__=False , lowerCamelCase__=False , lowerCamelCase__=256 , lowerCamelCase__=0 , lowerCamelCase__=1 , lowerCamelCase__=2 , **lowerCamelCase__ , ) -> Any:
'''simple docstring'''
super().__init__(**lowerCamelCase__ , pad_token_id=lowerCamelCase__ , bos_token_id=lowerCamelCase__ , eos_token_id=lowerCamelCase__ )
__lowerCamelCase = hidden_size
__lowerCamelCase = feat_extract_norm
__lowerCamelCase = feat_extract_activation
__lowerCamelCase = list(lowerCamelCase__ )
__lowerCamelCase = list(lowerCamelCase__ )
__lowerCamelCase = list(lowerCamelCase__ )
__lowerCamelCase = conv_bias
__lowerCamelCase = num_conv_pos_embeddings
__lowerCamelCase = num_conv_pos_embedding_groups
__lowerCamelCase = len(self.conv_dim )
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = intermediate_size
__lowerCamelCase = squeeze_factor
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = position_buckets
__lowerCamelCase = share_att_key
__lowerCamelCase = relative_attention
__lowerCamelCase = norm_rel_ebd
__lowerCamelCase = list(lowerCamelCase__ )
__lowerCamelCase = hidden_act
__lowerCamelCase = num_attention_heads
__lowerCamelCase = hidden_dropout
__lowerCamelCase = attention_dropout
__lowerCamelCase = activation_dropout
__lowerCamelCase = feat_proj_dropout
__lowerCamelCase = final_dropout
__lowerCamelCase = layer_norm_eps
__lowerCamelCase = feature_layer_norm_eps
__lowerCamelCase = initializer_range
__lowerCamelCase = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
                'Configuration for convolutional layers is incorrect. '
                'It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, '
f"""but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)"""
f"""= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__lowerCamelCase = apply_spec_augment
__lowerCamelCase = mask_time_prob
__lowerCamelCase = mask_time_length
__lowerCamelCase = mask_time_min_masks
__lowerCamelCase = mask_feature_prob
__lowerCamelCase = mask_feature_length
__lowerCamelCase = mask_feature_min_masks
# ctc loss
__lowerCamelCase = ctc_loss_reduction
__lowerCamelCase = ctc_zero_infinity
# sequence classification
__lowerCamelCase = use_weighted_layer_sum
__lowerCamelCase = classifier_proj_size
@property
def lowercase_ ( self ) -> Any:
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1 )
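# Hedged aside: the property above is the product of the convolutional
# strides, i.e. the waveform-to-frame downsampling factor. A standalone
# check using only the default strides listed in __init__:
import functools
import operator
conv_stride = (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1)
assert functools.reduce(operator.mul , conv_stride , 1 ) == 320 # one output frame per 320 input samples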
| 348 | 0 |
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
snake_case_ = (PNDMScheduler,)
snake_case_ = (('''num_inference_steps''', 50),)
def lowercase_ ( self , **lowerCamelCase__ ) -> List[str]:
'''simple docstring'''
__lowerCamelCase = {
'num_train_timesteps': 1_000,
'beta_start': 0.00_01,
'beta_end': 0.02,
'beta_schedule': 'linear',
}
config.update(**lowerCamelCase__ )
return config
def lowercase_ ( self , lowerCamelCase__=0 , **lowerCamelCase__ ) -> List[Any]:
'''simple docstring'''
__lowerCamelCase = dict(self.forward_default_kwargs )
__lowerCamelCase = kwargs.pop('num_inference_steps' , lowerCamelCase__ )
__lowerCamelCase = self.dummy_sample
__lowerCamelCase = 0.1 * sample
__lowerCamelCase = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
__lowerCamelCase = self.get_scheduler_config(**lowerCamelCase__ )
__lowerCamelCase = scheduler_class(**lowerCamelCase__ )
scheduler.set_timesteps(lowerCamelCase__ )
# copy over dummy past residuals
__lowerCamelCase = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowerCamelCase__ )
__lowerCamelCase = scheduler_class.from_pretrained(lowerCamelCase__ )
new_scheduler.set_timesteps(lowerCamelCase__ )
# copy over dummy past residuals
__lowerCamelCase = dummy_past_residuals[:]
__lowerCamelCase = scheduler.step_prk(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ).prev_sample
__lowerCamelCase = new_scheduler.step_prk(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
__lowerCamelCase = scheduler.step_plms(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ).prev_sample
__lowerCamelCase = new_scheduler.step_plms(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def lowercase_ ( self ) -> Optional[int]:
'''simple docstring'''
pass
def lowercase_ ( self , lowerCamelCase__=0 , **lowerCamelCase__ ) -> Optional[int]:
'''simple docstring'''
__lowerCamelCase = dict(self.forward_default_kwargs )
__lowerCamelCase = kwargs.pop('num_inference_steps' , lowerCamelCase__ )
__lowerCamelCase = self.dummy_sample
__lowerCamelCase = 0.1 * sample
__lowerCamelCase = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
__lowerCamelCase = self.get_scheduler_config()
__lowerCamelCase = scheduler_class(**lowerCamelCase__ )
scheduler.set_timesteps(lowerCamelCase__ )
# copy over dummy past residuals (must be after setting timesteps)
__lowerCamelCase = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowerCamelCase__ )
__lowerCamelCase = scheduler_class.from_pretrained(lowerCamelCase__ )
                # set timesteps for the restored scheduler
                new_scheduler.set_timesteps(lowerCamelCase__ )
                # copy over dummy past residuals (must be after setting timesteps)
__lowerCamelCase = dummy_past_residuals[:]
__lowerCamelCase = scheduler.step_prk(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ).prev_sample
__lowerCamelCase = new_scheduler.step_prk(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
__lowerCamelCase = scheduler.step_plms(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ).prev_sample
__lowerCamelCase = new_scheduler.step_plms(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def lowercase_ ( self , **lowerCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
__lowerCamelCase = self.scheduler_classes[0]
__lowerCamelCase = self.get_scheduler_config(**lowerCamelCase__ )
__lowerCamelCase = scheduler_class(**lowerCamelCase__ )
__lowerCamelCase = 10
__lowerCamelCase = self.dummy_model()
__lowerCamelCase = self.dummy_sample_deter
scheduler.set_timesteps(lowerCamelCase__ )
for i, t in enumerate(scheduler.prk_timesteps ):
__lowerCamelCase = model(lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase = scheduler.step_prk(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ).prev_sample
for i, t in enumerate(scheduler.plms_timesteps ):
__lowerCamelCase = model(lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase = scheduler.step_plms(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ).prev_sample
return sample
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
__lowerCamelCase = dict(self.forward_default_kwargs )
__lowerCamelCase = kwargs.pop('num_inference_steps' , lowerCamelCase__ )
for scheduler_class in self.scheduler_classes:
__lowerCamelCase = self.get_scheduler_config()
__lowerCamelCase = scheduler_class(**lowerCamelCase__ )
__lowerCamelCase = self.dummy_sample
__lowerCamelCase = 0.1 * sample
if num_inference_steps is not None and hasattr(lowerCamelCase__ , 'set_timesteps' ):
scheduler.set_timesteps(lowerCamelCase__ )
elif num_inference_steps is not None and not hasattr(lowerCamelCase__ , 'set_timesteps' ):
__lowerCamelCase = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
__lowerCamelCase = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
__lowerCamelCase = dummy_past_residuals[:]
__lowerCamelCase = scheduler.step_prk(lowerCamelCase__ , 0 , lowerCamelCase__ , **lowerCamelCase__ ).prev_sample
__lowerCamelCase = scheduler.step_prk(lowerCamelCase__ , 1 , lowerCamelCase__ , **lowerCamelCase__ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
__lowerCamelCase = scheduler.step_plms(lowerCamelCase__ , 0 , lowerCamelCase__ , **lowerCamelCase__ ).prev_sample
__lowerCamelCase = scheduler.step_plms(lowerCamelCase__ , 1 , lowerCamelCase__ , **lowerCamelCase__ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
for timesteps in [100, 1_000]:
self.check_over_configs(num_train_timesteps=lowerCamelCase__ )
def lowercase_ ( self ) -> List[Any]:
'''simple docstring'''
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=lowerCamelCase__ )
__lowerCamelCase = self.scheduler_classes[0]
__lowerCamelCase = self.get_scheduler_config(steps_offset=1 )
__lowerCamelCase = scheduler_class(**lowerCamelCase__ )
scheduler.set_timesteps(10 )
assert torch.equal(
scheduler.timesteps , torch.LongTensor(
[901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1] ) , )
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
for beta_start, beta_end in zip([0.00_01, 0.0_01] , [0.0_02, 0.02] ):
self.check_over_configs(beta_start=lowerCamelCase__ , beta_end=lowerCamelCase__ )
def lowercase_ ( self ) -> Optional[int]:
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=lowerCamelCase__ )
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCamelCase__ )
def lowercase_ ( self ) -> int:
'''simple docstring'''
for t in [1, 5, 10]:
self.check_over_forward(time_step=lowerCamelCase__ )
def lowercase_ ( self ) -> Optional[int]:
'''simple docstring'''
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
self.check_over_forward(num_inference_steps=lowerCamelCase__ )
def lowercase_ ( self ) -> str:
'''simple docstring'''
__lowerCamelCase = 27
for scheduler_class in self.scheduler_classes:
__lowerCamelCase = self.dummy_sample
__lowerCamelCase = 0.1 * sample
__lowerCamelCase = self.get_scheduler_config()
__lowerCamelCase = scheduler_class(**lowerCamelCase__ )
scheduler.set_timesteps(lowerCamelCase__ )
            # before the power-of-3 fix, this would error on the first step, so we only need to check two steps
for i, t in enumerate(scheduler.prk_timesteps[:2] ):
__lowerCamelCase = scheduler.step_prk(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ).prev_sample
def lowercase_ ( self ) -> str:
'''simple docstring'''
with self.assertRaises(lowerCamelCase__ ):
__lowerCamelCase = self.scheduler_classes[0]
__lowerCamelCase = self.get_scheduler_config()
__lowerCamelCase = scheduler_class(**lowerCamelCase__ )
scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample
def lowercase_ ( self ) -> List[Any]:
'''simple docstring'''
__lowerCamelCase = self.full_loop()
__lowerCamelCase = torch.sum(torch.abs(lowerCamelCase__ ) )
__lowerCamelCase = torch.mean(torch.abs(lowerCamelCase__ ) )
assert abs(result_sum.item() - 198.1_318 ) < 1e-2
assert abs(result_mean.item() - 0.25_80 ) < 1e-3
def lowercase_ ( self ) -> int:
'''simple docstring'''
__lowerCamelCase = self.full_loop(prediction_type='v_prediction' )
__lowerCamelCase = torch.sum(torch.abs(lowerCamelCase__ ) )
__lowerCamelCase = torch.mean(torch.abs(lowerCamelCase__ ) )
assert abs(result_sum.item() - 67.39_86 ) < 1e-2
assert abs(result_mean.item() - 0.08_78 ) < 1e-3
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
__lowerCamelCase = self.full_loop(set_alpha_to_one=lowerCamelCase__ , beta_start=0.01 )
__lowerCamelCase = torch.sum(torch.abs(lowerCamelCase__ ) )
__lowerCamelCase = torch.mean(torch.abs(lowerCamelCase__ ) )
assert abs(result_sum.item() - 230.0_399 ) < 1e-2
assert abs(result_mean.item() - 0.29_95 ) < 1e-3
def lowercase_ ( self ) -> str:
'''simple docstring'''
__lowerCamelCase = self.full_loop(set_alpha_to_one=lowerCamelCase__ , beta_start=0.01 )
__lowerCamelCase = torch.sum(torch.abs(lowerCamelCase__ ) )
__lowerCamelCase = torch.mean(torch.abs(lowerCamelCase__ ) )
assert abs(result_sum.item() - 186.9_482 ) < 1e-2
assert abs(result_mean.item() - 0.24_34 ) < 1e-3
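# Hedged aside: a minimal sketch of the PRK/PLMS loop these tests exercise,
# using random tensors in place of a real model's noise prediction.
import torch
from diffusers import PNDMScheduler
scheduler = PNDMScheduler(num_train_timesteps=1_000 , beta_schedule='linear' )
scheduler.set_timesteps(10 )
sample = torch.randn(1 , 3 , 8 , 8 )
for t in scheduler.prk_timesteps:
    residual = torch.randn(1 , 3 , 8 , 8 ) # stands in for model(sample, t)
    sample = scheduler.step_prk(residual , t , sample ).prev_sample
for t in scheduler.plms_timesteps:
    residual = torch.randn(1 , 3 , 8 , 8 )
    sample = scheduler.step_plms(residual , t , sample ).prev_sample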
| 363 |
import argparse
import torch
from transformers import (
SpeechTaConfig,
SpeechTaFeatureExtractor,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaProcessor,
SpeechTaTokenizer,
logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
__A = logging.get_logger("transformers.models.speecht5")
__A = {
"speech_encoder_prenet.layer_norm": "speecht5.encoder.prenet.feature_projection.layer_norm",
"speech_encoder_prenet.post_extract_proj": "speecht5.encoder.prenet.feature_projection.projection",
"speech_encoder_prenet.pos_conv.0": "speecht5.encoder.prenet.pos_conv_embed.conv",
"speech_encoder_prenet.mask_emb": "speecht5.encoder.prenet.masked_spec_embed",
}
__A = {
"text_encoder_prenet.encoder_prenet.0": "speecht5.encoder.prenet.embed_tokens",
"text_encoder_prenet.encoder_prenet.1.alpha": "speecht5.encoder.prenet.encode_positions.alpha",
}
__A = {
"speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0": "speecht5.decoder.prenet.layers.0",
"speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0": "speecht5.decoder.prenet.layers.1",
"speech_decoder_prenet.decoder_prenet.0.1": "speecht5.decoder.prenet.final_layer",
"speech_decoder_prenet.decoder_prenet.1.alpha": "speecht5.decoder.prenet.encode_positions.alpha",
"speech_decoder_prenet.spkembs_layer.0": "speecht5.decoder.prenet.speaker_embeds_layer",
}
__A = {
"speech_decoder_postnet.feat_out": "speech_decoder_postnet.feat_out",
"speech_decoder_postnet.prob_out": "speech_decoder_postnet.prob_out",
"speech_decoder_postnet.postnet.postnet.0.0": "speech_decoder_postnet.layers.0.conv",
"speech_decoder_postnet.postnet.postnet.0.1": "speech_decoder_postnet.layers.0.batch_norm",
"speech_decoder_postnet.postnet.postnet.1.0": "speech_decoder_postnet.layers.1.conv",
"speech_decoder_postnet.postnet.postnet.1.1": "speech_decoder_postnet.layers.1.batch_norm",
"speech_decoder_postnet.postnet.postnet.2.0": "speech_decoder_postnet.layers.2.conv",
"speech_decoder_postnet.postnet.postnet.2.1": "speech_decoder_postnet.layers.2.batch_norm",
"speech_decoder_postnet.postnet.postnet.3.0": "speech_decoder_postnet.layers.3.conv",
"speech_decoder_postnet.postnet.postnet.3.1": "speech_decoder_postnet.layers.3.batch_norm",
"speech_decoder_postnet.postnet.postnet.4.0": "speech_decoder_postnet.layers.4.conv",
"speech_decoder_postnet.postnet.postnet.4.1": "speech_decoder_postnet.layers.4.batch_norm",
}
__A = {
"text_decoder_prenet.embed_tokens": "speecht5.decoder.prenet.embed_tokens",
}
__A = {
"text_decoder_postnet.output_projection": "text_decoder_postnet.lm_head",
}
__A = {
"encoder.layers.*.self_attn.k_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj",
"encoder.layers.*.self_attn.v_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj",
"encoder.layers.*.self_attn.q_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj",
"encoder.layers.*.self_attn.out_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj",
"encoder.layers.*.self_attn_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.layer_norm",
"encoder.layers.*.fc1": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense",
"encoder.layers.*.fc2": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense",
"encoder.layers.*.final_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "speecht5.encoder.wrapped_encoder.layer_norm",
"encoder.pos_emb.pe_k": "speecht5.encoder.wrapped_encoder.embed_positions.pe_k",
}
__A = {
"decoder.layers.*.self_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj",
"decoder.layers.*.self_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj",
"decoder.layers.*.self_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj",
"decoder.layers.*.self_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj",
"decoder.layers.*.self_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm",
"decoder.layers.*.encoder_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj",
"decoder.layers.*.encoder_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj",
"decoder.layers.*.encoder_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj",
"decoder.layers.*.encoder_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj",
"decoder.layers.*.encoder_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm",
"decoder.layers.*.fc1": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense",
"decoder.layers.*.fc2": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense",
"decoder.layers.*.final_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm",
}
__A = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
__A = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
__A = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
__A = []
__A = [
"encoder.version",
"encoder.layers.*.norm_k.weight",
"encoder.layers.*.norm_k.bias",
"decoder.version",
"decoder.layers.*.norm_k.weight",
"decoder.layers.*.norm_k.bias",
"decoder.pos_emb.pe_k",
"speech_encoder_prenet.embed_positions._float_tensor",
"text_decoder_prenet.embed_positions._float_tensor",
]
__A = IGNORE_KEYS + [
"encoder.proj",
"text_encoder_prenet.*",
"speech_decoder_prenet.*",
"speech_decoder_postnet.*",
]
__A = IGNORE_KEYS + [
"encoder.proj",
"speech_encoder_prenet.*",
"text_decoder_prenet.*",
"text_decoder_postnet.*",
]
__A = IGNORE_KEYS + [
"encoder.proj",
"text_encoder_prenet.*",
"text_decoder_prenet.*",
"text_decoder_postnet.*",
]
def lowerCamelCase_ ( UpperCamelCase__ : Dict , UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : Dict , UpperCamelCase__ : List[Any] ) -> Dict:
"""simple docstring"""
for attribute in key.split('.' ):
__lowerCamelCase = getattr(UpperCamelCase__ , UpperCamelCase__ )
if weight_type is not None:
__lowerCamelCase = getattr(UpperCamelCase__ , UpperCamelCase__ ).shape
else:
__lowerCamelCase = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}""" )
if weight_type == "weight":
__lowerCamelCase = value
elif weight_type == "weight_g":
__lowerCamelCase = value
elif weight_type == "weight_v":
__lowerCamelCase = value
elif weight_type == "bias":
__lowerCamelCase = value
elif weight_type == "running_mean":
__lowerCamelCase = value
elif weight_type == "running_var":
__lowerCamelCase = value
elif weight_type == "num_batches_tracked":
__lowerCamelCase = value
else:
__lowerCamelCase = value
logger.info(F"""{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.""" )
def lowerCamelCase_ ( UpperCamelCase__ : List[Any] , UpperCamelCase__ : Dict ) -> Any:
"""simple docstring"""
for key in ignore_keys:
if key.endswith('.*' ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
__lowerCamelCase , __lowerCamelCase = key.split('.*.' )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
def lowerCamelCase_ ( UpperCamelCase__ : str , UpperCamelCase__ : List[Any] , UpperCamelCase__ : List[str] ) -> Optional[Any]:
"""simple docstring"""
__lowerCamelCase = []
if task == "s2t":
__lowerCamelCase = hf_model.speechta.encoder.prenet.feature_encoder
__lowerCamelCase = MAPPING_S2T
__lowerCamelCase = IGNORE_KEYS_S2T
elif task == "t2s":
__lowerCamelCase = None
__lowerCamelCase = MAPPING_T2S
__lowerCamelCase = IGNORE_KEYS_T2S
elif task == "s2s":
__lowerCamelCase = hf_model.speechta.encoder.prenet.feature_encoder
__lowerCamelCase = MAPPING_S2S
__lowerCamelCase = IGNORE_KEYS_S2S
else:
raise ValueError(F"""Unsupported task: {task}""" )
for name, value in fairseq_dict.items():
if should_ignore(UpperCamelCase__ , UpperCamelCase__ ):
logger.info(F"""{name} was ignored""" )
continue
__lowerCamelCase = False
if "conv_layers" in name:
load_conv_layer(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , hf_model.config.feat_extract_norm == 'group' , )
__lowerCamelCase = True
else:
for key, mapped_key in MAPPING.items():
# mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if "*" in key:
__lowerCamelCase , __lowerCamelCase = key.split('.*.' )
if prefix in name and suffix in name:
__lowerCamelCase = suffix
# if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
if key in name:
__lowerCamelCase = True
if "*" in mapped_key:
__lowerCamelCase = name.split(UpperCamelCase__ )[0].split('.' )[-2]
__lowerCamelCase = mapped_key.replace('*' , UpperCamelCase__ )
if "weight_g" in name:
__lowerCamelCase = 'weight_g'
elif "weight_v" in name:
__lowerCamelCase = 'weight_v'
elif "bias" in name:
__lowerCamelCase = 'bias'
elif "weight" in name:
__lowerCamelCase = 'weight'
elif "running_mean" in name:
__lowerCamelCase = 'running_mean'
elif "running_var" in name:
__lowerCamelCase = 'running_var'
elif "num_batches_tracked" in name:
__lowerCamelCase = 'num_batches_tracked'
else:
__lowerCamelCase = None
set_recursively(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
continue
if not is_used:
unused_weights.append(UpperCamelCase__ )
logger.warning(F"""Unused weights: {unused_weights}""" )
def lowerCamelCase_ ( UpperCamelCase__ : Any , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : int , UpperCamelCase__ : List[str] ) -> Tuple:
"""simple docstring"""
__lowerCamelCase = full_name.split('conv_layers.' )[-1]
__lowerCamelCase = name.split('.' )
__lowerCamelCase = int(items[0] )
__lowerCamelCase = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
__lowerCamelCase = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
__lowerCamelCase = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
__lowerCamelCase = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
__lowerCamelCase = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(UpperCamelCase__ )
@torch.no_grad()
def lowerCamelCase_ ( UpperCamelCase__ : Any , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[Any]=None , UpperCamelCase__ : str=None , UpperCamelCase__ : List[str]=None , ) -> Tuple:
"""simple docstring"""
if config_path is not None:
__lowerCamelCase = SpeechTaConfig.from_pretrained(UpperCamelCase__ )
else:
__lowerCamelCase = SpeechTaConfig()
if task == "s2t":
__lowerCamelCase = config.max_text_positions
__lowerCamelCase = SpeechTaForSpeechToText(UpperCamelCase__ )
elif task == "t2s":
__lowerCamelCase = 1876
__lowerCamelCase = 600
__lowerCamelCase = config.max_speech_positions
__lowerCamelCase = SpeechTaForTextToSpeech(UpperCamelCase__ )
elif task == "s2s":
__lowerCamelCase = 1876
__lowerCamelCase = config.max_speech_positions
__lowerCamelCase = SpeechTaForSpeechToSpeech(UpperCamelCase__ )
else:
raise ValueError(F"""Unknown task name: {task}""" )
if vocab_path:
__lowerCamelCase = SpeechTaTokenizer(UpperCamelCase__ , model_max_length=config.max_text_positions )
        # Mask token behaves like a normal word, i.e. it includes the space before it
__lowerCamelCase = AddedToken('<mask>' , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ )
__lowerCamelCase = mask_token
tokenizer.add_special_tokens({'mask_token': mask_token} )
tokenizer.add_tokens(['<ctc_blank>'] )
__lowerCamelCase = SpeechTaFeatureExtractor()
__lowerCamelCase = SpeechTaProcessor(tokenizer=UpperCamelCase__ , feature_extractor=UpperCamelCase__ )
processor.save_pretrained(UpperCamelCase__ )
__lowerCamelCase = torch.load(UpperCamelCase__ )
recursively_load_weights(fairseq_checkpoint['model'] , UpperCamelCase__ , UpperCamelCase__ )
model.save_pretrained(UpperCamelCase__ )
if repo_id:
print('Pushing to the hub...' )
processor.push_to_hub(UpperCamelCase__ )
model.push_to_hub(UpperCamelCase__ )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
parser.add_argument(
"--task",
default="s2t",
type=str,
help="Type of the SpeechT5 model you'd like to convert. Should be one of 's2t', 't2s', 's2s'.",
)
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--vocab_path", default=None, type=str, help="Path to SentencePiece model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
__A = parser.parse_args()
convert_speechta_checkpoint(
args.task,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.vocab_path,
args.push_to_hub,
)
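# Hedged aside: how the `.*` wildcards in the mappings above are matched,
# restated as a tiny self-contained helper (`matches` is hypothetical and
# mirrors the logic of should_ignore, not a function of this script).
def matches(name , key ):
    if key.endswith('.*' ):
        return name.startswith(key[:-1] )
    if ".*." in key:
        prefix , suffix = key.split('.*.' )
        return prefix in name and suffix in name
    return key in name
assert matches('text_encoder_prenet.encoder_prenet.0.weight' , 'text_encoder_prenet.*' )
assert matches('encoder.layers.3.fc1.weight' , 'encoder.layers.*.fc1' )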
| 348 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
__A = logging.get_logger(__name__)
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
def __init__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ) -> Tuple:
'''simple docstring'''
__lowerCamelCase = feature_size
__lowerCamelCase = sampling_rate
__lowerCamelCase = padding_value
__lowerCamelCase = kwargs.pop('padding_side' , 'right' )
__lowerCamelCase = kwargs.pop('return_attention_mask' , lowerCamelCase__ )
super().__init__(**lowerCamelCase__ )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ = True , lowerCamelCase__ = None , lowerCamelCase__ = False , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , ) -> BatchFeature:
'''simple docstring'''
        # If we have a list of dicts, let's convert it into a dict of lists
        # We do this to allow using this method as a collate_fn function in PyTorch Dataloader
if isinstance(lowerCamelCase__ , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ):
__lowerCamelCase = {
key: [example[key] for example in processed_features] for key in processed_features[0].keys()
}
        # The model's main input name, usually `input_values`, has to be passed for padding
if self.model_input_names[0] not in processed_features:
raise ValueError(
'You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`'
f""" to this method that includes {self.model_input_names[0]}, but you provided"""
f""" {list(processed_features.keys() )}""" )
__lowerCamelCase = processed_features[self.model_input_names[0]]
__lowerCamelCase = (
return_attention_mask if return_attention_mask is not None else self.return_attention_mask
)
if len(lowerCamelCase__ ) == 0:
if return_attention_mask:
__lowerCamelCase = []
return processed_features
# If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
# and rebuild them afterwards if no return_tensors is specified
# Note that we lose the specific device the tensor may be on for PyTorch
__lowerCamelCase = required_input[0]
if isinstance(lowerCamelCase__ , (list, tuple) ):
            # first_element might be an empty list/tuple in some edge cases so we grab the first non-empty element.
__lowerCamelCase = 0
while len(required_input[index] ) == 0:
index += 1
if index < len(lowerCamelCase__ ):
__lowerCamelCase = required_input[index][0]
if return_tensors is None:
if is_tf_tensor(lowerCamelCase__ ):
__lowerCamelCase = 'tf'
elif is_torch_tensor(lowerCamelCase__ ):
__lowerCamelCase = 'pt'
elif isinstance(lowerCamelCase__ , (int, float, list, tuple, np.ndarray) ):
__lowerCamelCase = 'np'
else:
raise ValueError(
f"""type of {first_element} unknown: {type(lowerCamelCase__ )}. """
'Should be one of a python, numpy, pytorch or tensorflow object.' )
for key, value in processed_features.items():
if isinstance(value[0] , (int, float) ):
__lowerCamelCase = to_numpy(lowerCamelCase__ )
else:
__lowerCamelCase = [to_numpy(lowerCamelCase__ ) for v in value]
# Convert padding_strategy in PaddingStrategy
__lowerCamelCase = self._get_padding_strategies(padding=lowerCamelCase__ , max_length=lowerCamelCase__ )
__lowerCamelCase = processed_features[self.model_input_names[0]]
__lowerCamelCase = len(lowerCamelCase__ )
if not all(len(lowerCamelCase__ ) == batch_size for v in processed_features.values() ):
raise ValueError('Some items in the output dictionary have a different batch size than others.' )
__lowerCamelCase = []
for i in range(lowerCamelCase__ ):
__lowerCamelCase = {k: v[i] for k, v in processed_features.items()}
# truncation
__lowerCamelCase = self._truncate(
lowerCamelCase__ , max_length=lowerCamelCase__ , pad_to_multiple_of=lowerCamelCase__ , truncation=lowerCamelCase__ , )
truncated_inputs.append(lowerCamelCase__ )
if padding_strategy == PaddingStrategy.LONGEST:
# make sure that `max_length` cannot be longer than the longest truncated length
__lowerCamelCase = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
__lowerCamelCase = PaddingStrategy.MAX_LENGTH
__lowerCamelCase = {}
for i in range(lowerCamelCase__ ):
# padding
__lowerCamelCase = self._pad(
truncated_inputs[i] , max_length=lowerCamelCase__ , padding_strategy=lowerCamelCase__ , pad_to_multiple_of=lowerCamelCase__ , return_attention_mask=lowerCamelCase__ , )
for key, value in outputs.items():
if key not in batch_outputs:
__lowerCamelCase = []
if value.dtype is np.dtype(np.floataa ):
__lowerCamelCase = value.astype(np.floataa )
batch_outputs[key].append(lowerCamelCase__ )
return BatchFeature(lowerCamelCase__ , tensor_type=lowerCamelCase__ )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__ = PaddingStrategy.DO_NOT_PAD , lowerCamelCase__ = None , lowerCamelCase__ = None , ) -> dict:
'''simple docstring'''
__lowerCamelCase = processed_features[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
__lowerCamelCase = len(lowerCamelCase__ )
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
__lowerCamelCase = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
__lowerCamelCase = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(lowerCamelCase__ ) < max_length
if return_attention_mask and "attention_mask" not in processed_features:
__lowerCamelCase = np.ones(len(lowerCamelCase__ ) , dtype=np.intaa )
if needs_to_be_padded:
__lowerCamelCase = max_length - len(lowerCamelCase__ )
if self.padding_side == "right":
if return_attention_mask:
__lowerCamelCase = np.pad(
processed_features['attention_mask'] , (0, difference) )
__lowerCamelCase = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
__lowerCamelCase = np.pad(
lowerCamelCase__ , lowerCamelCase__ , 'constant' , constant_values=self.padding_value )
elif self.padding_side == "left":
if return_attention_mask:
__lowerCamelCase = np.pad(
processed_features['attention_mask'] , (difference, 0) )
__lowerCamelCase = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
__lowerCamelCase = np.pad(
lowerCamelCase__ , lowerCamelCase__ , 'constant' , constant_values=self.padding_value )
else:
            raise ValueError('Invalid padding side: ' + str(self.padding_side ) )
return processed_features
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , ) -> str:
'''simple docstring'''
if not truncation:
return processed_features
elif truncation and max_length is None:
raise ValueError('When setting ``truncation=True``, make sure that ``max_length`` is defined.' )
__lowerCamelCase = processed_features[self.model_input_names[0]]
# find `max_length` that fits `pad_to_multiple_of`
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
__lowerCamelCase = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
__lowerCamelCase = len(lowerCamelCase__ ) > max_length
if needs_to_be_truncated:
__lowerCamelCase = processed_features[self.model_input_names[0]][:max_length]
if "attention_mask" in processed_features:
__lowerCamelCase = processed_features['attention_mask'][:max_length]
return processed_features
def lowercase_ ( self , lowerCamelCase__=False , lowerCamelCase__=None ) -> Any:
'''simple docstring'''
# Get padding strategy
if padding is not False:
if padding is True:
__lowerCamelCase = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch
elif not isinstance(lowerCamelCase__ , lowerCamelCase__ ):
__lowerCamelCase = PaddingStrategy(lowerCamelCase__ )
elif isinstance(lowerCamelCase__ , lowerCamelCase__ ):
__lowerCamelCase = padding
else:
__lowerCamelCase = PaddingStrategy.DO_NOT_PAD
# Set max length if needed
if max_length is None:
if padding_strategy == PaddingStrategy.MAX_LENGTH:
raise ValueError(
f"""When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined""" )
# Test if we have a padding value
if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
raise ValueError(
'Asking to pad but the feature_extractor does not have a padding value. Please select a value to use'
' as `padding_value`. For example: `feature_extractor.padding_value = 0.0`.' )
return padding_strategy
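# Hedged aside: the right-side padding path of `_pad` above, restated with
# plain NumPy and made-up sizes.
import numpy as np
values = np.array([0.1, 0.2, 0.3] )
max_length , padding_value = 5 , 0.0
difference = max_length - len(values )
attention_mask = np.pad(np.ones(len(values ) , dtype=np.int32 ) , (0, difference) )
padded = np.pad(values , (0, difference) , 'constant' , constant_values=padding_value )
assert padded.tolist() == [0.1, 0.2, 0.3, 0.0, 0.0]
assert attention_mask.tolist() == [1, 1, 1, 0, 0]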
| 364 |
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V, such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. Equivalently, there is no edge that connects
# vertices of the same set.
def lowerCamelCase_ ( UpperCamelCase__ : List[Any] ) -> List[Any]:
"""simple docstring"""
__lowerCamelCase = [False] * len(UpperCamelCase__ )
__lowerCamelCase = [-1] * len(UpperCamelCase__ )
def dfs(UpperCamelCase__ : int , UpperCamelCase__ : int ):
__lowerCamelCase = True
__lowerCamelCase = c
for u in graph[v]:
if not visited[u]:
dfs(UpperCamelCase__ , 1 - c )
for i in range(len(UpperCamelCase__ ) ):
if not visited[i]:
dfs(UpperCamelCase__ , 0 )
for i in range(len(UpperCamelCase__ ) ):
for j in graph[i]:
if color[i] == color[j]:
return False
return True
# Adjacency list of graph
__A = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
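# Hedged addition: a sanity check on an odd cycle (a triangle), which cannot
# be two-colored; assumes the function above under its call-site name.
triangle = {0: [1, 2], 1: [0, 2], 2: [0, 1]}
print(check_bipartite_dfs(triangle)) # expected: False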
| 348 | 0 |
import sys
from collections import defaultdict
class __lowerCAmelCase :
"""simple docstring"""
def __init__( self ) -> Union[str, Any]:
'''simple docstring'''
__lowerCamelCase = []
def lowercase_ ( self , lowerCamelCase__ ) -> List[str]:
'''simple docstring'''
return self.node_position[vertex]
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ ) -> int:
'''simple docstring'''
__lowerCamelCase = pos
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
if start > size // 2 - 1:
return
else:
if 2 * start + 2 >= size:
__lowerCamelCase = 2 * start + 1
else:
if heap[2 * start + 1] < heap[2 * start + 2]:
__lowerCamelCase = 2 * start + 1
else:
__lowerCamelCase = 2 * start + 2
if heap[smallest_child] < heap[start]:
__lowerCamelCase , __lowerCamelCase = heap[smallest_child], positions[smallest_child]
__lowerCamelCase , __lowerCamelCase = (
heap[start],
positions[start],
)
__lowerCamelCase , __lowerCamelCase = temp, tempa
__lowerCamelCase = self.get_position(positions[smallest_child] )
self.set_position(
positions[smallest_child] , self.get_position(positions[start] ) )
self.set_position(positions[start] , lowerCamelCase__ )
self.top_to_bottom(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> int:
'''simple docstring'''
__lowerCamelCase = position[index]
while index != 0:
__lowerCamelCase = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
if val < heap[parent]:
__lowerCamelCase = heap[parent]
__lowerCamelCase = position[parent]
self.set_position(position[parent] , lowerCamelCase__ )
else:
__lowerCamelCase = val
__lowerCamelCase = temp
self.set_position(lowerCamelCase__ , lowerCamelCase__ )
break
__lowerCamelCase = parent
else:
__lowerCamelCase = val
__lowerCamelCase = temp
self.set_position(lowerCamelCase__ , 0 )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ ) -> str:
'''simple docstring'''
__lowerCamelCase = len(lowerCamelCase__ ) // 2 - 1
for i in range(lowerCamelCase__ , -1 , -1 ):
self.top_to_bottom(lowerCamelCase__ , lowerCamelCase__ , len(lowerCamelCase__ ) , lowerCamelCase__ )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
__lowerCamelCase = positions[0]
__lowerCamelCase = sys.maxsize
self.top_to_bottom(lowerCamelCase__ , 0 , len(lowerCamelCase__ ) , lowerCamelCase__ )
return temp
def lowerCamelCase_ ( UpperCamelCase__ : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__lowerCamelCase = Heap()
__lowerCamelCase = [0] * len(UpperCamelCase__ )
    __lowerCamelCase = [-1] * len(UpperCamelCase__ ) # Tree vertex adjacent to each selected vertex (its MST parent)
    # Minimum distance from each vertex not yet in the tree to its nearest
    # neighbor inside the partial tree built so far
    __lowerCamelCase = [] # Heap of distances from each vertex to its nearest tree neighbor
__lowerCamelCase = []
for vertex in range(len(UpperCamelCase__ ) ):
distance_tv.append(sys.maxsize )
positions.append(UpperCamelCase__ )
heap.node_position.append(UpperCamelCase__ )
__lowerCamelCase = []
__lowerCamelCase = 1
__lowerCamelCase = sys.maxsize
for neighbor, distance in adjacency_list[0]:
__lowerCamelCase = 0
__lowerCamelCase = distance
heap.heapify(UpperCamelCase__ , UpperCamelCase__ )
for _ in range(1 , len(UpperCamelCase__ ) ):
__lowerCamelCase = heap.delete_minimum(UpperCamelCase__ , UpperCamelCase__ )
if visited[vertex] == 0:
tree_edges.append((nbr_tv[vertex], vertex) )
__lowerCamelCase = 1
for neighbor, distance in adjacency_list[vertex]:
if (
visited[neighbor] == 0
and distance < distance_tv[heap.get_position(UpperCamelCase__ )]
):
__lowerCamelCase = distance
heap.bottom_to_top(
UpperCamelCase__ , heap.get_position(UpperCamelCase__ ) , UpperCamelCase__ , UpperCamelCase__ )
__lowerCamelCase = vertex
return tree_edges
if __name__ == "__main__": # pragma: no cover
    # < --------- Prim's Algorithm --------- >
__A = int(input("Enter number of edges: ").strip())
__A = defaultdict(list)
for _ in range(edges_number):
__A = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
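# Hedged alternative: the same Prim's traversal with the standard-library
# heapq instead of the hand-rolled heap above; same adjacency-list format,
# returns the tree edges as (parent, vertex) pairs. Illustrative only.
import heapq
def prims_heapq(adjacency_list , start=0 ):
    visited , tree_edges = {start} , []
    frontier = [(weight, start, vertex) for vertex, weight in adjacency_list[start]]
    heapq.heapify(frontier )
    while frontier:
        weight , parent , vertex = heapq.heappop(frontier )
        if vertex in visited:
            continue
        visited.add(vertex )
        tree_edges.append((parent, vertex) )
        for neighbor , distance in adjacency_list[vertex]:
            if neighbor not in visited:
                heapq.heappush(frontier , (distance, vertex, neighbor) )
    return tree_edges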
| 365 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
__lowerCamelCase = AutoModelForSeqaSeqLM.from_pretrained('google/mt5-small' , return_dict=lowerCamelCase__ ).to(lowerCamelCase__ )
__lowerCamelCase = AutoTokenizer.from_pretrained('google/mt5-small' )
__lowerCamelCase = tokenizer('Hello there' , return_tensors='pt' ).input_ids
__lowerCamelCase = tokenizer('Hi I am' , return_tensors='pt' ).input_ids
__lowerCamelCase = model(input_ids.to(lowerCamelCase__ ) , labels=labels.to(lowerCamelCase__ ) ).loss
__lowerCamelCase = -(labels.shape[-1] * loss.item())
__lowerCamelCase = -84.91_27
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
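# Hedged aside: the score above is just the (negated) total log-likelihood,
# recovered from the per-token mean loss; the numbers here are made up.
num_label_tokens = 4 # hypothetical labels.shape[-1]
mean_nll = 2.5 # hypothetical loss.item()
assert -(num_label_tokens * mean_nll) == -10.0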
| 348 | 0 |
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
snake_case_ = '''char'''
snake_case_ = '''bpe'''
snake_case_ = '''wp'''
__A = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
snake_case_ = ['''image_processor''', '''char_tokenizer''']
snake_case_ = '''ViTImageProcessor'''
snake_case_ = '''MgpstrTokenizer'''
def __init__( self , lowerCamelCase__=None , lowerCamelCase__=None , **lowerCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
__lowerCamelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , lowerCamelCase__ , )
__lowerCamelCase = kwargs.pop('feature_extractor' )
__lowerCamelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
__lowerCamelCase = tokenizer
__lowerCamelCase = AutoTokenizer.from_pretrained('gpt2' )
__lowerCamelCase = AutoTokenizer.from_pretrained('bert-base-uncased' )
super().__init__(lowerCamelCase__ , lowerCamelCase__ )
def __call__( self , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=None , **lowerCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
if images is None and text is None:
raise ValueError('You need to specify either an `images` or `text` input to process.' )
if images is not None:
__lowerCamelCase = self.image_processor(lowerCamelCase__ , return_tensors=lowerCamelCase__ , **lowerCamelCase__ )
if text is not None:
__lowerCamelCase = self.char_tokenizer(lowerCamelCase__ , return_tensors=lowerCamelCase__ , **lowerCamelCase__ )
if text is None:
return inputs
elif images is None:
return encodings
else:
__lowerCamelCase = encodings['input_ids']
return inputs
def lowercase_ ( self , lowerCamelCase__ ) -> str:
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = sequences
__lowerCamelCase = char_preds.size(0 )
__lowerCamelCase , __lowerCamelCase = self._decode_helper(lowerCamelCase__ , 'char' )
__lowerCamelCase , __lowerCamelCase = self._decode_helper(lowerCamelCase__ , 'bpe' )
__lowerCamelCase , __lowerCamelCase = self._decode_helper(lowerCamelCase__ , 'wp' )
__lowerCamelCase = []
__lowerCamelCase = []
for i in range(lowerCamelCase__ ):
__lowerCamelCase = [char_scores[i], bpe_scores[i], wp_scores[i]]
__lowerCamelCase = [char_strs[i], bpe_strs[i], wp_strs[i]]
__lowerCamelCase = scores.index(max(lowerCamelCase__ ) )
final_strs.append(strs[max_score_index] )
final_scores.append(scores[max_score_index] )
__lowerCamelCase = {}
__lowerCamelCase = final_strs
__lowerCamelCase = final_scores
__lowerCamelCase = char_strs
__lowerCamelCase = bpe_strs
__lowerCamelCase = wp_strs
return out
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
if format == DecodeType.CHARACTER:
__lowerCamelCase = self.char_decode
__lowerCamelCase = 1
__lowerCamelCase = '[s]'
elif format == DecodeType.BPE:
__lowerCamelCase = self.bpe_decode
__lowerCamelCase = 2
__lowerCamelCase = '#'
elif format == DecodeType.WORDPIECE:
__lowerCamelCase = self.wp_decode
__lowerCamelCase = 102
__lowerCamelCase = '[SEP]'
else:
raise ValueError(f"""Format {format} is not supported.""" )
__lowerCamelCase , __lowerCamelCase = [], []
__lowerCamelCase = pred_logits.size(0 )
__lowerCamelCase = pred_logits.size(1 )
__lowerCamelCase , __lowerCamelCase = pred_logits.topk(1 , dim=-1 , largest=lowerCamelCase__ , sorted=lowerCamelCase__ )
__lowerCamelCase = preds_index.view(-1 , lowerCamelCase__ )[:, 1:]
__lowerCamelCase = decoder(lowerCamelCase__ )
__lowerCamelCase , __lowerCamelCase = torch.nn.functional.softmax(lowerCamelCase__ , dim=2 ).max(dim=2 )
__lowerCamelCase = preds_max_prob[:, 1:]
for index in range(lowerCamelCase__ ):
__lowerCamelCase = preds_str[index].find(lowerCamelCase__ )
__lowerCamelCase = preds_str[index][:pred_eos]
__lowerCamelCase = preds_index[index].cpu().tolist()
__lowerCamelCase = pred_index.index(lowerCamelCase__ ) if eos_token in pred_index else -1
__lowerCamelCase = preds_max_prob[index][: pred_eos_index + 1]
__lowerCamelCase = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
dec_strs.append(lowerCamelCase__ )
conf_scores.append(lowerCamelCase__ )
return dec_strs, conf_scores
    def char_decode(self, sequences):
        """Decode char-head token ids, stripping the spaces the tokenizer inserts."""
        decode_strs = [seq.replace(" ", "") for seq in self.char_tokenizer.batch_decode(sequences)]
        return decode_strs

    def bpe_decode(self, sequences):
        return self.bpe_tokenizer.batch_decode(sequences)

    def wp_decode(self, sequences):
        """Decode wordpiece-head token ids, stripping the spaces the tokenizer inserts."""
        decode_strs = [seq.replace(" ", "") for seq in self.wp_tokenizer.batch_decode(sequences)]
        return decode_strs
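    # Illustrative sketch (added for clarity, not part of the original file): batch_decode()
    # above keeps, per image, whichever of the three decoding heads is most confident.
    # With hypothetical per-head outputs, the selection reduces to plain Python:
    #
    #     scores = [0.91, 0.85, 0.88]             # hypothetical per-head confidences
    #     strs = ["ticket", "ticke", "ticket"]    # hypothetical per-head strings
    #     best = scores.index(max(scores))        # -> 0, so "ticket" wins with score 0.91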
| 366 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def get_focalnet_config(model_name):
    """Build a FocalNetConfig whose hyperparameters are inferred from the checkpoint name."""
    depths = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
    use_conv_embed = True if "large" in model_name or "huge" in model_name else False
    use_post_layernorm = True if "large" in model_name or "huge" in model_name else False
    use_layerscale = True if "large" in model_name or "huge" in model_name else False

    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]

    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]

    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352

    # set label information
    repo_id = "huggingface/label-files"
    if "large" in model_name or "huge" in model_name:
        filename = "imagenet-22k-id2label.json"
    else:
        filename = "imagenet-1k-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = FocalNetConfig(
        embed_dim=embed_dim,
        depths=depths,
        focal_levels=focal_levels,
        focal_windows=focal_windows,
        use_conv_embed=use_conv_embed,
        id2label=id2label,
        label2id=label2id,
        use_post_layernorm=use_post_layernorm,
        use_layerscale=use_layerscale,
    )
    return config
def rename_key(name):
    """Map an original FocalNet state-dict key onto the Hugging Face naming scheme."""
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "encoder.layers" in name:
        name = name.replace("encoder.layers", "encoder.stages")
    if "downsample.proj" in name:
        name = name.replace("downsample.proj", "downsample.projection")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace("modulation.f", "modulation.projection_in")
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace("modulation.h", "modulation.projection_context")
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace("modulation.proj", "modulation.projection_out")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "focalnet." + name
    return name
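# Minimal usage sketch of the key remapping above (input key illustrative; the output
# follows by tracing the replacement chain, ending in the "focalnet." prefix branch):
#   rename_key("patch_embed.proj.weight")
#   -> "focalnet.embeddings.patch_embeddings.projection.weight"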
def convert_focalnet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    """Download an original FocalNet checkpoint, rename its weights, verify outputs, and optionally save/push."""
    # fmt: off
    model_name_to_url = {
        "focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
        "focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
        "focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
        "focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
        "focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
        "focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
        "focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
        "focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
        "focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
        "focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
    }
    # fmt: on

    checkpoint_url = model_name_to_url[model_name]
    print("Checkpoint URL: ", checkpoint_url)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val

    config = get_focalnet_config(model_name)
    model = FocalNetForImageClassification(config)
    model.eval()

    # load state dict
    model.load_state_dict(state_dict)

    # verify conversion
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": 256},
        resample=PILImageResampling.BILINEAR,
        do_center_crop=True,
        crop_size=224,
        do_normalize=True,
        image_mean=IMAGENET_DEFAULT_MEAN,
        image_std=IMAGENET_DEFAULT_STD,
    )
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(images=image, return_tensors="pt")

    image_transforms = transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)

    # verify pixel_values
    assert torch.allclose(inputs.pixel_values, original_pixel_values, atol=1e-4)

    outputs = model(**inputs)
    predicted_class_idx = outputs.logits.argmax(-1).item()
    print("Predicted class:", model.config.id2label[predicted_class_idx])

    print("First values of logits:", outputs.logits[0, :3])

    if model_name == "focalnet-tiny":
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191])
    elif model_name == "focalnet-tiny-lrf":
        expected_slice = torch.tensor([1.1669, 0.0125, -0.1695])
    elif model_name == "focalnet-small":
        expected_slice = torch.tensor([0.4917, -0.0430, 0.1341])
    elif model_name == "focalnet-small-lrf":
        expected_slice = torch.tensor([-0.2588, -0.5342, -0.2331])
    elif model_name == "focalnet-base":
        expected_slice = torch.tensor([-0.1655, -0.4090, -0.1730])
    elif model_name == "focalnet-base-lrf":
        expected_slice = torch.tensor([0.5306, -0.0483, -0.3928])
    assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor of {model_name} to the hub...")
        model.push_to_hub(f"{model_name}")
        processor.push_to_hub(f"{model_name}")
if __name__ == "__main__":
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="focalnet-tiny",
type=str,
help="Name of the FocalNet model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub.",
)
__A = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 348 | 0 |
"""simple docstring"""
import math
__A = 10
__A = 7
__A = BALLS_PER_COLOUR * NUM_COLOURS
def lowerCamelCase_ ( UpperCamelCase__ : int = 20 ) -> str:
"""simple docstring"""
__lowerCamelCase = math.comb(UpperCamelCase__ , UpperCamelCase__ )
__lowerCamelCase = math.comb(NUM_BALLS - BALLS_PER_COLOUR , UpperCamelCase__ )
__lowerCamelCase = NUM_COLOURS * (1 - missing_colour / total)
return F"""{result:.9f}"""
if __name__ == "__main__":
print(solution(20))
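# Worked check: with 70 balls (7 colours x 10 each) and 20 drawn, the expectation is
# 7 * (1 - C(60, 20) / C(70, 20)) ~= 6.818741802, which is the value printed above
# (Project Euler 493 asks for exactly nine decimal places).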
| 367 |
from __future__ import annotations


def median_of_two_arrays(nums1: list[float], nums2: list[float]) -> float:
    """Return the median of the merged, sorted union of the two arrays."""
    all_numbers = sorted(nums1 + nums2)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    array_1 = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_2 = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(f"The median of two arrays is: {median_of_two_arrays(array_1, array_2)}")
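    # Quick examples of the function above:
    #   median_of_two_arrays([1, 3], [2])     -> 2      (odd total, middle element)
    #   median_of_two_arrays([1, 2], [3, 4])  -> 2.5    (even total, mean of the middle two)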
| 348 | 0 |
import os
from typing import Dict, List, Tuple, TypeVar, Union

T = TypeVar("T")

ListLike = Union[List[T], Tuple[T, ...]]
NestedDataStructureLike = Union[T, List[T], Dict[str, T]]
PathLike = Union[str, bytes, os.PathLike]
| 368 |
ENERGY_CONVERSION = {
    "joule": 1.0,
    "kilojoule": 1000,
    "megajoule": 1000000,
    "gigajoule": 1000000000,
    "wattsecond": 1.0,
    "watthour": 3600,
    "kilowatthour": 3600000,
    "newtonmeter": 1.0,
    "calorie_nutr": 4186.8,
    "kilocalorie_nutr": 4186800.00,
    "electronvolt": 1.602176634e-19,
    "britishthermalunit_it": 1055.05585,
    "footpound": 1.355818,
}


def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    """Convert `value` between the energy units above (all factors are relative to the joule)."""
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
        raise ValueError(msg)
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
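    # Usage examples (value * factor[from_type] / factor[to_type]):
    #   energy_conversion("joule", "kilojoule", 1000)  -> 1.0
    #   energy_conversion("kilowatthour", "joule", 1)  -> 3600000.0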
| 348 | 0 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer


@require_torch
@require_sentencepiece
@require_tokenizers
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = AutoModelForSeq2SeqLM.from_pretrained("google/mt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="pt").input_ids
        labels = tokenizer("Hi I am", return_tensors="pt").input_ids

        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
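        # Note: `loss` is the mean per-token cross-entropy over the label sequence, so
        # -(labels.shape[-1] * loss) recovers the summed target log-likelihood, which is
        # the quantity the original Mesh-TensorFlow implementation reports.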
| 369 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool


class TextSummarizationTool(PipelineTool):
    default_checkpoint = "philschmid/bart-large-cnn-samsum"
    description = (
        "This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, "
        "and returns a summary of the text."
    )
    name = "summarizer"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM

    inputs = ["text"]
    outputs = ["text"]

    def encode(self, text):
        return self.pre_processor(text, return_tensors="pt", truncation=True)

    def forward(self, inputs):
        return self.model.generate(**inputs)[0]

    def decode(self, outputs):
        return self.pre_processor.decode(outputs, skip_special_tokens=True, clean_up_tokenization_spaces=True)
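# Hypothetical usage sketch (illustrative; the tool is normally instantiated and called
# by the agents framework rather than by hand):
#   summarizer = TextSummarizationTool()
#   summary = summarizer("Long meeting transcript ...")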
| 348 | 0 |
import unittest

import numpy as np
import torch

from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device


enable_full_determinism()


class KarrasVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=2, generator=generator, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = pipe(num_inference_steps=2, generator=generator, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class KarrasVePipelineIntegrationTests(unittest.TestCase):
    def test_inference(self):
        model_id = "google/ncsnpp-celebahq-256"
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=model, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=20, generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 370 |
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , lowerCamelCase__ , lowerCamelCase__=13 , lowerCamelCase__=7 , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=99 , lowerCamelCase__=32 , lowerCamelCase__=5 , lowerCamelCase__=4 , lowerCamelCase__=37 , lowerCamelCase__="gelu" , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=512 , lowerCamelCase__=16 , lowerCamelCase__=2 , lowerCamelCase__=0.02 , lowerCamelCase__=4 , ) -> Optional[Any]:
'''simple docstring'''
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = seq_length
__lowerCamelCase = is_training
__lowerCamelCase = use_attention_mask
__lowerCamelCase = use_token_type_ids
__lowerCamelCase = use_labels
__lowerCamelCase = vocab_size
__lowerCamelCase = hidden_size
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_act
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = type_vocab_size
__lowerCamelCase = type_sequence_label_size
__lowerCamelCase = initializer_range
__lowerCamelCase = num_choices
def lowercase_ ( self ) -> List[Any]:
'''simple docstring'''
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCamelCase = None
if self.use_attention_mask:
__lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCamelCase = None
if self.use_token_type_ids:
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowerCamelCase = RobertaPreLayerNormConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def lowercase_ ( self ) -> str:
'''simple docstring'''
__lowerCamelCase = self.prepare_config_and_inputs()
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = config_and_inputs
__lowerCamelCase = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
return config, inputs_dict
def lowercase_ ( self ) -> List[Any]:
'''simple docstring'''
__lowerCamelCase = self.prepare_config_and_inputs()
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = config_and_inputs
__lowerCamelCase = True
__lowerCamelCase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
# Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = True
snake_case_ = (
(
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowercase_ ( self ) -> str:
'''simple docstring'''
__lowerCamelCase = FlaxRobertaPreLayerNormModelTester(self )
@slow
def lowercase_ ( self ) -> Optional[int]:
'''simple docstring'''
for model_class_name in self.all_model_classes:
__lowerCamelCase = model_class_name.from_pretrained('andreasmadsen/efficient_mlm_m0.40' , from_pt=lowerCamelCase__ )
__lowerCamelCase = model(np.ones((1, 1) ) )
self.assertIsNotNone(lowerCamelCase__ )
@require_flax
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def lowercase_ ( self ) -> Dict:
'''simple docstring'''
__lowerCamelCase = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained('andreasmadsen/efficient_mlm_m0.40' , from_pt=lowerCamelCase__ )
__lowerCamelCase = np.array([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] , dtype=jnp.intaa )
__lowerCamelCase = model(lowerCamelCase__ )[0]
__lowerCamelCase = [1, 11, 50_265]
self.assertEqual(list(output.shape ) , lowerCamelCase__ )
# compare the actual values for a slice.
__lowerCamelCase = np.array(
[[[40.48_80, 18.01_99, -5.23_67], [-1.88_77, -4.08_85, 10.70_85], [-2.26_13, -5.61_10, 7.26_65]]] , dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3] , lowerCamelCase__ , atol=1e-4 ) )
@slow
def lowercase_ ( self ) -> int:
'''simple docstring'''
__lowerCamelCase = FlaxRobertaPreLayerNormModel.from_pretrained('andreasmadsen/efficient_mlm_m0.40' , from_pt=lowerCamelCase__ )
__lowerCamelCase = np.array([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] , dtype=jnp.intaa )
__lowerCamelCase = model(lowerCamelCase__ )[0]
# compare the actual values for a slice.
__lowerCamelCase = np.array(
[[[0.02_08, -0.03_56, 0.02_37], [-0.15_69, -0.04_11, -0.26_26], [0.18_79, 0.01_25, -0.00_89]]] , dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3] , lowerCamelCase__ , atol=1e-4 ) )
| 348 | 0 |
import torch
import torch.nn as nn

from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel

from ...utils import logging


logger = logging.get_logger(__name__)


def cosine_distance(image_embeds, text_embeds):
    normalized_image_embeds = nn.functional.normalize(image_embeds)
    normalized_text_embeds = nn.functional.normalize(text_embeds)
    return torch.mm(normalized_image_embeds, normalized_text_embeds.t())
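# Because both embedding matrices are L2-normalized first, the matrix product above is
# exactly the pairwise cosine similarity: for image_embeds of shape (batch, dim) and
# concept embeddings of shape (n_concepts, dim), the result is a (batch, n_concepts)
# matrix of similarities in [-1, 1].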
class StableDiffusionSafetyChecker(PreTrainedModel):
    config_class = CLIPConfig

    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config: CLIPConfig):
        super().__init__(config)

        self.vision_model = CLIPVisionModel(config.vision_config)
        self.visual_projection = nn.Linear(config.vision_config.hidden_size, config.projection_dim, bias=False)

        self.concept_embeds = nn.Parameter(torch.ones(17, config.projection_dim), requires_grad=False)
        self.special_care_embeds = nn.Parameter(torch.ones(3, config.projection_dim), requires_grad=False)

        self.concept_embeds_weights = nn.Parameter(torch.ones(17), requires_grad=False)
        self.special_care_embeds_weights = nn.Parameter(torch.ones(3), requires_grad=False)
    @torch.no_grad()
    def forward(self, clip_input, images):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds).cpu().float().numpy()
        cos_dist = cosine_distance(image_embeds, self.concept_embeds).cpu().float().numpy()

        result = []
        batch_size = image_embeds.shape[0]
        for i in range(batch_size):
            result_img = {"special_scores": {}, "special_care": [], "concept_scores": {}, "bad_concepts": []}

            # increase this value to create a stronger `nfsw` filter
            # at the cost of increasing the possibility of filtering benign images
            adjustment = 0.0

            for concept_idx in range(len(special_cos_dist[0])):
                concept_cos = special_cos_dist[i][concept_idx]
                concept_threshold = self.special_care_embeds_weights[concept_idx].item()
                result_img["special_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["special_scores"][concept_idx] > 0:
                    result_img["special_care"].append({concept_idx, result_img["special_scores"][concept_idx]})
                    adjustment = 0.01

            for concept_idx in range(len(cos_dist[0])):
                concept_cos = cos_dist[i][concept_idx]
                concept_threshold = self.concept_embeds_weights[concept_idx].item()
                result_img["concept_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["concept_scores"][concept_idx] > 0:
                    result_img["bad_concepts"].append(concept_idx)

            result.append(result_img)

        has_nsfw_concepts = [len(res["bad_concepts"]) > 0 for res in result]

        return images, has_nsfw_concepts
    @torch.no_grad()
    def forward_onnx(self, clip_input: torch.FloatTensor, images: torch.FloatTensor):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign images
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights + adjustment
        # special_scores = special_scores.round(decimals=3)
        special_care = torch.any(special_scores > 0, dim=1)
        special_adjustment = special_care * 0.01
        special_adjustment = special_adjustment.unsqueeze(1).expand(-1, cos_dist.shape[1])

        concept_scores = (cos_dist - self.concept_embeds_weights) + special_adjustment
        # concept_scores = concept_scores.round(decimals=3)
        has_nsfw_concepts = torch.any(concept_scores > 0, dim=1)

        return images, has_nsfw_concepts
| 371 |
from ..utils import DummyObject, requires_backends
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Any:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> int:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> str:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> List[str]:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Any:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Any:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Dict:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Dict:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Tuple:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Dict:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> str:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Dict:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Tuple:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Dict:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Any:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Dict:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> List[Any]:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> List[Any]:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Any:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> str:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> int:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> int:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
| 348 | 0 |
'''simple docstring'''

from dataclasses import dataclass, field
from typing import Optional

from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser


@dataclass
class ModelArguments:
    output_dir: str = field(
        metadata={"help": "The output directory where the model will be written."},
    )
    encoder_model_name_or_path: str = field(
        metadata={
            "help": (
                "The encoder model checkpoint for weights initialization. "
                "Don't set if you want to train an encoder model from scratch."
            )
        },
    )
    decoder_model_name_or_path: str = field(
        metadata={
            "help": (
                "The decoder model checkpoint for weights initialization. "
                "Don't set if you want to train a decoder model from scratch."
            )
        },
    )
    encoder_config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained encoder config name or path if not the same as encoder_model_name"}
    )
    decoder_config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained decoder config name or path if not the same as decoder_model_name"}
    )


def main():
    parser = HfArgumentParser((ModelArguments,))
    (model_args,) = parser.parse_args_into_dataclasses()

    # Load pretrained model and tokenizer

    # Use explicit specified encoder config
    if model_args.encoder_config_name:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_config_name)
    # Use pretrained encoder model's config
    else:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path)

    # Use explicit specified decoder config
    if model_args.decoder_config_name:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_config_name)
    # Use pretrained decoder model's config
    else:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path)

    # necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed
    decoder_config.is_decoder = True
    decoder_config.add_cross_attention = True

    model = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained(
        encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path,
        decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path,
        encoder_config=encoder_config,
        decoder_config=decoder_config,
    )

    # GPT2 only has bos/eos tokens but not decoder_start/pad tokens
    decoder_start_token_id = decoder_config.decoder_start_token_id
    pad_token_id = decoder_config.pad_token_id
    if decoder_start_token_id is None:
        decoder_start_token_id = decoder_config.bos_token_id
    if pad_token_id is None:
        pad_token_id = decoder_config.eos_token_id

    # This is necessary to make Flax's generate() work
    model.config.eos_token_id = decoder_config.eos_token_id
    model.config.decoder_start_token_id = decoder_start_token_id
    model.config.pad_token_id = pad_token_id

    image_processor = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path)

    tokenizer = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path)
    tokenizer.pad_token = tokenizer.convert_ids_to_tokens(model.config.pad_token_id)

    model.save_pretrained(model_args.output_dir)
    image_processor.save_pretrained(model_args.output_dir)
    tokenizer.save_pretrained(model_args.output_dir)


if __name__ == "__main__":
    main()
| 349 |
'''simple docstring'''

import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path

import torch_xla.distributed.xla_multiprocessing as xmp


def parse_args():
    """
    Helper function parsing the command line options.
    """
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
        )
    )

    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")

    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )

    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)

    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)


if __name__ == "__main__":
    main()
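    # Invocation sketch, analogous to torch.distributed.launch (training script name is
    # illustrative):
    #   python xla_spawn.py --num_cores 8 run_glue.py --model_name_or_path bert-base-cased ...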
| 349 | 1 |
'''simple docstring'''

from datetime import datetime

import requests


def download_video(url: str) -> bytes:
    """Resolve the direct video source via downloadgram and return the raw video bytes."""
    base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
    video_url = requests.get(base_url + url).json()[0]["urls"][0]["src"]
    return requests.get(video_url).content


if __name__ == "__main__":
    url = input("Enter Video/IGTV url: ").strip()
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"
    with open(file_name, "wb") as fp:
        fp.write(download_video(url))
    print(f"Done. Video saved to disk as {file_name}.")
| 349 |
'''simple docstring'''


def remove_digit(num: int) -> int:
    """Return the biggest number obtainable by deleting exactly one digit of `num`."""
    if not isinstance(num, int):
        raise TypeError("only integers accepted as input")
    num_str = str(abs(num))
    num_transpositions = [list(num_str) for _ in range(len(num_str))]
    for index in range(len(num_str)):
        num_transpositions[index].pop(index)
    return max(int("".join(transposition)) for transposition in num_transpositions)


if __name__ == "__main__":
    __import__("doctest").testmod()
| 349 | 1 |
'''simple docstring'''

from math import atan, cos, radians, sin, tan

from .haversine_distance import haversine_distance

AXIS_A = 6378137.0
AXIS_B = 6356752.314245
EQUATORIAL_RADIUS = 6378137


def lamberts_ellipsoidal_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """Approximate the surface distance in meters between two points on the WGS84 ellipsoid."""
    # Equation parameters
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    # Parametric latitudes
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    b_lat1 = atan((1 - flattening) * tan(radians(lat1)))
    b_lat2 = atan((1 - flattening) * tan(radians(lat2)))

    # Compute central angle between two points
    # using haversine theta. sigma = haversine_distance / equatorial radius
    sigma = haversine_distance(lat1, lon1, lat2, lon2) / EQUATORIAL_RADIUS

    # Intermediate P and Q values
    p_value = (b_lat1 + b_lat2) / 2
    q_value = (b_lat2 - b_lat1) / 2

    # Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
    x_numerator = (sin(p_value) ** 2) * (cos(q_value) ** 2)
    x_demonimator = cos(sigma / 2) ** 2
    x_value = (sigma - sin(sigma)) * (x_numerator / x_demonimator)

    # Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
    y_numerator = (cos(p_value) ** 2) * (sin(q_value) ** 2)
    y_denominator = sin(sigma / 2) ** 2
    y_value = (sigma + sin(sigma)) * (y_numerator / y_denominator)

    return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
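    # Lambert's formula applies a first-order flattening correction on top of the
    # spherical central angle sigma; accuracy is on the order of ~10 m over thousands
    # of km. Example call (coordinates in degrees are illustrative, roughly San
    # Francisco -> Yosemite; the return value is in meters):
    #   lamberts_ellipsoidal_distance(37.774856, -122.424227, 37.864742, -119.537521)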
| 349 |
'''simple docstring'''

import warnings
from pathlib import Path
from typing import List, Tuple, Union

import fire
from torch import nn

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging


logger = logging.get_logger(__name__)


def copy_layers(src_layers: nn.ModuleList, dest_layers: nn.ModuleList, layers_to_copy: List[int]) -> None:
    layers_to_copy = nn.ModuleList([src_layers[i] for i in layers_to_copy])
    assert len(dest_layers) == len(layers_to_copy), f"{len(dest_layers)} != {len(layers_to_copy)}"
    dest_layers.load_state_dict(layers_to_copy.state_dict())
LAYERS_TO_COPY = {
    # maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
    # 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
    12: {
        1: [0],  # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
        2: [0, 6],
        3: [0, 6, 11],
        4: [0, 4, 8, 11],
        6: [0, 2, 4, 7, 9, 11],
        9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
        12: list(range(12)),
    },
    16: {  # maps num layers in student -> which teacher layers to copy
        1: [0],
        2: [0, 15],
        3: [0, 8, 15],
        4: [0, 5, 10, 15],
        6: [0, 3, 6, 9, 12, 15],
        8: [0, 2, 4, 6, 8, 10, 12, 15],
        9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
        12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
        16: list(range(16)),
    },
    6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
LAYERS_TO_SUPERVISE = {
    # maps num layers in student -> which teacher layers to copy.
    6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
    12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
    16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}
def pick_layers_to_copy(n_student, n_teacher):
    try:
        val = LAYERS_TO_COPY[n_teacher][n_student]
        return val
    except KeyError:
        if n_student != n_teacher:
            warnings.warn(
                f"no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first"
                f" {n_student}"
            )
        return list(range(n_student))


def get_layers_to_supervise(n_student, n_teacher) -> List[int]:
    if n_student > n_teacher:
        raise ValueError(f"Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}")
    elif n_teacher == n_student:
        return list(range(n_teacher))
    elif n_student == 1:
        return [n_teacher - 1]
    else:
        return LAYERS_TO_SUPERVISE[n_teacher][n_student]
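# Examples read straight off the tables above:
#   pick_layers_to_copy(n_student=3, n_teacher=12)      -> [0, 6, 11]
#   get_layers_to_supervise(n_student=3, n_teacher=12)  -> [3, 7, 11]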
def create_student_by_copying_alternating_layers(
    teacher: Union[str, PreTrainedModel],
    save_path: Union[str, Path] = "student",
    e: Union[int, None] = None,
    d: Union[int, None] = None,
    copy_first_teacher_layers=False,
    e_layers_to_copy=None,
    d_layers_to_copy=None,
    **extra_config_kwargs,
) -> Tuple[PreTrainedModel, List[int], List[int]]:
    """Make a student model by copying alternating layers from a teacher, then save it to save_path."""
    _msg = "encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher."
    assert (e is not None) or (d is not None), _msg
    if isinstance(teacher, str):
        AutoTokenizer.from_pretrained(teacher).save_pretrained(save_path)  # purely for convenience
        teacher = AutoModelForSeq2SeqLM.from_pretrained(teacher).eval()
    else:
        assert isinstance(teacher, PreTrainedModel), f"teacher must be a model or string got type {type(teacher)}"
    init_kwargs = teacher.config.to_diff_dict()

    try:
        teacher_e, teacher_d = teacher.config.encoder_layers, teacher.config.decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        init_kwargs.update({"encoder_layers": e, "decoder_layers": d})
    except AttributeError:  # T5
        if hasattr(teacher.config, "num_encoder_layers"):
            teacher_e, teacher_d = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
        else:
            teacher_e, teacher_d = teacher.config.num_layers, teacher.config.num_decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        if hasattr(teacher.config, "num_encoder_layers"):
            init_kwargs.update({"num_encoder_layers": e, "num_decoder_layers": d})
        else:
            init_kwargs.update({"num_layers": e, "num_decoder_layers": d})

    # Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
    init_kwargs.update(extra_config_kwargs)

    # Copy weights
    student_cfg = teacher.config_class(**init_kwargs)
    student = AutoModelForSeq2SeqLM.from_config(student_cfg)
    # Start by copying the full teacher state dict this will copy the first N teacher layers to the student.
    info = student.load_state_dict(teacher.state_dict(), strict=False)
    assert info.missing_keys == [], info.missing_keys  # every student key should have a teacher keys.

    if copy_first_teacher_layers:  # Our copying is done. We just log and save
        e_layers_to_copy, d_layers_to_copy = list(range(e)), list(range(d))
        logger.info(
            f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to"
            f" {save_path}"
        )
        student.save_pretrained(save_path)
        return student, e_layers_to_copy, d_layers_to_copy

    # Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
    if e_layers_to_copy is None:
        e_layers_to_copy: List[int] = pick_layers_to_copy(e, teacher_e)
    if d_layers_to_copy is None:
        d_layers_to_copy: List[int] = pick_layers_to_copy(d, teacher_d)

    try:
        if hasattr(
            teacher, "prophetnet"
        ):  # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
            copy_layers(teacher.prophetnet.encoder.layers, student.prophetnet.encoder.layers, e_layers_to_copy)
            copy_layers(teacher.prophetnet.decoder.layers, student.prophetnet.decoder.layers, d_layers_to_copy)
        else:
            copy_layers(teacher.model.encoder.layers, student.model.encoder.layers, e_layers_to_copy)
            copy_layers(teacher.model.decoder.layers, student.model.decoder.layers, d_layers_to_copy)
    except AttributeError:  # For t5, student.model.encoder.layers is called student.encoder.block
        copy_layers(teacher.encoder.block, student.encoder.block, e_layers_to_copy)
        copy_layers(teacher.decoder.block, student.decoder.block, d_layers_to_copy)
    logger.info(
        f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}"
    )
    # Save information about copying for easier reproducibility
    student.config.init_metadata = {
        "teacher_type": teacher.config.model_type,
        "copied_encoder_layers": e_layers_to_copy,
        "copied_decoder_layers": d_layers_to_copy,
    }
    student.save_pretrained(save_path)
    return student, e_layers_to_copy, d_layers_to_copy


if __name__ == "__main__":
    fire.Fire(create_student_by_copying_alternating_layers)
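    # CLI sketch via python-fire (teacher checkpoint and layer counts are illustrative):
    #   python make_student.py facebook/bart-large-cnn student_dir --e 12 --d 3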
| 349 | 1 |
'''simple docstring'''

from __future__ import annotations

import math
from collections.abc import Callable


def line_length(
    fnc: Callable[[float], float],
    x_start: float,
    x_end: float,
    steps: int = 100,
) -> float:
    """Approximate the arc length of `fnc` on [x_start, x_end] using `steps` linear segments."""
    x1 = x_start
    fx1 = fnc(x_start)
    length = 0.0

    for _ in range(steps):
        # Approximates curve as a sequence of linear lines and sums their length
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        length += math.hypot(x2 - x1, fx2 - fx1)

        # Increment step
        x1 = x2
        fx1 = fx2

    return length


if __name__ == "__main__":

    def f(x):
        return math.sin(10 * x)

    print("f(x) = sin(10 * x)")
    print("The length of the curve from x = -10 to x = 10 is:")
    i = 10
    while i <= 100000:
        print(f"With {i} steps: {line_length(f, -10, 10, i)}")
        i *= 10
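    # Sanity check: for the straight line f(x) = x on [0, 1], every segment has slope 1,
    # so the sum telescopes to sqrt(2) regardless of the number of steps:
    #   line_length(lambda x: x, 0, 1, 10)  ~= 1.41421356  (= math.sqrt(2))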
| 349 |
'''simple docstring'''


def search(list_data: list, key: int, left: int = 0, right: int = 0) -> int:
    """Two-ended linear search: check both ends, then recurse inward. Returns -1 if absent."""
    right = right or len(list_data) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return search(list_data, key, left + 1, right - 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
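    # Example: search([5, 1, 4, 2, 3], 4) probes both ends and recurses inward,
    # returning index 2. Unlike binary search, the input does not need to be sorted;
    # the cost is O(n) comparisons (two per recursion level).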
| 349 | 1 |
'''simple docstring'''

import unittest

from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow


if is_flax_available():
    import optax
    from flax.training.common_utils import onehot

    from transformers import AutoTokenizer, FlaxMT5ForConditionalGeneration
    from transformers.models.t5.modeling_flax_t5 import shift_tokens_right


@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = FlaxMT5ForConditionalGeneration.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="np").input_ids
        labels = tokenizer("Hi I am", return_tensors="np").input_ids

        decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id)

        logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
        loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()

        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
| 349 |
'''simple docstring'''

from datetime import datetime

import requests


def download_video(url: str) -> bytes:
    """Resolve the direct video source via downloadgram and return the raw video bytes."""
    base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
    video_url = requests.get(base_url + url).json()[0]["urls"][0]["src"]
    return requests.get(video_url).content


if __name__ == "__main__":
    url = input("Enter Video/IGTV url: ").strip()
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"
    with open(file_name, "wb") as fp:
        fp.write(download_video(url))
    print(f"Done. Video saved to disk as {file_name}.")
| 349 | 1 |
'''simple docstring'''

from argparse import ArgumentParser

from . import BaseTransformersCLICommand


def download_command_factory(args):
    return DownloadCommand(args.model, args.cache_dir, args.force, args.trust_remote_code)


class DownloadCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("download")
        download_parser.add_argument(
            "--cache-dir", type=str, default=None, help="Path to location to store the models"
        )
        download_parser.add_argument(
            "--force", action="store_true", help="Force the model to be download even if already in cache-dir"
        )
        download_parser.add_argument(
            "--trust-remote-code",
            action="store_true",
            help="Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine",
        )
        download_parser.add_argument("model", type=str, help="Name of the model to download")
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, model: str, cache: str, force: bool, trust_remote_code: bool):
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code

    def run(self):
        from ..models.auto import AutoModel, AutoTokenizer

        AutoModel.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
        AutoTokenizer.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
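# Wired into the `transformers-cli` entry point, the subcommand above enables, e.g.:
#   transformers-cli download bert-base-uncased --cache-dir /tmp/models --force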
| 349 |
'''simple docstring'''
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def _snake_case ( self ) -> Dict:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _snake_case ( self ) -> Optional[int]:
_UpperCAmelCase : List[str] = 1
_UpperCAmelCase : List[str] = 3
_UpperCAmelCase : Union[str, Any] = (32, 32)
_UpperCAmelCase : str = floats_tensor((batch_size, num_channels) + sizes ,rng=random.Random(0 ) ).to(a_ )
return image
@property
def _snake_case ( self ) -> List[Any]:
torch.manual_seed(0 )
_UpperCAmelCase : List[str] = UNetaDConditionModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") ,up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") ,cross_attention_dim=32 ,)
return model
@property
def _snake_case ( self ) -> Optional[int]:
torch.manual_seed(0 )
_UpperCAmelCase : str = AutoencoderKL(
block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] ,up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] ,latent_channels=4 ,)
return model
@property
def _snake_case ( self ) -> Dict:
torch.manual_seed(0 )
_UpperCAmelCase : Any = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1E-0_5 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1_000 ,)
return CLIPTextModel(a_ )
@property
def _snake_case ( self ) -> Union[str, Any]:
def extract(*a_ ,**a_ ):
class lowercase :
"""simple docstring"""
def __init__( self ) -> Any:
_UpperCAmelCase : str = torch.ones([0] )
def _snake_case ( self ,a_ ) -> Any:
self.pixel_values.to(a_ )
return self
return Out()
return extract
def _snake_case ( self ) -> List[str]:
_UpperCAmelCase : List[str] = """cpu""" # ensure determinism for the device-dependent torch.Generator
_UpperCAmelCase : Union[str, Any] = self.dummy_cond_unet
_UpperCAmelCase : int = DDIMScheduler(
beta_start=0.0_0085 ,beta_end=0.012 ,beta_schedule="""scaled_linear""" ,clip_sample=a_ ,set_alpha_to_one=a_ ,)
_UpperCAmelCase : Optional[int] = self.dummy_vae
_UpperCAmelCase : Optional[int] = self.dummy_text_encoder
_UpperCAmelCase : str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# unlike the PNDM tests below, this test wires in the DDIM scheduler configured above
_UpperCAmelCase : int = StableDiffusionPipeline(
unet=a_ ,scheduler=a_ ,vae=a_ ,text_encoder=a_ ,tokenizer=a_ ,safety_checker=a_ ,feature_extractor=self.dummy_extractor ,)
_UpperCAmelCase : Optional[Any] = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
_UpperCAmelCase : Union[str, Any] = """A painting of a squirrel eating a burger"""
_UpperCAmelCase : Optional[int] = torch.Generator(device=a_ ).manual_seed(0 )
_UpperCAmelCase : str = sd_pipe([prompt] ,generator=a_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" )
_UpperCAmelCase : int = output.images
_UpperCAmelCase : Union[str, Any] = torch.Generator(device=a_ ).manual_seed(0 )
_UpperCAmelCase : str = sd_pipe(
[prompt] ,generator=a_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" ,return_dict=a_ ,)[0]
_UpperCAmelCase : str = image[0, -3:, -3:, -1]
_UpperCAmelCase : Dict = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_UpperCAmelCase : Optional[int] = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _snake_case ( self ) -> Any:
_UpperCAmelCase : Any = """cpu""" # ensure determinism for the device-dependent torch.Generator
_UpperCAmelCase : Tuple = self.dummy_cond_unet
_UpperCAmelCase : Optional[int] = PNDMScheduler(skip_prk_steps=a_ )
_UpperCAmelCase : int = self.dummy_vae
_UpperCAmelCase : int = self.dummy_text_encoder
_UpperCAmelCase : Union[str, Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# make sure here that pndm scheduler skips prk
_UpperCAmelCase : str = StableDiffusionPipeline(
unet=a_ ,scheduler=a_ ,vae=a_ ,text_encoder=a_ ,tokenizer=a_ ,safety_checker=a_ ,feature_extractor=self.dummy_extractor ,)
_UpperCAmelCase : str = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
_UpperCAmelCase : int = """A painting of a squirrel eating a burger"""
_UpperCAmelCase : Any = torch.Generator(device=a_ ).manual_seed(0 )
_UpperCAmelCase : List[Any] = sd_pipe([prompt] ,generator=a_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" )
_UpperCAmelCase : Dict = output.images
_UpperCAmelCase : List[Any] = torch.Generator(device=a_ ).manual_seed(0 )
_UpperCAmelCase : Any = sd_pipe(
[prompt] ,generator=a_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" ,return_dict=a_ ,)[0]
_UpperCAmelCase : Optional[int] = image[0, -3:, -3:, -1]
_UpperCAmelCase : int = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_UpperCAmelCase : Union[str, Any] = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _snake_case ( self ) -> Optional[int]:
_UpperCAmelCase : Optional[int] = StableDiffusionPipeline.from_pretrained(
"""hf-internal-testing/tiny-stable-diffusion-lms-pipe""" ,safety_checker=a_ )
assert isinstance(a_ ,a_ )
assert isinstance(pipe.scheduler ,a_ )
assert pipe.safety_checker is None
_UpperCAmelCase : Dict = pipe("""example prompt""" ,num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(a_ )
_UpperCAmelCase : Any = StableDiffusionPipeline.from_pretrained(a_ )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
_UpperCAmelCase : Union[str, Any] = pipe("""example prompt""" ,num_inference_steps=2 ).images[0]
assert image is not None
@unittest.skipIf(torch_device != """cuda""" ,"""This test requires a GPU""" )
def _snake_case ( self ) -> str:
_UpperCAmelCase : Optional[int] = self.dummy_cond_unet
_UpperCAmelCase : str = PNDMScheduler(skip_prk_steps=a_ )
_UpperCAmelCase : List[str] = self.dummy_vae
_UpperCAmelCase : int = self.dummy_text_encoder
_UpperCAmelCase : str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# put models in fp16
_UpperCAmelCase : str = unet.half()
_UpperCAmelCase : List[str] = vae.half()
_UpperCAmelCase : Dict = bert.half()
# make sure here that pndm scheduler skips prk
_UpperCAmelCase : Dict = StableDiffusionPipeline(
unet=a_ ,scheduler=a_ ,vae=a_ ,text_encoder=a_ ,tokenizer=a_ ,safety_checker=a_ ,feature_extractor=self.dummy_extractor ,)
_UpperCAmelCase : List[str] = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
_UpperCAmelCase : str = """A painting of a squirrel eating a burger"""
_UpperCAmelCase : int = sd_pipe([prompt] ,num_inference_steps=2 ,output_type="""np""" ).images
assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def _snake_case ( self ) -> Any:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _snake_case ( self ) -> str:
_UpperCAmelCase : List[str] = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" ,safety_checker=a_ )
_UpperCAmelCase : Dict = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
_UpperCAmelCase : int = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
_UpperCAmelCase : List[Any] = (
"""portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"""
""" coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"""
""" anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"""
""" children from bahnhof zoo, detailed """
)
_UpperCAmelCase : Any = 4_003_660_346
_UpperCAmelCase : List[Any] = 7
# without safety guidance (sld_guidance_scale = 0)
_UpperCAmelCase : int = torch.manual_seed(a_ )
_UpperCAmelCase : str = sd_pipe(
[prompt] ,generator=a_ ,guidance_scale=a_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=0 ,)
_UpperCAmelCase : str = output.images
_UpperCAmelCase : Optional[int] = image[0, -3:, -3:, -1]
_UpperCAmelCase : List[str] = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
# with safety guidance (strong configuration)
_UpperCAmelCase : List[str] = torch.manual_seed(a_ )
_UpperCAmelCase : Optional[Any] = sd_pipe(
[prompt] ,generator=a_ ,guidance_scale=a_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=2_000 ,sld_warmup_steps=7 ,sld_threshold=0.025 ,sld_momentum_scale=0.5 ,sld_mom_beta=0.7 ,)
_UpperCAmelCase : List[str] = output.images
_UpperCAmelCase : List[str] = image[0, -3:, -3:, -1]
_UpperCAmelCase : List[str] = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _snake_case ( self ) -> int:
_UpperCAmelCase : Any = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" ,safety_checker=a_ )
_UpperCAmelCase : Union[str, Any] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
_UpperCAmelCase : Union[str, Any] = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
_UpperCAmelCase : Any = """padme amidala taking a bath artwork, safe for work, no nudity"""
_UpperCAmelCase : Optional[Any] = 2_734_971_755
_UpperCAmelCase : Optional[int] = 7
_UpperCAmelCase : int = torch.manual_seed(a_ )
_UpperCAmelCase : int = sd_pipe(
[prompt] ,generator=a_ ,guidance_scale=a_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=0 ,)
_UpperCAmelCase : Optional[int] = output.images
_UpperCAmelCase : List[Any] = image[0, -3:, -3:, -1]
_UpperCAmelCase : Optional[int] = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
_UpperCAmelCase : Optional[int] = torch.manual_seed(a_ )
_UpperCAmelCase : int = sd_pipe(
[prompt] ,generator=a_ ,guidance_scale=a_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=2_000 ,sld_warmup_steps=7 ,sld_threshold=0.025 ,sld_momentum_scale=0.5 ,sld_mom_beta=0.7 ,)
_UpperCAmelCase : Union[str, Any] = output.images
_UpperCAmelCase : Any = image[0, -3:, -3:, -1]
_UpperCAmelCase : List[Any] = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _snake_case ( self ) -> Any:
_UpperCAmelCase : Any = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" )
_UpperCAmelCase : List[str] = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
_UpperCAmelCase : Optional[int] = (
"""the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."""
""" leyendecker"""
)
_UpperCAmelCase : Dict = 1_044_355_234
_UpperCAmelCase : int = 12
_UpperCAmelCase : Optional[Any] = torch.manual_seed(a_ )
_UpperCAmelCase : List[str] = sd_pipe(
[prompt] ,generator=a_ ,guidance_scale=a_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=0 ,)
_UpperCAmelCase : List[str] = output.images
_UpperCAmelCase : Union[str, Any] = image[0, -3:, -3:, -1]
_UpperCAmelCase : Dict = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-7
_UpperCAmelCase : Tuple = torch.manual_seed(a_ )
_UpperCAmelCase : Dict = sd_pipe(
[prompt] ,generator=a_ ,guidance_scale=a_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=2_000 ,sld_warmup_steps=7 ,sld_threshold=0.025 ,sld_momentum_scale=0.5 ,sld_mom_beta=0.7 ,)
_UpperCAmelCase : Optional[Any] = output.images
_UpperCAmelCase : Dict = image[0, -3:, -3:, -1]
_UpperCAmelCase : int = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 349 | 1 |
'''simple docstring'''
def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 0 , lowerCAmelCase_ = 0 )-> int:
'''simple docstring'''
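# Two-ended recursive linear search over list_data: the key is compared
# against both ends of the current range, which then shrinks by one on each
# side per recursive call. Returns the matching index, or -1 if absent.
# Note: `right or len(list_data) - 1` treats an explicit right=0 as "unset"
# and silently replaces it with the last index.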
_UpperCAmelCase : Optional[Any] = right or len(lowerCAmelCase_ ) - 1
if left > right:
return -1
elif list_data[left] == key:
return left
elif list_data[right] == key:
return right
else:
return search(lowerCAmelCase_ , lowerCAmelCase_ , left + 1 , right - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 349 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
A_ : str = {
"""configuration_roberta_prelayernorm""": [
"""ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""RobertaPreLayerNormConfig""",
"""RobertaPreLayerNormOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Optional[Any] = [
"""ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RobertaPreLayerNormForCausalLM""",
"""RobertaPreLayerNormForMaskedLM""",
"""RobertaPreLayerNormForMultipleChoice""",
"""RobertaPreLayerNormForQuestionAnswering""",
"""RobertaPreLayerNormForSequenceClassification""",
"""RobertaPreLayerNormForTokenClassification""",
"""RobertaPreLayerNormModel""",
"""RobertaPreLayerNormPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Optional[int] = [
"""TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRobertaPreLayerNormForCausalLM""",
"""TFRobertaPreLayerNormForMaskedLM""",
"""TFRobertaPreLayerNormForMultipleChoice""",
"""TFRobertaPreLayerNormForQuestionAnswering""",
"""TFRobertaPreLayerNormForSequenceClassification""",
"""TFRobertaPreLayerNormForTokenClassification""",
"""TFRobertaPreLayerNormMainLayer""",
"""TFRobertaPreLayerNormModel""",
"""TFRobertaPreLayerNormPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Optional[Any] = [
"""FlaxRobertaPreLayerNormForCausalLM""",
"""FlaxRobertaPreLayerNormForMaskedLM""",
"""FlaxRobertaPreLayerNormForMultipleChoice""",
"""FlaxRobertaPreLayerNormForQuestionAnswering""",
"""FlaxRobertaPreLayerNormForSequenceClassification""",
"""FlaxRobertaPreLayerNormForTokenClassification""",
"""FlaxRobertaPreLayerNormModel""",
"""FlaxRobertaPreLayerNormPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
A_ : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 349 | 1 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
def snake_case_ ( lowerCAmelCase_ )-> None:
'''simple docstring'''
create_state_space_tree(lowerCAmelCase_ , [] , 0 )
def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )-> None:
'''simple docstring'''
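# Include/exclude backtracking: first recurse without sequence[index], then
# append it, recurse again, and pop to undo the choice. A complete
# subsequence is printed whenever index reaches len(sequence).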
if index == len(lowerCAmelCase_ ):
print(lowerCAmelCase_ )
return
create_state_space_tree(lowerCAmelCase_ , lowerCAmelCase_ , index + 1 )
current_subsequence.append(sequence[index] )
create_state_space_tree(lowerCAmelCase_ , lowerCAmelCase_ , index + 1 )
current_subsequence.pop()
if __name__ == "__main__":
A_ : list[Any] = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(["""A""", """B""", """C"""])
generate_all_subsequences(seq)
| 349 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A_ : Union[str, Any] = logging.get_logger(__name__)
A_ : Any = {
"""hustvl/yolos-small""": """https://huggingface.co/hustvl/yolos-small/resolve/main/config.json""",
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class lowercase ( _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase = """yolos"""
def __init__( self ,a_=768 ,a_=12 ,a_=12 ,a_=3_072 ,a_="gelu" ,a_=0.0 ,a_=0.0 ,a_=0.02 ,a_=1E-1_2 ,a_=[512, 864] ,a_=16 ,a_=3 ,a_=True ,a_=100 ,a_=True ,a_=False ,a_=1 ,a_=5 ,a_=2 ,a_=5 ,a_=2 ,a_=0.1 ,**a_ ,) -> List[str]:
super().__init__(**a_ )
_UpperCAmelCase : Optional[Any] = hidden_size
_UpperCAmelCase : Optional[Any] = num_hidden_layers
_UpperCAmelCase : Tuple = num_attention_heads
_UpperCAmelCase : Optional[Any] = intermediate_size
_UpperCAmelCase : Union[str, Any] = hidden_act
_UpperCAmelCase : List[str] = hidden_dropout_prob
_UpperCAmelCase : Optional[int] = attention_probs_dropout_prob
_UpperCAmelCase : List[Any] = initializer_range
_UpperCAmelCase : Union[str, Any] = layer_norm_eps
_UpperCAmelCase : int = image_size
_UpperCAmelCase : Dict = patch_size
_UpperCAmelCase : Tuple = num_channels
_UpperCAmelCase : Optional[Any] = qkv_bias
_UpperCAmelCase : List[Any] = num_detection_tokens
_UpperCAmelCase : Tuple = use_mid_position_embeddings
_UpperCAmelCase : int = auxiliary_loss
# Hungarian matcher
_UpperCAmelCase : Dict = class_cost
_UpperCAmelCase : Dict = bbox_cost
_UpperCAmelCase : Optional[int] = giou_cost
# Loss coefficients
_UpperCAmelCase : int = bbox_loss_coefficient
_UpperCAmelCase : Optional[Any] = giou_loss_coefficient
_UpperCAmelCase : Union[str, Any] = eos_coefficient
class lowercase ( _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase = version.parse("""1.11""" )
@property
def _snake_case ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def _snake_case ( self ) -> float:
return 1E-4
@property
def _snake_case ( self ) -> int:
return 12
| 349 | 1 |
'''simple docstring'''
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowercase ( _lowerCamelCase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase = LEDTokenizer
UpperCAmelCase = LEDTokenizerFast
UpperCAmelCase = True
def _snake_case ( self ) -> Any:
super().setUp()
_UpperCAmelCase : Optional[Any] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
_UpperCAmelCase : List[str] = dict(zip(a_ ,range(len(a_ ) ) ) )
_UpperCAmelCase : str = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
_UpperCAmelCase : Optional[int] = {"""unk_token""": """<unk>"""}
_UpperCAmelCase : Optional[int] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] )
_UpperCAmelCase : Any = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as fp:
fp.write(json.dumps(a_ ) + """\n""" )
with open(self.merges_file ,"""w""" ,encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(a_ ) )
def _snake_case ( self ,**a_ ) -> Optional[int]:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname ,**a_ )
def _snake_case ( self ,**a_ ) -> Tuple:
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname ,**a_ )
def _snake_case ( self ,a_ ) -> int:
return "lower newer", "lower newer"
@cached_property
def _snake_case ( self ) -> str:
return LEDTokenizer.from_pretrained("""allenai/led-base-16384""" )
@cached_property
def _snake_case ( self ) -> List[Any]:
return LEDTokenizerFast.from_pretrained("""allenai/led-base-16384""" )
@require_torch
def _snake_case ( self ) -> List[Any]:
_UpperCAmelCase : Any = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
_UpperCAmelCase : Optional[Any] = [0, 250, 251, 17_818, 13, 39_186, 1_938, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_UpperCAmelCase : List[Any] = tokenizer(a_ ,max_length=len(a_ ) ,padding=a_ ,return_tensors="""pt""" )
self.assertIsInstance(a_ ,a_ )
self.assertEqual((2, 9) ,batch.input_ids.shape )
self.assertEqual((2, 9) ,batch.attention_mask.shape )
_UpperCAmelCase : List[str] = batch.input_ids.tolist()[0]
self.assertListEqual(a_ ,a_ )
@require_torch
def _snake_case ( self ) -> Any:
_UpperCAmelCase : Any = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_UpperCAmelCase : int = tokenizer(a_ ,padding=a_ ,return_tensors="""pt""" )
self.assertIn("""input_ids""" ,a_ )
self.assertIn("""attention_mask""" ,a_ )
self.assertNotIn("""labels""" ,a_ )
self.assertNotIn("""decoder_attention_mask""" ,a_ )
@require_torch
def _snake_case ( self ) -> List[Any]:
_UpperCAmelCase : Optional[int] = [
"""Summary of the text.""",
"""Another summary.""",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_UpperCAmelCase : Optional[int] = tokenizer(text_target=a_ ,max_length=32 ,padding="""max_length""" ,return_tensors="""pt""" )
self.assertEqual(32 ,targets["""input_ids"""].shape[1] )
@require_torch
def _snake_case ( self ) -> Tuple:
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_UpperCAmelCase : List[str] = tokenizer(
["""I am a small frog""" * 1_024, """I am a small frog"""] ,padding=a_ ,truncation=a_ ,return_tensors="""pt""" )
self.assertIsInstance(a_ ,a_ )
self.assertEqual(batch.input_ids.shape ,(2, 5_122) )
@require_torch
def _snake_case ( self ) -> List[Any]:
_UpperCAmelCase : Optional[int] = ["""A long paragraph for summarization."""]
_UpperCAmelCase : str = [
"""Summary of the text.""",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_UpperCAmelCase : Any = tokenizer(a_ ,return_tensors="""pt""" )
_UpperCAmelCase : Any = tokenizer(text_target=a_ ,return_tensors="""pt""" )
_UpperCAmelCase : List[str] = inputs["""input_ids"""]
_UpperCAmelCase : int = targets["""input_ids"""]
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
def _snake_case ( self ) -> List[Any]:
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_UpperCAmelCase : List[Any] = ["""Summary of the text.""", """Another summary."""]
_UpperCAmelCase : int = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
_UpperCAmelCase : Any = tokenizer(a_ ,padding=a_ )
_UpperCAmelCase : Any = [[0] * len(a_ ) for x in encoded_output["""input_ids"""]]
_UpperCAmelCase : Optional[int] = tokenizer.pad(a_ )
self.assertSequenceEqual(outputs["""global_attention_mask"""] ,a_ )
def _snake_case ( self ) -> List[str]:
pass
def _snake_case ( self ) -> Optional[int]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_UpperCAmelCase : List[str] = self.rust_tokenizer_class.from_pretrained(a_ ,**a_ )
_UpperCAmelCase : str = self.tokenizer_class.from_pretrained(a_ ,**a_ )
_UpperCAmelCase : int = """A, <mask> AllenNLP sentence."""
_UpperCAmelCase : str = tokenizer_r.encode_plus(a_ ,add_special_tokens=a_ ,return_token_type_ids=a_ )
_UpperCAmelCase : List[Any] = tokenizer_p.encode_plus(a_ ,add_special_tokens=a_ ,return_token_type_ids=a_ )
self.assertEqual(sum(tokens_r["""token_type_ids"""] ) ,sum(tokens_p["""token_type_ids"""] ) )
self.assertEqual(
sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) ,sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) ,)
_UpperCAmelCase : Union[str, Any] = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
_UpperCAmelCase : Optional[int] = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )
self.assertSequenceEqual(tokens_p["""input_ids"""] ,[0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(tokens_r["""input_ids"""] ,[0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(
a_ ,["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
self.assertSequenceEqual(
a_ ,["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
| 349 |
'''simple docstring'''
import unittest
from knapsack import greedy_knapsack as kp
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def _snake_case ( self ) -> Optional[Any]:
_UpperCAmelCase : Any = [10, 20, 30, 40, 50, 60]
_UpperCAmelCase : Dict = [2, 4, 6, 8, 10, 12]
_UpperCAmelCase : Optional[int] = 100
self.assertEqual(kp.calc_profit(a_ ,a_ ,a_ ) ,210 )
def _snake_case ( self ) -> Union[str, Any]:
self.assertRaisesRegex(a_ ,"""max_weight must greater than zero.""" )
def _snake_case ( self ) -> Any:
self.assertRaisesRegex(a_ ,"""Weight can not be negative.""" )
def _snake_case ( self ) -> Optional[Any]:
self.assertRaisesRegex(a_ ,"""Profit can not be negative.""" )
def _snake_case ( self ) -> Dict:
self.assertRaisesRegex(a_ ,"""max_weight must greater than zero.""" )
def _snake_case ( self ) -> Tuple:
self.assertRaisesRegex(
a_ ,"""The length of profit and weight must be same.""" )
if __name__ == "__main__":
unittest.main()
| 349 | 1 |
'''simple docstring'''
import bza
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
A_ : Optional[Any] = get_logger(__name__)
class lowercase :
"""simple docstring"""
def __init__( self ,a_ = None ) -> int:
_UpperCAmelCase : Tuple = (
os.path.join(a_ ,config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH
)
_UpperCAmelCase : Any = Extractor
def _snake_case ( self ,a_ ) -> str:
from .file_utils import hash_url_to_filename
# Path where we extract compressed archives
# We extract in the cache dir, and get the extracted path name by hashing the original path
_UpperCAmelCase : Any = os.path.abspath(a_ )
return os.path.join(self.extract_dir ,hash_url_to_filename(a_ ) )
def _snake_case ( self ,a_ ,a_ ) -> bool:
return force_extract or (
not os.path.isfile(a_ ) and not (os.path.isdir(a_ ) and os.listdir(a_ ))
)
def _snake_case ( self ,a_ ,a_ = False ) -> str:
_UpperCAmelCase : List[Any] = self.extractor.infer_extractor_format(a_ )
if not extractor_format:
return input_path
_UpperCAmelCase : Optional[int] = self._get_output_path(a_ )
if self._do_extract(a_ ,a_ ):
self.extractor.extract(a_ ,a_ ,a_ )
return output_path
class lowercase ( _lowerCamelCase ):
"""simple docstring"""
@classmethod
@abstractmethod
def _snake_case ( cls ,a_ ,**a_ ) -> bool:
...
@staticmethod
@abstractmethod
def _snake_case ( a_ ,a_ ) -> None:
...
class lowercase ( _lowerCamelCase , _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase = []
@staticmethod
def _snake_case ( a_ ,a_ ) -> List[Any]:
with open(a_ ,"""rb""" ) as f:
return f.read(a_ )
@classmethod
def _snake_case ( cls ,a_ ,a_ = b"" ) -> bool:
if not magic_number:
_UpperCAmelCase : Any = max(len(a_ ) for cls_magic_number in cls.magic_numbers )
try:
_UpperCAmelCase : Optional[Any] = cls.read_magic_number(a_ ,a_ )
except OSError:
return False
return any(magic_number.startswith(a_ ) for cls_magic_number in cls.magic_numbers )
class lowercase ( _lowerCamelCase ):
"""simple docstring"""
@classmethod
def _snake_case ( cls ,a_ ,**a_ ) -> bool:
return tarfile.is_tarfile(a_ )
@staticmethod
def _snake_case ( a_ ,a_ ) -> Optional[int]:
def resolved(a_ ) -> str:
return os.path.realpath(os.path.abspath(a_ ) )
def badpath(a_ ,a_ ) -> bool:
# joinpath will ignore base if path is absolute
return not resolved(os.path.join(a_ ,a_ ) ).startswith(a_ )
def badlink(a_ ,a_ ) -> bool:
# Links are interpreted relative to the directory containing the link
_UpperCAmelCase : List[Any] = resolved(os.path.join(a_ ,os.path.dirname(info.name ) ) )
return badpath(info.linkname ,base=a_ )
_UpperCAmelCase : List[str] = resolved(a_ )
for finfo in members:
if badpath(finfo.name ,a_ ):
logger.error(f'''Extraction of {finfo.name} is blocked (illegal path)''' )
elif finfo.issym() and badlink(a_ ,a_ ):
logger.error(f'''Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}''' )
elif finfo.islnk() and badlink(a_ ,a_ ):
logger.error(f'''Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}''' )
else:
yield finfo
@staticmethod
def _snake_case ( a_ ,a_ ) -> None:
os.makedirs(a_ ,exist_ok=a_ )
_UpperCAmelCase : Optional[int] = tarfile.open(a_ )
tar_file.extractall(a_ ,members=TarExtractor.safemembers(a_ ,a_ ) )
tar_file.close()
class lowercase ( _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase = [b"""\x1F\x8B"""]
@staticmethod
def _snake_case ( a_ ,a_ ) -> None:
with gzip.open(a_ ,"""rb""" ) as gzip_file:
with open(a_ ,"""wb""" ) as extracted_file:
shutil.copyfileobj(a_ ,a_ )
class lowercase ( _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase = [
b"""PK\x03\x04""",
b"""PK\x05\x06""", # empty archive
b"""PK\x07\x08""", # spanned archive
]
@classmethod
def _snake_case ( cls ,a_ ,a_ = b"" ) -> bool:
if super().is_extractable(a_ ,magic_number=a_ ):
return True
try:
# Alternative version of zipfile.is_zipfile that has fewer false positives, but misses executable zip archives.
# From: https://github.com/python/cpython/pull/5053
from zipfile import (
_CD_SIGNATURE,
_ECD_DISK_NUMBER,
_ECD_DISK_START,
_ECD_ENTRIES_TOTAL,
_ECD_OFFSET,
_ECD_SIZE,
_EndRecData,
sizeCentralDir,
stringCentralDir,
structCentralDir,
)
with open(a_ ,"""rb""" ) as fp:
_UpperCAmelCase : int = _EndRecData(a_ )
if endrec:
if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
return True # Empty zipfiles are still zipfiles
elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk
if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
_UpperCAmelCase : List[str] = fp.read(a_ ) # CD is where we expect it to be
if len(a_ ) == sizeCentralDir:
_UpperCAmelCase : List[Any] = struct.unpack(a_ ,a_ ) # CD is the right size
if centdir[_CD_SIGNATURE] == stringCentralDir:
return True # First central directory entry has correct magic number
return False
except Exception: # catch all errors in case future python versions change the zipfile internals
return False
@staticmethod
def _snake_case ( a_ ,a_ ) -> None:
os.makedirs(a_ ,exist_ok=a_ )
with zipfile.ZipFile(a_ ,"""r""" ) as zip_file:
zip_file.extractall(a_ )
zip_file.close()
class lowercase ( _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase = [b"""\xFD\x37\x7A\x58\x5A\x00"""]
@staticmethod
def _snake_case ( a_ ,a_ ) -> None:
with lzma.open(a_ ) as compressed_file:
with open(a_ ,"""wb""" ) as extracted_file:
shutil.copyfileobj(a_ ,a_ )
class lowercase ( _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase = [b"""Rar!\x1a\x07\x00""", b"""Rar!\x1a\x07\x01\x00"""] # RAR_ID # RAR5_ID
@staticmethod
def _snake_case ( a_ ,a_ ) -> None:
if not config.RARFILE_AVAILABLE:
raise ImportError("""Please pip install rarfile""" )
import rarfile
os.makedirs(a_ ,exist_ok=a_ )
_UpperCAmelCase : List[str] = rarfile.RarFile(a_ )
rf.extractall(a_ )
rf.close()
class lowercase ( _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase = [b"""\x28\xb5\x2F\xFD"""]
@staticmethod
def _snake_case ( a_ ,a_ ) -> None:
if not config.ZSTANDARD_AVAILABLE:
raise ImportError("""Please pip install zstandard""" )
import zstandard as zstd
_UpperCAmelCase : Optional[Any] = zstd.ZstdDecompressor()
with open(a_ ,"""rb""" ) as ifh, open(a_ ,"""wb""" ) as ofh:
dctx.copy_stream(a_ ,a_ )
class lowercase ( _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase = [b"""\x42\x5A\x68"""]
@staticmethod
def _snake_case ( a_ ,a_ ) -> None:
with bza.open(a_ ,"""rb""" ) as compressed_file:
with open(a_ ,"""wb""" ) as extracted_file:
shutil.copyfileobj(a_ ,a_ )
class lowercase ( _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase = [b"""\x37\x7A\xBC\xAF\x27\x1C"""]
@staticmethod
def _snake_case ( a_ ,a_ ) -> None:
if not config.PY7ZR_AVAILABLE:
raise ImportError("""Please pip install py7zr""" )
import pyazr
os.makedirs(a_ ,exist_ok=a_ )
with pyazr.SevenZipFile(a_ ,"""r""" ) as archive:
archive.extractall(a_ )
class lowercase ( _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase = [b"""\x04\x22\x4D\x18"""]
@staticmethod
def _snake_case ( a_ ,a_ ) -> None:
if not config.LZ4_AVAILABLE:
raise ImportError("""Please pip install lz4""" )
import lza.frame
with lza.frame.open(a_ ,"""rb""" ) as compressed_file:
with open(a_ ,"""wb""" ) as extracted_file:
shutil.copyfileobj(a_ ,a_ )
class lowercase :
"""simple docstring"""
UpperCAmelCase = {
"tar": TarExtractor,
"gzip": GzipExtractor,
"zip": ZipExtractor,
"xz": XzExtractor,
"rar": RarExtractor,
"zstd": ZstdExtractor,
"bz2": BzipaExtractor,
"7z": SevenZipExtractor, # <Added version="2.4.0"/>
"lz4": LzaExtractor, # <Added version="2.4.0"/>
}
@classmethod
def _snake_case ( cls ) -> Union[str, Any]:
return max(
len(a_ )
for extractor in cls.extractors.values()
if issubclass(a_ ,a_ )
for extractor_magic_number in extractor.magic_numbers )
@staticmethod
def _snake_case ( a_ ,a_ ) -> Optional[int]:
try:
return MagicNumberBaseExtractor.read_magic_number(a_ ,magic_number_length=a_ )
except OSError:
return b""
@classmethod
def _snake_case ( cls ,a_ ,a_ = False ) -> bool:
warnings.warn(
"""Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. """
"""Use 'infer_extractor_format' instead.""" ,category=a_ ,)
_UpperCAmelCase : Tuple = cls.infer_extractor_format(a_ )
if extractor_format:
return True if not return_extractor else (True, cls.extractors[extractor_format])
return False if not return_extractor else (False, None)
@classmethod
def _snake_case ( cls ,a_ ) -> str: # <Added version="2.4.0"/>
_UpperCAmelCase : str = cls._get_magic_number_max_length()
_UpperCAmelCase : Tuple = cls._read_magic_number(a_ ,a_ )
for extractor_format, extractor in cls.extractors.items():
if extractor.is_extractable(a_ ,magic_number=a_ ):
return extractor_format
@classmethod
def _snake_case ( cls ,a_ ,a_ ,a_ = None ,a_ = "deprecated" ,) -> None:
os.makedirs(os.path.dirname(a_ ) ,exist_ok=a_ )
# Prevent parallel extractions
_UpperCAmelCase : Optional[int] = str(Path(a_ ).with_suffix(""".lock""" ) )
with FileLock(a_ ):
shutil.rmtree(a_ ,ignore_errors=a_ )
if extractor_format or extractor != "deprecated":
if extractor != "deprecated" or not isinstance(a_ ,a_ ): # passed as positional arg
warnings.warn(
"""Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. """
"""Use 'extractor_format' instead.""" ,category=a_ ,)
_UpperCAmelCase : Optional[Any] = extractor if extractor != """deprecated""" else extractor_format
else:
_UpperCAmelCase : Tuple = cls.extractors[extractor_format]
return extractor.extract(a_ ,a_ )
else:
warnings.warn(
"""Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an """
"""exception in 3.0.0.""" ,category=a_ ,)
for extractor in cls.extractors.values():
if extractor.is_extractable(a_ ):
return extractor.extract(a_ ,a_ )
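# Minimal usage sketch for the manager class above. The readable names
# (ExtractManager, .extract) are assumptions based on the original
# `datasets.utils.extract` module; in this file the class and method have
# been renamed, so treat this as an illustrative sketch, not the literal API
# of this file:
#
#   manager = ExtractManager(cache_dir="/tmp/extract_cache")  # hypothetical path
#   extracted_path = manager.extract("/path/to/archive.tar.gz")
#   # the input path is returned unchanged when no extractor format is inferred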
| 349 |
'''simple docstring'''
from __future__ import annotations
import math
def snake_case_ ( lowerCAmelCase_ )-> list[int]:
'''simple docstring'''
if num <= 0:
_UpperCAmelCase : List[Any] = F'''{num}: Invalid input, please enter a positive integer.'''
raise ValueError(lowerCAmelCase_ )
_UpperCAmelCase : List[Any] = [True] * (num + 1)
_UpperCAmelCase : int = []
_UpperCAmelCase : int = 2
_UpperCAmelCase : int = int(math.sqrt(lowerCAmelCase_ ) )
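# Sieve of Eratosthenes: crossing off multiples is only needed for bases up
# to sqrt(num); every index still marked True after the loop is prime and is
# collected by the final range(end + 1, num + 1) pass.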
while start <= end:
# If start is a prime
if sieve[start] is True:
prime.append(lowerCAmelCase_ )
# Set multiples of start be False
for i in range(start * start , num + 1 , lowerCAmelCase_ ):
if sieve[i] is True:
_UpperCAmelCase : Tuple = False
start += 1
for j in range(end + 1 , num + 1 ):
if sieve[j] is True:
prime.append(lowerCAmelCase_ )
return prime
if __name__ == "__main__":
print(prime_sieve(int(input("""Enter a positive integer: """).strip())))
| 349 | 1 |
'''simple docstring'''
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
# setup instructions, if using on-demand hardware
# If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
# If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
# Throw an error if user passes both BYO and on-demand cluster args
# Otherwise, use default values
A_ : Optional[Any] = argparse.ArgumentParser()
parser.add_argument("""--user""", type=str, default="""ubuntu""")
parser.add_argument("""--host""", type=str, default="""localhost""")
parser.add_argument("""--key_path""", type=str, default=None)
parser.add_argument("""--instance""", type=str, default="""V100:1""")
parser.add_argument("""--provider""", type=str, default="""cheapest""")
parser.add_argument("""--use_spot""", type=bool, default=False)
parser.add_argument("""--example""", type=str, default="""pytorch/text-generation/run_generation.py""")
A_ , A_ : Dict = parser.parse_known_args()
if args.host != "localhost":
if args.instance != "V100:1" or args.provider != "cheapest":
raise ValueError("""Cannot specify both BYO and on-demand cluster args""")
A_ : Optional[int] = rh.cluster(
name="""rh-cluster""", ips=[args.host], ssh_creds={"""ssh_user""": args.user, """ssh_private_key""": args.key_path}
)
else:
A_ : Union[str, Any] = rh.cluster(
name="""rh-cluster""", instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
)
A_ : Optional[int] = args.example.rsplit("""/""", 1)[0]
# Set up remote environment
cluster.install_packages(["""pip:./"""]) # Installs transformers from local source
# Note transformers is copied into the home directory on the remote machine, so we can install from there
cluster.run([f"""pip install -r transformers/examples/{example_dir}/requirements.txt"""])
cluster.run(["""pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117"""])
# Run example. You can bypass the CLI wrapper and paste your own code here.
cluster.run([f"""python transformers/examples/{args.example} {" ".join(shlex.quote(arg) for arg in unknown)}"""])
# Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
# from my_script... import train
# reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
# launch_train_gpu = rh.function(fn=train,
# system=gpu,
# reqs=reqs,
# name='train_bert_glue')
#
# We can pass in arguments just like we would to a function:
# launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16,
# stream_logs=True)
| 349 |
'''simple docstring'''
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class lowercase ( _lowerCamelCase ):
"""simple docstring"""
def __init__( self ,a_ ,a_ = None ,a_ = None ,a_ = True ,a_ = None ,a_ = False ,a_ = None ,a_ = True ,a_ = "arrow" ,**a_ ,) -> str:
super().__init__(
split=a_ ,features=a_ ,cache_dir=a_ ,keep_in_memory=a_ ,streaming=a_ ,**a_ ,)
_UpperCAmelCase : Any = load_from_cache_file
_UpperCAmelCase : Optional[int] = file_format
_UpperCAmelCase : int = Spark(
df=a_ ,features=a_ ,cache_dir=a_ ,working_dir=a_ ,**a_ ,)
def _snake_case ( self ) -> int:
if self.streaming:
return self.builder.as_streaming_dataset(split=self.split )
_UpperCAmelCase : str = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
self.builder.download_and_prepare(
download_mode=a_ ,file_format=self._file_format ,)
return self.builder.as_dataset(split=self.split )
| 349 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A_ : Union[str, Any] = logging.get_logger(__name__)
A_ : Optional[Any] = {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/config.json""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/config.json""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/config.json""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/config.json""",
"""bert-base-multilingual-uncased""": """https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json""",
"""bert-base-multilingual-cased""": """https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json""",
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/config.json""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/config.json""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-base-cased-finetuned-mrpc""": """https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json""",
"""bert-base-german-dbmdz-cased""": """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json""",
"""bert-base-german-dbmdz-uncased""": """https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese""": """https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"""
),
"""wietsedv/bert-base-dutch-cased""": """https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json""",
# See all BERT models at https://huggingface.co/models?filter=bert
}
class lowercase ( _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase = """bert"""
def __init__( self ,a_=30_522 ,a_=768 ,a_=12 ,a_=12 ,a_=3_072 ,a_="gelu" ,a_=0.1 ,a_=0.1 ,a_=512 ,a_=2 ,a_=0.02 ,a_=1E-1_2 ,a_=0 ,a_="absolute" ,a_=True ,a_=None ,**a_ ,) -> Any:
super().__init__(pad_token_id=a_ ,**a_ )
_UpperCAmelCase : Union[str, Any] = vocab_size
_UpperCAmelCase : Any = hidden_size
_UpperCAmelCase : List[str] = num_hidden_layers
_UpperCAmelCase : List[Any] = num_attention_heads
_UpperCAmelCase : List[Any] = hidden_act
_UpperCAmelCase : Tuple = intermediate_size
_UpperCAmelCase : Tuple = hidden_dropout_prob
_UpperCAmelCase : Optional[int] = attention_probs_dropout_prob
_UpperCAmelCase : str = max_position_embeddings
_UpperCAmelCase : Union[str, Any] = type_vocab_size
_UpperCAmelCase : Optional[Any] = initializer_range
_UpperCAmelCase : Any = layer_norm_eps
_UpperCAmelCase : Optional[Any] = position_embedding_type
_UpperCAmelCase : Union[str, Any] = use_cache
_UpperCAmelCase : Optional[int] = classifier_dropout
class lowercase ( _lowerCamelCase ):
"""simple docstring"""
@property
def _snake_case ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
_UpperCAmelCase : Optional[int] = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
_UpperCAmelCase : Union[str, Any] = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
| 349 |
'''simple docstring'''
A_ : Optional[Any] = """0.21.0"""
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
| 349 | 1 |
'''simple docstring'''
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def snake_case_ ( )-> int:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = ArgumentParser(
description=(
"""PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"""
) )
# Optional arguments for the launch helper
parser.add_argument("""--num_cores""" , type=lowerCAmelCase_ , default=1 , help="""Number of TPU cores to use (1 or 8).""" )
# positional
parser.add_argument(
"""training_script""" , type=lowerCAmelCase_ , help=(
"""The full path to the single TPU training """
"""program/script to be launched in parallel, """
"""followed by all the arguments for the """
"""training script"""
) , )
# rest from the training program
parser.add_argument("""training_script_args""" , nargs=lowerCAmelCase_ )
return parser.parse_args()
def snake_case_ ( )-> str:
'''simple docstring'''
_UpperCAmelCase : List[str] = parse_args()
# Import training_script as a module.
_UpperCAmelCase : List[Any] = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
_UpperCAmelCase : Optional[Any] = script_fpath.stem
_UpperCAmelCase : List[str] = importlib.import_module(lowerCAmelCase_ )
# Patch sys.argv
_UpperCAmelCase : Dict = [args.training_script] + args.training_script_args + ["""--tpu_num_cores""", str(args.num_cores )]
xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
| 349 |
'''simple docstring'''
from argparse import ArgumentParser
from .env import EnvironmentCommand
def snake_case_ ( )-> Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = ArgumentParser("""Diffusers CLI tool""" , usage="""diffusers-cli <command> [<args>]""" )
_UpperCAmelCase : str = parser.add_subparsers(help="""diffusers-cli command helpers""" )
# Register commands
EnvironmentCommand.register_subcommand(lowerCAmelCase_ )
# Let's go
_UpperCAmelCase : Union[str, Any] = parser.parse_args()
if not hasattr(lowerCAmelCase_ , """func""" ):
parser.print_help()
exit(1 )
# Run
_UpperCAmelCase : Optional[int] = args.func(lowerCAmelCase_ )
service.run()
if __name__ == "__main__":
main()
| 349 | 1 |
'''simple docstring'''
import math
def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ )-> int:
'''simple docstring'''
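# Jump search over a sorted array: advance in blocks of ~sqrt(n) until a
# block that could contain x is found, then scan that block linearly.
# Returns the index of x, or -1 when x is absent.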
_UpperCAmelCase : str = len(lowerCAmelCase_ )
_UpperCAmelCase : List[str] = int(math.floor(math.sqrt(lowerCAmelCase_ ) ) )
_UpperCAmelCase : int = 0
while arr[min(lowerCAmelCase_ , lowerCAmelCase_ ) - 1] < x:
_UpperCAmelCase : Optional[int] = step
step += int(math.floor(math.sqrt(lowerCAmelCase_ ) ) )
if prev >= n:
return -1
while arr[prev] < x:
_UpperCAmelCase : List[Any] = prev + 1
if prev == min(lowerCAmelCase_ , lowerCAmelCase_ ):
return -1
if arr[prev] == x:
return prev
return -1
if __name__ == "__main__":
A_ : str = input("""Enter numbers separated by a comma:\n""").strip()
A_ : Union[str, Any] = [int(item) for item in user_input.split(""",""")]
A_ : int = int(input("""Enter the number to be searched:\n"""))
A_ : Any = jump_search(arr, x)
if res == -1:
print("""Number not found!""")
else:
print(f"""Number {x} is at index {res}""")
| 349 |
'''simple docstring'''
import math
def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ )-> int:
'''simple docstring'''
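# Jump search over a sorted array: advance in blocks of ~sqrt(n) until a
# block that could contain x is found, then scan that block linearly.
# Returns the index of x, or -1 when x is absent.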
_UpperCAmelCase : str = len(lowerCAmelCase_ )
_UpperCAmelCase : List[str] = int(math.floor(math.sqrt(lowerCAmelCase_ ) ) )
_UpperCAmelCase : int = 0
while arr[min(lowerCAmelCase_ , lowerCAmelCase_ ) - 1] < x:
_UpperCAmelCase : Optional[int] = step
step += int(math.floor(math.sqrt(lowerCAmelCase_ ) ) )
if prev >= n:
return -1
while arr[prev] < x:
_UpperCAmelCase : List[Any] = prev + 1
if prev == min(lowerCAmelCase_ , lowerCAmelCase_ ):
return -1
if arr[prev] == x:
return prev
return -1
if __name__ == "__main__":
A_ : str = input("""Enter numbers separated by a comma:\n""").strip()
A_ : Union[str, Any] = [int(item) for item in user_input.split(""",""")]
A_ : int = int(input("""Enter the number to be searched:\n"""))
A_ : Any = jump_search(arr, x)
if res == -1:
print("""Number not found!""")
else:
print(f"""Number {x} is at index {res}""")
| 349 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
A_ : Any = {
"""configuration_canine""": ["""CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CanineConfig"""],
"""tokenization_canine""": ["""CanineTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Union[str, Any] = [
"""CANINE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CanineForMultipleChoice""",
"""CanineForQuestionAnswering""",
"""CanineForSequenceClassification""",
"""CanineForTokenClassification""",
"""CanineLayer""",
"""CanineModel""",
"""CaninePreTrainedModel""",
"""load_tf_weights_in_canine""",
]
if TYPE_CHECKING:
from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
from .tokenization_canine import CanineTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_canine import (
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineLayer,
CanineModel,
CaninePreTrainedModel,
load_tf_weights_in_canine,
)
else:
import sys
A_ : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 349 |
'''simple docstring'''
import argparse
import copy
def snake_case_ ( lowerCAmelCase_ )-> Dict:
'''simple docstring'''
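# Parse a whitespace-separated edge list ("node_a node_b distance" per line)
# into an adjacency dict mapping each node to a list of [neighbour, distance]
# pairs, recorded in both directions.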
_UpperCAmelCase : Dict = {}
with open(lowerCAmelCase_ ) as f:
for line in f:
if line.split()[0] not in dict_of_neighbours:
_UpperCAmelCase : Optional[int] = []
_list.append([line.split()[1], line.split()[2]] )
_UpperCAmelCase : List[str] = _list
else:
dict_of_neighbours[line.split()[0]].append(
[line.split()[1], line.split()[2]] )
if line.split()[1] not in dict_of_neighbours:
_UpperCAmelCase : List[str] = []
_list.append([line.split()[0], line.split()[2]] )
_UpperCAmelCase : Optional[int] = _list
else:
dict_of_neighbours[line.split()[1]].append(
[line.split()[0], line.split()[2]] )
return dict_of_neighbours
def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ )-> List[Any]:
'''simple docstring'''
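# Greedy nearest-neighbour construction: starting from the first character
# read from the file, repeatedly hop to the closest unvisited node, then
# close the tour back to the start node. The 10000 value acts as an
# "infinite" initial distance, and the leftover sentinel is subtracted when
# the closing edge's distance is added at the end.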
with open(lowerCAmelCase_ ) as f:
_UpperCAmelCase : List[Any] = f.read(1 )
_UpperCAmelCase : int = start_node
_UpperCAmelCase : List[Any] = []
_UpperCAmelCase : Dict = start_node
_UpperCAmelCase : Any = 0
while visiting not in first_solution:
_UpperCAmelCase : Optional[int] = 10000
for k in dict_of_neighbours[visiting]:
if int(k[1] ) < int(lowerCAmelCase_ ) and k[0] not in first_solution:
_UpperCAmelCase : Optional[int] = k[1]
_UpperCAmelCase : List[str] = k[0]
first_solution.append(lowerCAmelCase_ )
_UpperCAmelCase : Optional[int] = distance_of_first_solution + int(lowerCAmelCase_ )
_UpperCAmelCase : Dict = best_node
first_solution.append(lowerCAmelCase_ )
_UpperCAmelCase : List[str] = 0
for k in dict_of_neighbours[first_solution[-2]]:
if k[0] == start_node:
break
position += 1
_UpperCAmelCase : int = (
distance_of_first_solution
+ int(dict_of_neighbours[first_solution[-2]][position][1] )
- 10000
)
return first_solution, distance_of_first_solution
def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ )-> int:
'''simple docstring'''
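# Enumerate the 2-swap neighbourhood of `solution`: for every pair of
# interior nodes, swap them, recompute the tour distance, and append it as
# the last element of the candidate. Candidates are sorted by that trailing
# distance so index 0 is the cheapest neighbour.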
_UpperCAmelCase : int = []
for n in solution[1:-1]:
_UpperCAmelCase : Tuple = solution.index(lowerCAmelCase_ )
for kn in solution[1:-1]:
_UpperCAmelCase : int = solution.index(lowerCAmelCase_ )
if n == kn:
continue
_UpperCAmelCase : Tuple = copy.deepcopy(lowerCAmelCase_ )
_UpperCAmelCase : Union[str, Any] = kn
_UpperCAmelCase : List[str] = n
_UpperCAmelCase : Optional[int] = 0
for k in _tmp[:-1]:
_UpperCAmelCase : List[str] = _tmp[_tmp.index(lowerCAmelCase_ ) + 1]
for i in dict_of_neighbours[k]:
if i[0] == next_node:
_UpperCAmelCase : Dict = distance + int(i[1] )
_tmp.append(lowerCAmelCase_ )
if _tmp not in neighborhood_of_solution:
neighborhood_of_solution.append(_tmp )
_UpperCAmelCase : Dict = len(neighborhood_of_solution[0] ) - 1
neighborhood_of_solution.sort(key=lambda lowerCAmelCase_ : x[index_of_last_item_in_the_list] )
return neighborhood_of_solution
def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )-> int:
'''simple docstring'''
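# Tabu search main loop: at each iteration take the best 2-swap neighbour
# whose exchanged node pair is not on the tabu list, record that pair, keep
# the best tour seen so far, and evict the oldest tabu entry (FIFO) once the
# list exceeds `size`.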
_UpperCAmelCase : List[Any] = 1
_UpperCAmelCase : Optional[Any] = first_solution
_UpperCAmelCase : List[Any] = []
_UpperCAmelCase : List[Any] = distance_of_first_solution
_UpperCAmelCase : Dict = solution
while count <= iters:
_UpperCAmelCase : Any = find_neighborhood(lowerCAmelCase_ , lowerCAmelCase_ )
_UpperCAmelCase : Dict = 0
_UpperCAmelCase : Optional[Any] = neighborhood[index_of_best_solution]
_UpperCAmelCase : Optional[Any] = len(lowerCAmelCase_ ) - 1
_UpperCAmelCase : Optional[Any] = False
while not found:
_UpperCAmelCase : Tuple = 0
while i < len(lowerCAmelCase_ ):
if best_solution[i] != solution[i]:
_UpperCAmelCase : Any = best_solution[i]
_UpperCAmelCase : str = solution[i]
break
_UpperCAmelCase : int = i + 1
if [first_exchange_node, second_exchange_node] not in tabu_list and [
second_exchange_node,
first_exchange_node,
] not in tabu_list:
tabu_list.append([first_exchange_node, second_exchange_node] )
_UpperCAmelCase : Tuple = True
_UpperCAmelCase : List[Any] = best_solution[:-1]
_UpperCAmelCase : str = neighborhood[index_of_best_solution][best_cost_index]
if cost < best_cost:
_UpperCAmelCase : Tuple = cost
_UpperCAmelCase : List[Any] = solution
else:
_UpperCAmelCase : Any = index_of_best_solution + 1
_UpperCAmelCase : Dict = neighborhood[index_of_best_solution]
if len(lowerCAmelCase_ ) >= size:
tabu_list.pop(0 )
_UpperCAmelCase : Optional[Any] = count + 1
return best_solution_ever, best_cost
def main( args=None )-> Optional[int]:
    '''simple docstring'''
    dict_of_neighbours = generate_neighbours(args.File )
    first_solution , distance_of_first_solution = generate_first_solution(
        args.File , dict_of_neighbours )
    best_sol , best_cost = tabu_search(
        first_solution , distance_of_first_solution , dict_of_neighbours , args.Iterations , args.Size , )
    print(F'''Best solution: {best_sol}, with total distance: {best_cost}.''' )
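# Usage sketch (not part of the original script; the tiny edge list and the
# iteration count below are illustrative). The input format assumed by
# generate_neighbours() is one "node node distance" triple per line, and the
# start node is the first character of the file.
def _demo_tabu_search():  # pragma: no cover
    import tempfile
    with tempfile.NamedTemporaryFile("""w""" , suffix=""".txt""" , delete=False ) as tmp:
        tmp.write("""a b 20\na c 18\nb c 10\n""" )
        path = tmp.name
    neighbours = generate_neighbours(path )
    first_solution , distance = generate_first_solution(path , neighbours )
    # a single improvement pass with a tabu list of size 3
    best_sol , best_cost = tabu_search(first_solution , distance , neighbours , 1 , 3 )
    print(best_sol , best_cost )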
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="""Tabu Search""")
parser.add_argument(
"""-f""",
"""--File""",
type=str,
help="""Path to the file containing the data""",
required=True,
)
parser.add_argument(
"""-i""",
"""--Iterations""",
type=int,
help="""How many iterations the algorithm should perform""",
required=True,
)
parser.add_argument(
"""-s""", """--Size""", type=int, help="""Size of the tabu list""", required=True
)
# Pass the arguments to main method
main(parser.parse_args())
| 349 | 1 |
'''simple docstring'''
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class BertGenerationEncoderTester :
"""simple docstring"""
def __init__( self ,a_ ,a_=13 ,a_=7 ,a_=True ,a_=True ,a_=99 ,a_=32 ,a_=5 ,a_=4 ,a_=37 ,a_="gelu" ,a_=0.1 ,a_=0.1 ,a_=50 ,a_=0.02 ,a_=True ,a_=None ,) -> Dict:
_UpperCAmelCase : int = parent
_UpperCAmelCase : str = batch_size
_UpperCAmelCase : List[str] = seq_length
_UpperCAmelCase : Tuple = is_training
_UpperCAmelCase : List[Any] = use_input_mask
_UpperCAmelCase : List[str] = vocab_size
_UpperCAmelCase : Optional[int] = hidden_size
_UpperCAmelCase : Optional[int] = num_hidden_layers
_UpperCAmelCase : Any = num_attention_heads
_UpperCAmelCase : str = intermediate_size
_UpperCAmelCase : str = hidden_act
_UpperCAmelCase : Dict = hidden_dropout_prob
_UpperCAmelCase : List[str] = attention_probs_dropout_prob
_UpperCAmelCase : int = max_position_embeddings
_UpperCAmelCase : Optional[Any] = initializer_range
_UpperCAmelCase : List[Any] = use_labels
_UpperCAmelCase : List[str] = scope
    def prepare_config_and_inputs( self ) -> str:
_UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
_UpperCAmelCase : Optional[int] = None
if self.use_input_mask:
_UpperCAmelCase : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
if self.use_labels:
_UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
_UpperCAmelCase : int = self.get_config()
return config, input_ids, input_mask, token_labels
    def get_config( self ) -> Optional[int]:
return BertGenerationConfig(
            vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,is_decoder=False ,initializer_range=self.initializer_range ,)
    def prepare_config_and_inputs_for_decoder( self ) -> List[str]:
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] ,vocab_size=2 )
return (
config,
input_ids,
input_mask,
token_labels,
encoder_hidden_states,
encoder_attention_mask,
)
    def create_and_check_model( self ,a_ ,a_ ,a_ ,a_ ,**a_ ,) -> Any:
_UpperCAmelCase : Optional[int] = BertGenerationEncoder(config=a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase : Optional[Any] = model(a_ ,attention_mask=a_ )
_UpperCAmelCase : Optional[Any] = model(a_ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_model_as_decoder( self ,a_ ,a_ ,a_ ,a_ ,a_ ,a_ ,**a_ ,) -> List[str]:
_UpperCAmelCase : Optional[int] = True
_UpperCAmelCase : Optional[int] = BertGenerationEncoder(config=a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase : Tuple = model(
a_ ,attention_mask=a_ ,encoder_hidden_states=a_ ,encoder_attention_mask=a_ ,)
_UpperCAmelCase : Dict = model(
a_ ,attention_mask=a_ ,encoder_hidden_states=a_ ,)
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_decoder_model_past_large_inputs( self ,a_ ,a_ ,a_ ,a_ ,a_ ,a_ ,**a_ ,) -> List[str]:
_UpperCAmelCase : str = True
_UpperCAmelCase : Tuple = True
_UpperCAmelCase : List[str] = BertGenerationDecoder(config=a_ ).to(a_ ).eval()
# first forward pass
_UpperCAmelCase : List[str] = model(
a_ ,attention_mask=a_ ,encoder_hidden_states=a_ ,encoder_attention_mask=a_ ,use_cache=a_ ,)
_UpperCAmelCase : Optional[Any] = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
_UpperCAmelCase : Tuple = ids_tensor((self.batch_size, 3) ,config.vocab_size )
_UpperCAmelCase : Optional[Any] = ids_tensor((self.batch_size, 3) ,vocab_size=2 )
# append to next input_ids and
_UpperCAmelCase : str = torch.cat([input_ids, next_tokens] ,dim=-1 )
_UpperCAmelCase : Tuple = torch.cat([input_mask, next_mask] ,dim=-1 )
_UpperCAmelCase : Tuple = model(
a_ ,attention_mask=a_ ,encoder_hidden_states=a_ ,encoder_attention_mask=a_ ,output_hidden_states=a_ ,)["""hidden_states"""][0]
_UpperCAmelCase : Dict = model(
a_ ,attention_mask=a_ ,encoder_hidden_states=a_ ,encoder_attention_mask=a_ ,past_key_values=a_ ,output_hidden_states=a_ ,)["""hidden_states"""][0]
# select random slice
_UpperCAmelCase : str = ids_tensor((1,) ,output_from_past.shape[-1] ).item()
_UpperCAmelCase : str = output_from_no_past[:, -3:, random_slice_idx].detach()
_UpperCAmelCase : List[Any] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(a_ ,a_ ,atol=1E-3 ) )
    def create_and_check_for_causal_lm( self ,a_ ,a_ ,a_ ,a_ ,*a_ ,) -> Dict:
_UpperCAmelCase : List[str] = BertGenerationDecoder(a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase : Tuple = model(a_ ,attention_mask=a_ ,labels=a_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
    def prepare_config_and_inputs_for_common( self ) -> Tuple:
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        inputs_dict = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class lowercase ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
UpperCAmelCase = (BertGenerationDecoder,) if is_torch_available() else ()
UpperCAmelCase = (
{"""feature-extraction""": BertGenerationEncoder, """text-generation""": BertGenerationDecoder}
if is_torch_available()
else {}
)
def _snake_case ( self ) -> Optional[int]:
        _UpperCAmelCase : Any = BertGenerationEncoderTester(self )
        _UpperCAmelCase : Union[str, Any] = ConfigTester(self ,config_class=BertGenerationConfig ,hidden_size=37 )
def _snake_case ( self ) -> Tuple:
self.config_tester.run_common_tests()
def _snake_case ( self ) -> Optional[Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
def _snake_case ( self ) -> List[str]:
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        config.model_type = """bert"""
        self.model_tester.create_and_check_model(config ,input_ids ,input_mask ,token_labels )
def _snake_case ( self ) -> List[str]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs )
def _snake_case ( self ) -> Optional[int]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs )
def _snake_case ( self ) -> Tuple:
# This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(
            config ,input_ids ,input_mask ,token_labels ,encoder_hidden_states ,encoder_attention_mask ,)
def _snake_case ( self ) -> int:
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs )
@slow
def _snake_case ( self ) -> Any:
        model = BertGenerationEncoder.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" )
        self.assertIsNotNone(model )
@require_torch
class lowercase ( unittest.TestCase ):
"""simple docstring"""
@slow
def _snake_case ( self ) -> Any:
_UpperCAmelCase : Dict = BertGenerationEncoder.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" )
_UpperCAmelCase : str = torch.tensor([[101, 7_592, 1_010, 2_026, 3_899, 2_003, 10_140, 102]] )
with torch.no_grad():
_UpperCAmelCase : Dict = model(a_ )[0]
_UpperCAmelCase : Dict = torch.Size([1, 8, 1_024] )
self.assertEqual(output.shape ,a_ )
_UpperCAmelCase : Dict = torch.tensor(
[[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] ,a_ ,atol=1E-4 ) )
@require_torch
class lowercase ( unittest.TestCase ):
"""simple docstring"""
@slow
def _snake_case ( self ) -> List[Any]:
_UpperCAmelCase : Optional[Any] = BertGenerationDecoder.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" )
_UpperCAmelCase : Optional[Any] = torch.tensor([[101, 7_592, 1_010, 2_026, 3_899, 2_003, 10_140, 102]] )
with torch.no_grad():
_UpperCAmelCase : Any = model(a_ )[0]
_UpperCAmelCase : Union[str, Any] = torch.Size([1, 8, 50_358] )
self.assertEqual(output.shape ,a_ )
_UpperCAmelCase : Any = torch.tensor(
[[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] ,a_ ,atol=1E-4 ) )
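# In miniature, the past_key_values checks above assert that cached decoding of
# the last 3 tokens matches the corresponding slice of a full, cache-free
# forward pass: output_from_no_past[:, -3:, idx] ~= output_from_past[:, :, idx]
# within an absolute tolerance of 1e-3 (the shapes and slice index are
# illustrative, taken from the test body).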
| 349 |
'''simple docstring'''
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    """simple docstring"""
    destination_vertex: int
    weight: int
class AdjacencyList:
    """simple docstring"""
    def __init__( self ,size ) -> None:
        self._graph: list[list[Edge]] = [[] for _ in range(size )]
        self._size = size
    def __getitem__( self ,vertex ) -> Iterator[Edge]:
        return iter(self._graph[vertex] )
    @property
    def size( self ) -> int:
        return self._size
    def add_edge( self ,from_vertex ,to_vertex ,weight ) -> None:
        if weight not in (0, 1):
            raise ValueError("""Edge weight must be either 0 or 1.""" )
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("""Vertex indexes must be in [0; size).""" )
        self._graph[from_vertex].append(Edge(to_vertex ,weight ) )
    def get_shortest_path( self ,start_vertex ,finish_vertex ) -> int | None:
        queue = deque([start_vertex] )
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0
        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue
            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance ,int )
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex )
                else:
                    queue.append(edge.destination_vertex )
        if distances[finish_vertex] is None:
            raise ValueError("""No path from start_vertex to finish_vertex.""" )
        return distances[finish_vertex]
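# Minimal usage sketch (not part of the original module; vertices and weights
# are illustrative): 0-weight edges act as free moves in 0-1 BFS, so the best
# 0 -> 3 route below costs a single unit.
def _demo_zero_one_bfs() -> None:
    g = AdjacencyList(4 )
    g.add_edge(0 ,1 ,0 )  # free edge
    g.add_edge(1 ,2 ,1 )
    g.add_edge(0 ,2 ,1 )
    g.add_edge(2 ,3 ,0 )  # free edge
    assert g.get_shortest_path(0 ,3 ) == 1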
if __name__ == "__main__":
import doctest
doctest.testmod()
| 349 | 1 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True )
class LanguageModeling ( TaskTemplate ):
    """simple docstring"""
    task: str = field(default="""language-modeling""" , metadata={"""include_in_asdict_even_if_is_default""": True} )
    input_schema: ClassVar[Features] = Features({"""text""": Value("""string""" )} )
    label_schema: ClassVar[Features] = Features({} )
    text_column: str = """text"""
    @property
    def column_mapping( self ) -> Dict[str, str]:
        return {self.text_column: "text"}
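# Usage sketch (illustrative column name): mapping a dataset whose text lives
# in a "content" column onto the schema above.
#
#   lm_task = LanguageModeling(text_column="content")
#   assert lm_task.column_mapping == {"content": "text"}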
| 349 |
'''simple docstring'''
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 1_6
EVAL_BATCH_SIZE = 3_2
def get_fold_dataloaders( accelerator , dataset , train_idxs , valid_idxs , batch_size = 16 )-> Optional[int]:
    '''simple docstring'''
    tokenizer = AutoTokenizer.from_pretrained("""bert-base-cased""" )
    datasets = DatasetDict(
        {
            """train""": dataset["""train"""].select(train_idxs ),
            """validation""": dataset["""train"""].select(valid_idxs ),
            """test""": dataset["""validation"""],
        } )
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=True , max_length=None )
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function , batched=True , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("""label""" , """labels""" )
    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples , padding="""longest""" , max_length=max_length , pad_to_multiple_of=pad_to_multiple_of , return_tensors="""pt""" , )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["""train"""] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets["""validation"""] , shuffle=False , collate_fn=collate_fn , batch_size=batch_size )
    test_dataloader = DataLoader(
        tokenized_datasets["""test"""] , shuffle=False , collate_fn=collate_fn , batch_size=batch_size )
    return train_dataloader, eval_dataloader, test_dataloader
def training_function( config , args )-> Optional[int]:
    '''simple docstring'''
    # New Code #
    test_predictions = []
    # Download the dataset
    datasets = load_dataset("""glue""" , """mrpc""" )
    # Create our splits
    kfold = StratifiedKFold(n_splits=int(args.num_folds ) )
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["""lr"""]
    num_epochs = int(config["""num_epochs"""] )
    seed = int(config["""seed"""] )
    batch_size = int(config["""batch_size"""] )
    metric = evaluate.load("""glue""" , """mrpc""" )
    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE
    set_seed(seed )
    # New Code #
    # Create our folds:
    folds = kfold.split(np.zeros(datasets["""train"""].num_rows ) , datasets["""train"""]["""label"""] )
    test_references = []
    # Iterate over them
    for i, (train_idxs, valid_idxs) in enumerate(folds ):
        train_dataloader , eval_dataloader , test_dataloader = get_fold_dataloaders(
            accelerator , datasets , train_idxs , valid_idxs , batch_size , )
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=True )
        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device )
        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters() , lr=lr )
        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer , num_warmup_steps=100 , num_training_steps=(len(train_dataloader ) * num_epochs) // gradient_accumulation_steps , )
        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler = accelerator.prepare(
            model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )
        # Now we train the model
        for epoch in range(num_epochs ):
            model.train()
            for step, batch in enumerate(train_dataloader ):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device )
                outputs = model(**batch )
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss )
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
            model.eval()
            for step, batch in enumerate(eval_dataloader ):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device )
                with torch.no_grad():
                    outputs = model(**batch )
                predictions = outputs.logits.argmax(dim=-1 )
                predictions , references = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
                metric.add_batch(
                    predictions=predictions , references=references , )
            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(F'''epoch {epoch}:''' , eval_metric )
        # New Code #
        # We also run predictions on the test set at the very end
        fold_predictions = []
        for step, batch in enumerate(test_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            with torch.no_grad():
                outputs = model(**batch )
            predictions = outputs.logits
            predictions , references = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
            fold_predictions.append(predictions.cpu() )
            if i == 0:
                # We need all of the test predictions
                test_references.append(references.cpu() )
        # Use accelerator.print to print only on the main process.
        test_predictions.append(torch.cat(fold_predictions , dim=0 ) )
    # We now need to release all our memory and get rid of the current model, optimizer, etc
    accelerator.free_memory()
    # New Code #
    # Finally we check the accuracy of our folded results:
    test_references = torch.cat(test_references , dim=0 )
    preds = torch.stack(test_predictions , dim=0 ).sum(dim=0 ).div(int(args.num_folds ) ).argmax(dim=-1 )
    test_metric = metric.compute(predictions=preds , references=test_references )
    accelerator.print("""Average test metrics from all folds:""" , test_metric )
def main( )-> Any:
    '''simple docstring'''
    parser = argparse.ArgumentParser(description="""Simple example of training script.""" )
    parser.add_argument(
        """--mixed_precision""" , type=str , default=None , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
        """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
        """and an Nvidia Ampere GPU.""" , )
    parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
    # New Code #
    parser.add_argument("""--num_folds""" , type=int , default=3 , help="""The number of splits to perform across the dataset""" )
    args = parser.parse_args()
    config = {"""lr""": 2e-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
    training_function(config , args )
if __name__ == "__main__":
main()
| 349 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A_ : List[Any] = logging.get_logger(__name__)
A_ : List[str] = {
"""google/mobilenet_v1_1.0_224""": """https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json""",
"""google/mobilenet_v1_0.75_192""": """https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json""",
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class lowercase ( _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase = """mobilenet_v1"""
def __init__( self ,a_=3 ,a_=224 ,a_=1.0 ,a_=8 ,a_="relu6" ,a_=True ,a_=0.999 ,a_=0.02 ,a_=0.001 ,**a_ ,) -> str:
super().__init__(**a_ )
if depth_multiplier <= 0:
raise ValueError("""depth_multiplier must be greater than zero.""" )
_UpperCAmelCase : Dict = num_channels
_UpperCAmelCase : Union[str, Any] = image_size
_UpperCAmelCase : Union[str, Any] = depth_multiplier
_UpperCAmelCase : Union[str, Any] = min_depth
_UpperCAmelCase : Union[str, Any] = hidden_act
_UpperCAmelCase : List[str] = tf_padding
_UpperCAmelCase : Optional[int] = classifier_dropout_prob
_UpperCAmelCase : Any = initializer_range
_UpperCAmelCase : List[str] = layer_norm_eps
class lowercase ( _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase = version.parse("""1.11""" )
@property
def _snake_case ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict([("""pixel_values""", {0: """batch"""})] )
@property
def _snake_case ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "image-classification":
return OrderedDict([("""logits""", {0: """batch"""})] )
else:
return OrderedDict([("""last_hidden_state""", {0: """batch"""}), ("""pooler_output""", {0: """batch"""})] )
@property
def _snake_case ( self ) -> float:
return 1E-4
| 349 |
'''simple docstring'''
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
A_ : Dict = logging.getLogger(__name__)
class GLUETransformer ( BaseTransformer ):
"""simple docstring"""
UpperCAmelCase = """sequence-classification"""
def __init__( self ,a_ ) -> Dict:
if type(a_ ) == dict:
_UpperCAmelCase : Tuple = Namespace(**a_ )
_UpperCAmelCase : Optional[int] = glue_output_modes[hparams.task]
_UpperCAmelCase : Union[str, Any] = glue_tasks_num_labels[hparams.task]
super().__init__(a_ ,a_ ,self.mode )
def _snake_case ( self ,**a_ ) -> Optional[Any]:
return self.model(**a_ )
def _snake_case ( self ,a_ ,a_ ) -> Optional[Any]:
_UpperCAmelCase : Optional[Any] = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
_UpperCAmelCase : Any = batch[2] if self.config.model_type in ["""bert""", """xlnet""", """albert"""] else None
_UpperCAmelCase : Any = self(**a_ )
_UpperCAmelCase : int = outputs[0]
_UpperCAmelCase : Any = self.trainer.lr_schedulers[0]["""scheduler"""]
_UpperCAmelCase : Any = {"""loss""": loss, """rate""": lr_scheduler.get_last_lr()[-1]}
return {"loss": loss, "log": tensorboard_logs}
def _snake_case ( self ) -> int:
_UpperCAmelCase : Optional[int] = self.hparams
_UpperCAmelCase : int = processors[args.task]()
_UpperCAmelCase : str = processor.get_labels()
for mode in ["train", "dev"]:
_UpperCAmelCase : Tuple = self._feature_file(a_ )
if os.path.exists(a_ ) and not args.overwrite_cache:
logger.info("""Loading features from cached file %s""" ,a_ )
else:
logger.info("""Creating features from dataset file at %s""" ,args.data_dir )
_UpperCAmelCase : List[Any] = (
processor.get_dev_examples(args.data_dir )
if mode == """dev"""
else processor.get_train_examples(args.data_dir )
)
_UpperCAmelCase : Union[str, Any] = convert_examples_to_features(
a_ ,self.tokenizer ,max_length=args.max_seq_length ,label_list=self.labels ,output_mode=args.glue_output_mode ,)
logger.info("""Saving features into cached file %s""" ,a_ )
torch.save(a_ ,a_ )
def _snake_case ( self ,a_ ,a_ ,a_ = False ) -> DataLoader:
_UpperCAmelCase : Union[str, Any] = """dev""" if mode == """test""" else mode
_UpperCAmelCase : Tuple = self._feature_file(a_ )
logger.info("""Loading features from cached file %s""" ,a_ )
_UpperCAmelCase : Union[str, Any] = torch.load(a_ )
_UpperCAmelCase : List[str] = torch.tensor([f.input_ids for f in features] ,dtype=torch.long )
_UpperCAmelCase : Tuple = torch.tensor([f.attention_mask for f in features] ,dtype=torch.long )
_UpperCAmelCase : str = torch.tensor([f.token_type_ids for f in features] ,dtype=torch.long )
if self.hparams.glue_output_mode == "classification":
_UpperCAmelCase : Optional[int] = torch.tensor([f.label for f in features] ,dtype=torch.long )
elif self.hparams.glue_output_mode == "regression":
_UpperCAmelCase : str = torch.tensor([f.label for f in features] ,dtype=torch.float )
return DataLoader(
TensorDataset(a_ ,a_ ,a_ ,a_ ) ,batch_size=a_ ,shuffle=a_ ,)
def _snake_case ( self ,a_ ,a_ ) -> Any:
_UpperCAmelCase : Any = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
_UpperCAmelCase : int = batch[2] if self.config.model_type in ["""bert""", """xlnet""", """albert"""] else None
_UpperCAmelCase : List[str] = self(**a_ )
        tmp_eval_loss , logits = outputs[:2]
_UpperCAmelCase : List[str] = logits.detach().cpu().numpy()
_UpperCAmelCase : Union[str, Any] = inputs["""labels"""].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def _snake_case ( self ,a_ ) -> tuple:
_UpperCAmelCase : Optional[int] = torch.stack([x["""val_loss"""] for x in outputs] ).mean().detach().cpu().item()
_UpperCAmelCase : Any = np.concatenate([x["""pred"""] for x in outputs] ,axis=0 )
if self.hparams.glue_output_mode == "classification":
_UpperCAmelCase : int = np.argmax(a_ ,axis=1 )
elif self.hparams.glue_output_mode == "regression":
_UpperCAmelCase : Union[str, Any] = np.squeeze(a_ )
_UpperCAmelCase : str = np.concatenate([x["""target"""] for x in outputs] ,axis=0 )
_UpperCAmelCase : Tuple = [[] for _ in range(out_label_ids.shape[0] )]
_UpperCAmelCase : Optional[int] = [[] for _ in range(out_label_ids.shape[0] )]
_UpperCAmelCase : Optional[int] = {**{"""val_loss""": val_loss_mean}, **compute_metrics(self.hparams.task ,a_ ,a_ )}
_UpperCAmelCase : Dict = dict(results.items() )
_UpperCAmelCase : Any = results
return ret, preds_list, out_label_list
def _snake_case ( self ,a_ ) -> dict:
        ret , preds , targets = self._eval_end(a_ )
_UpperCAmelCase : List[Any] = ret["""log"""]
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def _snake_case ( self ,a_ ) -> dict:
        ret , preds , targets = self._eval_end(a_ )
_UpperCAmelCase : List[Any] = ret["""log"""]
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def _snake_case ( a_ ,a_ ) -> Any:
BaseTransformer.add_model_specific_args(a_ ,a_ )
parser.add_argument(
"""--max_seq_length""" ,default=128 ,type=a_ ,help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) ,)
parser.add_argument(
"""--task""" ,default="""""" ,type=a_ ,required=a_ ,help="""The GLUE task to run""" ,)
parser.add_argument(
"""--gpus""" ,default=0 ,type=a_ ,help="""The number of GPUs allocated for this, it is by default 0 meaning none""" ,)
parser.add_argument(
"""--overwrite_cache""" ,action="""store_true""" ,help="""Overwrite the cached training and evaluation sets""" )
return parser
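# Example invocation (illustrative paths; generic flags such as
# --model_name_or_path and --output_dir come from add_generic_args in
# lightning_base, which is not shown here):
#
#   python run_glue.py --task mrpc --data_dir ./glue_data/MRPC \
#       --model_name_or_path bert-base-cased --gpus 1 --do_predict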
def main( )-> Tuple:
    '''simple docstring'''
    parser = argparse.ArgumentParser()
    add_generic_args(parser , os.getcwd() )
    parser = GLUETransformer.add_model_specific_args(parser , os.getcwd() )
    args = parser.parse_args()
    # If output_dir not provided, a folder will be generated in pwd
    if args.output_dir is None:
        args.output_dir = os.path.join(
            """./results""" , F'''{args.task}_{time.strftime('%Y%m%d_%H%M%S' )}''' , )
        os.makedirs(args.output_dir )
    model = GLUETransformer(args )
    trainer = generic_train(model , args )
    # Optionally, predict on dev set and write to output_dir
    if args.do_predict:
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir , """checkpoint-epoch=*.ckpt""" ) , recursive=True ) )
        model = model.load_from_checkpoint(checkpoints[-1] )
        return trainer.test(model )
if __name__ == "__main__":
main()
| 349 | 1 |
'''simple docstring'''
def find_minimum_change( denominations , value )-> list[int]:
    '''simple docstring'''
    total_value = int(value )
    # Initialize Result
    answer = []
    # Traverse through all denomination
    for denomination in reversed(denominations ):
        # Find denominations
        while int(total_value ) >= int(denomination ):
            total_value -= int(denomination )
            answer.append(denomination )  # Append to the "answer" array
    return answer
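# Worked example (values illustrative): for 1009 with the default Indian
# denominations, the greedy pass peels off 500 + 500 + 5 + 2 + 2, i.e.
#
#   find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], 1009)
#   # -> [500, 500, 5, 2, 2]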
# Driver Code
if __name__ == "__main__":
    denominations = []
    value = """0"""
    if (
        input("""Do you want to enter your denominations ? (yY/n): """).strip().lower()
        == "y"
    ):
        n = int(input("""Enter the number of denominations you want to add: """).strip())
        for i in range(0, n):
            denominations.append(int(input(f"""Denomination {i}: """).strip()))
        value = input("""Enter the change you want to make in Indian Currency: """).strip()
    else:
        # All denominations of Indian Currency if user does not enter
        denominations = [1, 2, 5, 1_0, 2_0, 5_0, 1_0_0, 5_0_0, 2_0_0_0]
        value = input("""Enter the change you want to make: """).strip()
    if int(value) == 0 or int(value) < 0:
        print("""The total value cannot be zero or negative.""")
    else:
        print(f"""Following is minimal change for {value}: """)
        answer = find_minimum_change(denominations, value)
for i in range(len(answer)):
print(answer[i], end=""" """)
| 349 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A_ : List[Any] = logging.get_logger(__name__)
A_ : Union[str, Any] = {
"""junnyu/roformer_chinese_small""": """https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json""",
"""junnyu/roformer_chinese_base""": """https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json""",
"""junnyu/roformer_chinese_char_small""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json"""
),
"""junnyu/roformer_chinese_char_base""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json"""
),
"""junnyu/roformer_small_discriminator""": (
"""https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json"""
),
"""junnyu/roformer_small_generator""": (
"""https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json"""
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class lowercase ( _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase = """roformer"""
def __init__( self ,a_=50_000 ,a_=None ,a_=768 ,a_=12 ,a_=12 ,a_=3_072 ,a_="gelu" ,a_=0.1 ,a_=0.1 ,a_=1_536 ,a_=2 ,a_=0.02 ,a_=1E-1_2 ,a_=0 ,a_=False ,a_=True ,**a_ ,) -> Tuple:
super().__init__(pad_token_id=a_ ,**a_ )
_UpperCAmelCase : List[Any] = vocab_size
_UpperCAmelCase : str = hidden_size if embedding_size is None else embedding_size
_UpperCAmelCase : List[Any] = hidden_size
_UpperCAmelCase : str = num_hidden_layers
_UpperCAmelCase : Optional[Any] = num_attention_heads
_UpperCAmelCase : Optional[Any] = hidden_act
_UpperCAmelCase : str = intermediate_size
_UpperCAmelCase : Optional[Any] = hidden_dropout_prob
_UpperCAmelCase : Any = attention_probs_dropout_prob
_UpperCAmelCase : Optional[int] = max_position_embeddings
_UpperCAmelCase : Any = type_vocab_size
_UpperCAmelCase : Tuple = initializer_range
_UpperCAmelCase : Dict = layer_norm_eps
_UpperCAmelCase : Optional[int] = rotary_value
_UpperCAmelCase : Any = use_cache
class lowercase ( _lowerCamelCase ):
"""simple docstring"""
@property
def _snake_case ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
_UpperCAmelCase : Optional[Any] = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
_UpperCAmelCase : List[Any] = {0: """batch""", 1: """sequence"""}
_UpperCAmelCase : Tuple = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
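# Export sketch (illustrative): the dynamic axes above mean an exported ONNX
# graph accepts any batch size and sequence length at inference time, e.g. via
# the transformers ONNX CLI:
#
#   python -m transformers.onnx --model=junnyu/roformer_chinese_base onnx_out/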
| 349 | 1 |
'''simple docstring'''
import base64
import io
import json
import os
from copy import deepcopy
from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler
class lowercase :
"""simple docstring"""
    def __init__( self ,config_file_or_dict ) -> Tuple:
        if isinstance(config_file_or_dict ,dict ):
            # Don't modify user's data should they want to reuse it (e.g. in tests), because once we
            # modified it, it will not be accepted here again, since `auto` values would have been overridden
            config = deepcopy(config_file_or_dict )
        elif os.path.exists(config_file_or_dict ):
            with io.open(config_file_or_dict ,"""r""" ,encoding="""utf-8""" ) as f:
                config = json.load(f )
        else:
            try:
                config_decoded = base64.urlsafe_b64decode(config_file_or_dict ).decode("""utf-8""" )
                config = json.loads(config_decoded )
            except (UnicodeDecodeError, AttributeError, ValueError):
                raise ValueError(
                    f'''Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}''' )
        self.config = config
        self.set_stage_and_offload()
    def set_stage_and_offload( self ) -> Dict:
        # zero stage - this is done as early as possible, before model is created, to allow
        # ``is_deepspeed_zero3_enabled`` query and getting to the early deepspeed config object
        # during ``zero.Init()`` which needs to know the dtype, and some other hparams.
        self._stage = self.get_value("""zero_optimization.stage""" ,-1 )
        # offload
        self._offload = False
        if self.is_zero2() or self.is_zero3():
            offload_devices_valid = set(["""cpu""", """nvme"""] )
            offload_devices = set(
                [
                    self.get_value("""zero_optimization.offload_optimizer.device""" ),
                    self.get_value("""zero_optimization.offload_param.device""" ),
                ] )
            if len(offload_devices & offload_devices_valid ) > 0:
                self._offload = True
    def find_config_node( self ,ds_key_long ) -> Tuple:
        config = self.config
        # find the config node of interest if it exists
        nodes = ds_key_long.split(""".""" )
        ds_key = nodes.pop()
        for node in nodes:
            config = config.get(node )
            if config is None:
                return None, ds_key
        return config, ds_key
    def get_value( self ,ds_key_long ,default=None ) -> Tuple:
        config , ds_key = self.find_config_node(ds_key_long )
        if config is None:
            return default
        return config.get(ds_key ,default )
    def del_config_sub_tree( self ,ds_key_long ,must_exist=False ) -> int:
        config = self.config
        # find the config node of interest if it exists
        nodes = ds_key_long.split(""".""" )
        for node in nodes:
            parent_config = config
            config = config.get(node )
            if config is None:
                if must_exist:
                    raise ValueError(f'''Can\'t find {ds_key_long} entry in the config: {self.config}''' )
                else:
                    return
        # if found remove it
        if parent_config is not None:
            parent_config.pop(node )
    def is_true( self ,ds_key_long ) -> Any:
        value = self.get_value(ds_key_long )
        return False if value is None else bool(value )
    def is_false( self ,ds_key_long ) -> str:
        value = self.get_value(ds_key_long )
        return False if value is None else not bool(value )
    def is_zero2( self ) -> List[Any]:
        return self._stage == 2
    def is_zero3( self ) -> Optional[Any]:
        return self._stage == 3
    def is_offload( self ) -> Dict:
        return self._offload
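# Usage sketch for the config wrapper class defined above (dict values
# illustrative):
#
#   ds = lowercase({"zero_optimization": {"stage": 3, "offload_param": {"device": "cpu"}}})
#   ds.get_value("zero_optimization.stage")  # -> 3
#   ds.is_zero3()                            # -> True
#   ds.is_offload()                          # -> True ("cpu" is a valid offload device)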
class lowercase :
"""simple docstring"""
    def __init__( self ,engine ) -> Optional[Any]:
        self.engine = engine
    def backward( self ,loss ,**kwargs ) -> Tuple:
        # runs backpropagation and handles mixed precision
        self.engine.backward(loss ,**kwargs )
# Deepspeed's `engine.step` performs the following operations:
# - gradient accumulation check
# - gradient clipping
# - optimizer step
# - zero grad
# - checking overflow
# - lr_scheduler step (only if engine.lr_scheduler is not None)
self.engine.step()
# and this plugin overrides the above calls with no-ops when Accelerate runs under
# Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple
# training loop that works transparently under many training regimes.
class lowercase ( AcceleratedOptimizer ):
"""simple docstring"""
    def __init__( self ,optimizer ) -> str:
        super().__init__(optimizer ,device_placement=False ,scaler=None )
        self.__has_overflow__ = hasattr(self.optimizer ,"""overflow""" )
    def zero_grad( self ,set_to_none=None ) -> Union[str, Any]:
        pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
    def step( self ) -> Optional[int]:
        pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
    @property
    def step_was_skipped( self ) -> Any:
        if self.__has_overflow__:
            return self.optimizer.overflow
        return False
class lowercase ( AcceleratedScheduler ):
"""simple docstring"""
    def __init__( self ,scheduler ,optimizers ) -> Union[str, Any]:
        super().__init__(scheduler ,optimizers )
    def step( self ) -> Any:
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
class lowercase :
"""simple docstring"""
    def __init__( self ,params ,lr=0.001 ,weight_decay=0 ,**kwargs ) -> Union[str, Any]:
        self.params = params
        self.lr = lr
        self.weight_decay = weight_decay
        self.kwargs = kwargs
class lowercase :
"""simple docstring"""
    def __init__( self ,optimizer ,total_num_steps=None ,warmup_num_steps=0 ,**kwargs ) -> Dict:
        self.optimizer = optimizer
        self.total_num_steps = total_num_steps
        self.warmup_num_steps = warmup_num_steps
        self.kwargs = kwargs
| 349 |
'''simple docstring'''
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class lowercase ( _lowerCamelCase ):
"""simple docstring"""
@slow
@require_torch
def _snake_case ( self ) -> Union[str, Any]:
_UpperCAmelCase : Tuple = EncoderDecoderModel.from_encoder_decoder_pretrained("""prajjwal1/bert-tiny""" ,"""prajjwal1/bert-tiny""" )
_UpperCAmelCase : List[Any] = BertTokenizer.from_pretrained("""bert-base-uncased""" )
_UpperCAmelCase : List[Any] = bertabert.config.encoder.vocab_size
_UpperCAmelCase : Optional[int] = tokenizer.sep_token_id
_UpperCAmelCase : Union[str, Any] = tokenizer.cls_token_id
_UpperCAmelCase : str = 128
_UpperCAmelCase : List[str] = datasets.load_dataset("""cnn_dailymail""" ,"""3.0.0""" ,split="""train[:1%]""" )
_UpperCAmelCase : Union[str, Any] = datasets.load_dataset("""cnn_dailymail""" ,"""3.0.0""" ,split="""validation[:1%]""" )
_UpperCAmelCase : Any = train_dataset.select(range(32 ) )
_UpperCAmelCase : Any = val_dataset.select(range(16 ) )
_UpperCAmelCase : List[Any] = 4
def _map_to_encoder_decoder_inputs(a_ ):
# Tokenizer will automatically set [BOS] <text> [EOS]
_UpperCAmelCase : int = tokenizer(batch["""article"""] ,padding="""max_length""" ,truncation=a_ ,max_length=512 )
_UpperCAmelCase : Tuple = tokenizer(batch["""highlights"""] ,padding="""max_length""" ,truncation=a_ ,max_length=128 )
_UpperCAmelCase : int = inputs.input_ids
_UpperCAmelCase : Union[str, Any] = inputs.attention_mask
_UpperCAmelCase : Union[str, Any] = outputs.input_ids
_UpperCAmelCase : Dict = outputs.input_ids.copy()
_UpperCAmelCase : Dict = [
[-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["""labels"""]
]
_UpperCAmelCase : Optional[int] = outputs.attention_mask
            assert all(len(x ) == 512 for x in inputs.input_ids )
            assert all(len(x ) == 128 for x in outputs.input_ids )
return batch
def _compute_metrics(a_ ):
_UpperCAmelCase : Optional[int] = pred.label_ids
_UpperCAmelCase : Optional[int] = pred.predictions
# all unnecessary tokens are removed
_UpperCAmelCase : Union[str, Any] = tokenizer.batch_decode(a_ ,skip_special_tokens=a_ )
_UpperCAmelCase : str = tokenizer.batch_decode(a_ ,skip_special_tokens=a_ )
            _UpperCAmelCase : Tuple = sum([int(pred_str[i] == label_str[i] ) for i in range(len(pred_str ) )] ) / len(pred_str )
return {"accuracy": accuracy}
# map train dataset
_UpperCAmelCase : Union[str, Any] = train_dataset.map(
_map_to_encoder_decoder_inputs ,batched=a_ ,batch_size=a_ ,remove_columns=["""article""", """highlights"""] ,)
train_dataset.set_format(
type="""torch""" ,columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] ,)
# same for validation dataset
_UpperCAmelCase : List[str] = val_dataset.map(
_map_to_encoder_decoder_inputs ,batched=a_ ,batch_size=a_ ,remove_columns=["""article""", """highlights"""] ,)
val_dataset.set_format(
type="""torch""" ,columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] ,)
_UpperCAmelCase : Optional[int] = self.get_auto_remove_tmp_dir()
_UpperCAmelCase : List[str] = SeqaSeqTrainingArguments(
output_dir=a_ ,per_device_train_batch_size=a_ ,per_device_eval_batch_size=a_ ,predict_with_generate=a_ ,evaluation_strategy="""steps""" ,do_train=a_ ,do_eval=a_ ,warmup_steps=0 ,eval_steps=2 ,logging_steps=2 ,)
# instantiate trainer
_UpperCAmelCase : int = SeqaSeqTrainer(
model=a_ ,args=a_ ,compute_metrics=_compute_metrics ,train_dataset=a_ ,eval_dataset=a_ ,tokenizer=a_ ,)
# start training
trainer.train()
| 349 | 1 |
'''simple docstring'''
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A_ : int = logging.get_logger(__name__)
A_ : Optional[Any] = {
"""nvidia/segformer-b0-finetuned-ade-512-512""": (
"""https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"""
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class lowercase ( _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase = """segformer"""
def __init__( self ,a_=3 ,a_=4 ,a_=[2, 2, 2, 2] ,a_=[8, 4, 2, 1] ,a_=[32, 64, 160, 256] ,a_=[7, 3, 3, 3] ,a_=[4, 2, 2, 2] ,a_=[1, 2, 5, 8] ,a_=[4, 4, 4, 4] ,a_="gelu" ,a_=0.0 ,a_=0.0 ,a_=0.1 ,a_=0.02 ,a_=0.1 ,a_=1E-6 ,a_=256 ,a_=255 ,**a_ ,) -> Optional[int]:
super().__init__(**a_ )
if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                """Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"""
                """ removed, as the behaviour will default to that of reshape_last_stage = True.""" ,FutureWarning ,)
_UpperCAmelCase : Optional[int] = num_channels
_UpperCAmelCase : str = num_encoder_blocks
_UpperCAmelCase : Dict = depths
_UpperCAmelCase : Tuple = sr_ratios
_UpperCAmelCase : List[str] = hidden_sizes
_UpperCAmelCase : Dict = patch_sizes
_UpperCAmelCase : List[str] = strides
_UpperCAmelCase : Optional[int] = mlp_ratios
_UpperCAmelCase : Tuple = num_attention_heads
_UpperCAmelCase : Tuple = hidden_act
_UpperCAmelCase : str = hidden_dropout_prob
_UpperCAmelCase : Tuple = attention_probs_dropout_prob
_UpperCAmelCase : Union[str, Any] = classifier_dropout_prob
_UpperCAmelCase : List[Any] = initializer_range
_UpperCAmelCase : List[Any] = drop_path_rate
_UpperCAmelCase : Optional[int] = layer_norm_eps
_UpperCAmelCase : str = decoder_hidden_size
        _UpperCAmelCase : str = kwargs.get("""reshape_last_stage""" ,True )
_UpperCAmelCase : str = semantic_loss_ignore_index
class lowercase ( _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase = version.parse("""1.11""" )
@property
def _snake_case ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def _snake_case ( self ) -> float:
return 1E-4
@property
def _snake_case ( self ) -> int:
return 12
| 349 |
'''simple docstring'''
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
AXIS_A = 637_8137.0
AXIS_B = 635_6752.31_4245
EQUATORIAL_RADIUS = 6_3_7_8_1_3_7
def lamberts_ellipsoidal_distance( lat1 , lon1 , lat2 , lon2 )-> float:
    '''simple docstring'''
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    # Parametric latitudes
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    b_lat1 = atan((1 - flattening) * tan(radians(lat1 ) ) )
    b_lat2 = atan((1 - flattening) * tan(radians(lat2 ) ) )
    # Compute central angle between two points
    # using haversine theta. sigma = haversine_distance / equatorial radius
    sigma = haversine_distance(lat1 , lon1 , lat2 , lon2 ) / EQUATORIAL_RADIUS
    # Intermediate P and Q values
    p_value = (b_lat1 + b_lat2) / 2
    q_value = (b_lat2 - b_lat1) / 2
    # Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
    x_numerator = (sin(p_value ) ** 2) * (cos(q_value ) ** 2)
    x_denominator = cos(sigma / 2 ) ** 2
    x_value = (sigma - sin(sigma )) * (x_numerator / x_denominator)
    # Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
    y_numerator = (cos(p_value ) ** 2) * (sin(q_value ) ** 2)
    y_denominator = sin(sigma / 2 ) ** 2
    y_value = (sigma + sin(sigma )) * (y_numerator / y_denominator)
    return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
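# Quick sanity sketch (coordinates illustrative, expectation approximate):
# San Francisco (37.774856, -122.424227) to Yosemite (37.864742, -119.537521)
# is roughly 254 km, so the call below should return a value near 254_000 m.
#
#   lamberts_ellipsoidal_distance(37.774856, -122.424227, 37.864742, -119.537521)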
if __name__ == "__main__":
import doctest
doctest.testmod()
| 349 | 1 |
'''simple docstring'''
import unittest
from knapsack import greedy_knapsack as kp
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def _snake_case ( self ) -> Optional[Any]:
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit ,weight ,max_weight ) ,210 )
    def _snake_case ( self ) -> Union[str, Any]:
        self.assertRaisesRegex(ValueError ,"""max_weight must greater than zero.""" )
    def _snake_case ( self ) -> Any:
        self.assertRaisesRegex(ValueError ,"""Weight can not be negative.""" )
    def _snake_case ( self ) -> Optional[Any]:
        self.assertRaisesRegex(ValueError ,"""Profit can not be negative.""" )
    def _snake_case ( self ) -> Dict:
        self.assertRaisesRegex(ValueError ,"""max_weight must greater than zero.""" )
    def _snake_case ( self ) -> Tuple:
        self.assertRaisesRegex(
            ValueError ,"""The length of profit and weight must be same.""" )
if __name__ == "__main__":
unittest.main()
| 349 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
def trapezoidal_area( fnc , x_start , x_end , steps = 100 , )-> float:
    '''simple docstring'''
    x1 = x_start
    fx1 = fnc(x_start )
    area = 0.0
    for _ in range(steps ):
        # Approximates small segments of curve as linear and solve
        # for trapezoidal area
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2 )
        area += abs(fx2 + fx1 ) * (x2 - x1) / 2
        # Increment step
        x1 = x2
        fx1 = fx2
    return area
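# Quick check (illustrative): the trapezoidal rule is exact for a linear
# integrand, so this returns 0.5 (up to floating-point noise) for any steps:
#
#   trapezoidal_area(lambda x: x, 0, 1, 10)  # -> 0.5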
if __name__ == "__main__":
    def f( x )-> Any:
        '''simple docstring'''
        return x**3 + x**2
print("""f(x) = x^3 + x^2""")
print("""The area between the curve, x = -5, x = 5 and the x axis is:""")
A_ : List[str] = 1_0
while i <= 1_0_0_0_0_0:
print(f"""with {i} steps: {trapezoidal_area(f, -5, 5, i)}""")
i *= 1_0
| 349 | 1 |
'''simple docstring'''
import copy
import re
class lowercase :
"""simple docstring"""
UpperCAmelCase = """hp"""
UpperCAmelCase = {}
UpperCAmelCase = None
@classmethod
    def set_defaults( cls ,prefix ,defaults ) -> int:
        cls.PREFIX = prefix
        cls.DEFAULTS = defaults
        cls.build_naming_info()
@staticmethod
def _snake_case ( a_ ,a_ ) -> List[Any]:
if len(a_ ) == 0:
return ""
_UpperCAmelCase : Dict = None
if any(char.isdigit() for char in word ):
raise Exception(f'''Parameters should not contain numbers: \'{word}\' contains a number''' )
if word in info["short_word"]:
return info["short_word"][word]
for prefix_len in range(1 ,len(a_ ) + 1 ):
_UpperCAmelCase : Any = word[:prefix_len]
if prefix in info["reverse_short_word"]:
continue
else:
_UpperCAmelCase : List[Any] = prefix
break
if short_word is None:
# Paranoid fallback
def int_to_alphabetic(a_ ):
_UpperCAmelCase : Optional[int] = """"""
while integer != 0:
_UpperCAmelCase : Union[str, Any] = chr(ord("""A""" ) + integer % 10 ) + s
integer //= 10
return s
_UpperCAmelCase : Optional[int] = 0
while True:
_UpperCAmelCase : Union[str, Any] = word + """#""" + int_to_alphabetic(a_ )
if sword in info["reverse_short_word"]:
continue
else:
_UpperCAmelCase : List[Any] = sword
break
_UpperCAmelCase : int = short_word
_UpperCAmelCase : Any = word
return short_word
@staticmethod
def _snake_case ( a_ ,a_ ) -> int:
_UpperCAmelCase : int = param_name.split("""_""" )
_UpperCAmelCase : Optional[Any] = [TrialShortNamer.shortname_for_word(a_ ,a_ ) for word in words]
# We try to create a separatorless short name, but if there is a collision we have to fallback
# to a separated short name
_UpperCAmelCase : List[str] = ["""""", """_"""]
for separator in separators:
_UpperCAmelCase : Tuple = separator.join(a_ )
if shortname not in info["reverse_short_param"]:
_UpperCAmelCase : Optional[int] = shortname
_UpperCAmelCase : Optional[int] = param_name
return shortname
return param_name
@staticmethod
def _snake_case ( a_ ,a_ ) -> Tuple:
_UpperCAmelCase : int = TrialShortNamer.shortname_for_key(a_ ,a_ )
_UpperCAmelCase : Optional[int] = short_name
_UpperCAmelCase : str = param_name
@classmethod
def _snake_case ( cls ) -> Union[str, Any]:
if cls.NAMING_INFO is not None:
return
        info = {
            """short_word""": {},
            """reverse_short_word""": {},
            """short_param""": {},
            """reverse_short_param""": {},
        }
        field_keys = list(cls.DEFAULTS.keys() )
        for k in field_keys:
            cls.add_new_param_name(info ,k )
        cls.NAMING_INFO = info
@classmethod
def _snake_case ( cls ,a_ ) -> Any:
cls.build_naming_info()
assert cls.PREFIX is not None
_UpperCAmelCase : Any = [copy.copy(cls.PREFIX )]
for k, v in params.items():
if k not in cls.DEFAULTS:
raise Exception(f'''You should provide a default value for the param name {k} with value {v}''' )
if v == cls.DEFAULTS[k]:
# The default value is not added to the name
continue
_UpperCAmelCase : Union[str, Any] = cls.NAMING_INFO["""short_param"""][k]
if isinstance(a_ ,a_ ):
_UpperCAmelCase : Optional[Any] = 1 if v else 0
_UpperCAmelCase : int = """""" if isinstance(a_ ,(int, float) ) else """-"""
_UpperCAmelCase : Union[str, Any] = f'''{key}{sep}{v}'''
name.append(a_ )
return "_".join(a_ )
@classmethod
def _snake_case ( cls ,a_ ) -> str:
_UpperCAmelCase : Union[str, Any] = repr[len(cls.PREFIX ) + 1 :]
if repr == "":
_UpperCAmelCase : Optional[Any] = []
else:
_UpperCAmelCase : Optional[int] = repr.split("""_""" )
_UpperCAmelCase : List[Any] = {}
for value in values:
if "-" in value:
                p_k , p_v = value.split("""-""" )
else:
_UpperCAmelCase : int = re.sub("""[0-9.]""" ,"""""" ,a_ )
_UpperCAmelCase : Union[str, Any] = float(re.sub("""[^0-9.]""" ,"""""" ,a_ ) )
_UpperCAmelCase : Union[str, Any] = cls.NAMING_INFO["""reverse_short_param"""][p_k]
_UpperCAmelCase : List[str] = p_v
for k in cls.DEFAULTS:
if k not in parameters:
_UpperCAmelCase : List[str] = cls.DEFAULTS[k]
return parameters
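# Usage sketch (hypothetical subclass; attribute values illustrative): a
# concrete namer fills in PREFIX and DEFAULTS, after which the shortname
# classmethod produces compact trial names like "trial_lr-0.5", and the
# repr-parsing classmethod above inverts them back into a parameter dict.
#
#   class MyNamer(lowercase):   # `lowercase` is the base class defined above
#       PREFIX = "trial"
#       DEFAULTS = {"learning_rate": 0.1, "batch_size": 32}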
| 349 |
'''simple docstring'''
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args( ):
    '''simple docstring'''
    parser = ArgumentParser(
        description=(
            """PyTorch TPU distributed training launch helper utility that will spawn multiple distributed processes"""
        ) )
    # Optional arguments for the launch helper
    parser.add_argument("""--num_cores""" , type=int , default=1 , help="""Number of TPU cores to use (1 or 8).""" )
# positional
parser.add_argument(
"""training_script""" , type=lowerCAmelCase_ , help=(
"""The full path to the single TPU training """
"""program/script to be launched in parallel, """
"""followed by all the arguments for the """
"""training script"""
) , )
# rest from the training program
parser.add_argument("""training_script_args""" , nargs=lowerCAmelCase_ )
return parser.parse_args()
def main( ):
    '''simple docstring'''
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script )
    sys.path.append(str(script_fpath.parent.resolve() ) )
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name )
    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["""--tpu_num_cores""", str(args.num_cores )]
xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
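# Example invocation (assuming this launcher is saved as xla_spawn.py and the
# training script exposes a `_mp_fn(index)` entry point, as TPU-ready
# transformers examples do):
#
#   python xla_spawn.py --num_cores 8 run_glue.py --model_name_or_path bert-base-cased --output_dir /tmp/out
#
# Everything after the script path is forwarded verbatim, plus the injected
# --tpu_num_cores flag, via the patched sys.argv.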
| 349 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
A_ : List[str] = {
"""configuration_conditional_detr""": [
"""CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""ConditionalDetrConfig""",
"""ConditionalDetrOnnxConfig""",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : List[str] = ["""ConditionalDetrFeatureExtractor"""]
A_ : Tuple = ["""ConditionalDetrImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : List[Any] = [
"""CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ConditionalDetrForObjectDetection""",
"""ConditionalDetrForSegmentation""",
"""ConditionalDetrModel""",
"""ConditionalDetrPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
A_ : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 349 |
'''simple docstring'''
def snake_case_ ( lowerCAmelCase_ )-> int:
'''simple docstring'''
    if not isinstance(lowerCAmelCase_ , int ):
        raise TypeError("""only integers accepted as input""" )
    else:
        num_str = str(abs(lowerCAmelCase_ ) )
        num_transpositions = [list(num_str ) for char in range(len(num_str ) )]
        for index in range(len(num_str ) ):
            num_transpositions[index].pop(index )
        return max(
            int("""""".join(transposition ) ) for transposition in num_transpositions )
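# Quick sanity checks: the function returns the largest value obtainable by
# deleting exactly one digit (the sign is dropped via abs()).
assert snake_case_(152) == 52      # candidates: 52, 12, 15
assert snake_case_(-2736) == 736   # candidates: 736, 236, 276, 273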
if __name__ == "__main__":
__import__("""doctest""").testmod()
| 349 | 1 |
'''simple docstring'''
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class lowercase ( SchedulerCommonTest ):
"""simple docstring"""
    scheduler_classes = (DDPMParallelScheduler,)
    def get_scheduler_config( self ,**kwargs ):
        config = {
"""num_train_timesteps""": 1_000,
"""beta_start""": 0.0001,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
"""variance_type""": """fixed_small""",
"""clip_sample""": True,
}
        config.update(**kwargs )
return config
def _snake_case ( self ) -> str:
for timesteps in [1, 5, 100, 1_000]:
self.check_over_configs(num_train_timesteps=a_ )
def _snake_case ( self ) -> List[str]:
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] ,[0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=a_ ,beta_end=a_ )
def _snake_case ( self ) -> List[str]:
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=a_ )
def _snake_case ( self ) -> str:
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=a_ )
def _snake_case ( self ) -> str:
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=a_ )
def _snake_case ( self ) -> Any:
self.check_over_configs(thresholding=a_ )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=a_ ,prediction_type=a_ ,sample_max_value=a_ ,)
def _snake_case ( self ) -> Union[str, Any]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=a_ )
def _snake_case ( self ) -> Optional[Any]:
for t in [0, 500, 999]:
self.check_over_forward(time_step=a_ )
def _snake_case ( self ) -> Any:
_UpperCAmelCase : List[str] = self.scheduler_classes[0]
_UpperCAmelCase : Tuple = self.get_scheduler_config()
_UpperCAmelCase : List[Any] = scheduler_class(**a_ )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0979 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1E-5
def _snake_case ( self ) -> Union[str, Any]:
_UpperCAmelCase : Tuple = self.scheduler_classes[0]
_UpperCAmelCase : List[Any] = self.get_scheduler_config()
_UpperCAmelCase : Optional[Any] = scheduler_class(**a_ )
_UpperCAmelCase : int = len(a_ )
_UpperCAmelCase : List[str] = self.dummy_model()
_UpperCAmelCase : List[Any] = self.dummy_sample_deter
_UpperCAmelCase : List[str] = self.dummy_sample_deter + 0.1
_UpperCAmelCase : Optional[Any] = self.dummy_sample_deter - 0.1
_UpperCAmelCase : Union[str, Any] = samplea.shape[0]
_UpperCAmelCase : Dict = torch.stack([samplea, samplea, samplea] ,dim=0 )
_UpperCAmelCase : List[str] = torch.arange(a_ )[0:3, None].repeat(1 ,a_ )
_UpperCAmelCase : Any = model(samples.flatten(0 ,1 ) ,timesteps.flatten(0 ,1 ) )
_UpperCAmelCase : List[Any] = scheduler.batch_step_no_noise(a_ ,timesteps.flatten(0 ,1 ) ,samples.flatten(0 ,1 ) )
_UpperCAmelCase : str = torch.sum(torch.abs(a_ ) )
_UpperCAmelCase : List[Any] = torch.mean(torch.abs(a_ ) )
assert abs(result_sum.item() - 1153.1833 ) < 1E-2
assert abs(result_mean.item() - 0.5005 ) < 1E-3
def _snake_case ( self ) -> Any:
_UpperCAmelCase : List[str] = self.scheduler_classes[0]
_UpperCAmelCase : int = self.get_scheduler_config()
_UpperCAmelCase : int = scheduler_class(**a_ )
_UpperCAmelCase : int = len(a_ )
_UpperCAmelCase : List[str] = self.dummy_model()
_UpperCAmelCase : Any = self.dummy_sample_deter
_UpperCAmelCase : Tuple = torch.manual_seed(0 )
for t in reversed(range(a_ ) ):
# 1. predict noise residual
_UpperCAmelCase : Dict = model(a_ ,a_ )
# 2. predict previous mean of sample x_t-1
_UpperCAmelCase : Optional[Any] = scheduler.step(a_ ,a_ ,a_ ,generator=a_ ).prev_sample
_UpperCAmelCase : int = pred_prev_sample
_UpperCAmelCase : Union[str, Any] = torch.sum(torch.abs(a_ ) )
_UpperCAmelCase : List[Any] = torch.mean(torch.abs(a_ ) )
assert abs(result_sum.item() - 258.9606 ) < 1E-2
assert abs(result_mean.item() - 0.3372 ) < 1E-3
def _snake_case ( self ) -> str:
_UpperCAmelCase : Tuple = self.scheduler_classes[0]
_UpperCAmelCase : int = self.get_scheduler_config(prediction_type="""v_prediction""" )
_UpperCAmelCase : Tuple = scheduler_class(**a_ )
_UpperCAmelCase : List[str] = len(a_ )
_UpperCAmelCase : Tuple = self.dummy_model()
_UpperCAmelCase : Optional[int] = self.dummy_sample_deter
_UpperCAmelCase : List[Any] = torch.manual_seed(0 )
for t in reversed(range(a_ ) ):
# 1. predict noise residual
_UpperCAmelCase : List[Any] = model(a_ ,a_ )
# 2. predict previous mean of sample x_t-1
_UpperCAmelCase : List[Any] = scheduler.step(a_ ,a_ ,a_ ,generator=a_ ).prev_sample
_UpperCAmelCase : Any = pred_prev_sample
_UpperCAmelCase : Union[str, Any] = torch.sum(torch.abs(a_ ) )
_UpperCAmelCase : Optional[int] = torch.mean(torch.abs(a_ ) )
assert abs(result_sum.item() - 202.0296 ) < 1E-2
assert abs(result_mean.item() - 0.2631 ) < 1E-3
def _snake_case ( self ) -> Optional[Any]:
_UpperCAmelCase : Union[str, Any] = self.scheduler_classes[0]
_UpperCAmelCase : Optional[Any] = self.get_scheduler_config()
_UpperCAmelCase : List[Any] = scheduler_class(**a_ )
_UpperCAmelCase : str = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=a_ )
_UpperCAmelCase : Dict = scheduler.timesteps
for i, timestep in enumerate(a_ ):
if i == len(a_ ) - 1:
_UpperCAmelCase : List[Any] = -1
else:
_UpperCAmelCase : int = timesteps[i + 1]
_UpperCAmelCase : Dict = scheduler.previous_timestep(a_ )
_UpperCAmelCase : str = prev_t.item()
self.assertEqual(a_ ,a_ )
def _snake_case ( self ) -> Union[str, Any]:
_UpperCAmelCase : int = self.scheduler_classes[0]
_UpperCAmelCase : List[Any] = self.get_scheduler_config()
_UpperCAmelCase : Tuple = scheduler_class(**a_ )
_UpperCAmelCase : Optional[Any] = [100, 87, 50, 51, 0]
with self.assertRaises(a_ ,msg="""`custom_timesteps` must be in descending order.""" ):
scheduler.set_timesteps(timesteps=a_ )
def _snake_case ( self ) -> Union[str, Any]:
_UpperCAmelCase : Optional[int] = self.scheduler_classes[0]
_UpperCAmelCase : Optional[int] = self.get_scheduler_config()
_UpperCAmelCase : Dict = scheduler_class(**a_ )
_UpperCAmelCase : List[str] = [100, 87, 50, 1, 0]
_UpperCAmelCase : Tuple = len(a_ )
with self.assertRaises(a_ ,msg="""Can only pass one of `num_inference_steps` or `custom_timesteps`.""" ):
scheduler.set_timesteps(num_inference_steps=a_ ,timesteps=a_ )
def _snake_case ( self ) -> List[Any]:
_UpperCAmelCase : str = self.scheduler_classes[0]
_UpperCAmelCase : List[str] = self.get_scheduler_config()
_UpperCAmelCase : Optional[int] = scheduler_class(**a_ )
_UpperCAmelCase : List[str] = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            a_ ,msg=f'''`timesteps` must start before `self.config.num_train_timesteps`: {scheduler.config.num_train_timesteps}''' ,):
scheduler.set_timesteps(timesteps=a_ )
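# Hedged sketch of the API these tests exercise: `batch_step_no_noise` denoises
# several timesteps in a single call (the building block of parallel sampling);
# shapes here are illustrative.
import torch
from diffusers import DDPMParallelScheduler

scheduler = DDPMParallelScheduler(num_train_timesteps=1_000, beta_schedule="linear")
sample = torch.randn(3, 4, 8, 8)            # three noisy latents
timesteps = torch.tensor([999, 998, 997])   # one timestep per latent
model_output = torch.randn_like(sample)     # stand-in for a UNet prediction
prev = scheduler.batch_step_no_noise(model_output, timesteps, sample)
assert prev.shape == sample.shape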
| 349 |
'''simple docstring'''
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
A_ : Dict = logging.get_logger(__name__)
def copy_layers( src_layers , dest_layers , layers_to_copy )-> None:
    '''simple docstring'''
    layers_to_copy = nn.ModuleList([src_layers[i] for i in layers_to_copy] )
    assert len(dest_layers ) == len(layers_to_copy ), F'''{len(dest_layers )} != {len(layers_to_copy )}'''
    dest_layers.load_state_dict(layers_to_copy.state_dict() )
A_ : Union[str, Any] = {
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
1_2: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 1_1],
4: [0, 4, 8, 1_1],
6: [0, 2, 4, 7, 9, 1_1],
9: [0, 1, 2, 4, 5, 7, 9, 1_0, 1_1],
1_2: list(range(1_2)),
},
1_6: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 1_5],
3: [0, 8, 1_5],
4: [0, 5, 1_0, 1_5],
6: [0, 3, 6, 9, 1_2, 1_5],
8: [0, 2, 4, 6, 8, 1_0, 1_2, 1_5],
9: [0, 1, 3, 5, 7, 9, 1_1, 1_3, 1_5],
1_2: [0, 1, 2, 3, 4, 5, 6, 7, 9, 1_1, 1_3, 1_5],
1_6: list(range(1_6)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
A_ : int = {
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
1_2: {1: [1_1], 2: [5, 1_1], 3: [3, 7, 1_1], 6: [1, 3, 5, 8, 1_0, 1_1]},
1_6: {1: [1_5], 4: [4, 9, 1_2, 1_5], 8: [1, 3, 5, 7, 9, 1_1, 1_3, 1_5]},
}
def pick_layers_to_copy( n_student , n_teacher ):
    '''simple docstring'''
    try:
        val = LAYERS_TO_COPY[n_teacher][n_student]
        return val
    except KeyError:
        if n_student != n_teacher:
            warnings.warn(
                F'''no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first'''
                F''' {n_student}''' )
        return list(range(n_student ) )
def get_layers_to_supervise( n_student , n_teacher )-> List[int]:
    '''simple docstring'''
    if n_student > n_teacher:
        raise ValueError(F'''Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}''' )
    elif n_teacher == n_student:
        return list(range(n_teacher ) )
    elif n_student == 1:
        return [n_teacher - 1]
    else:
        return LAYERS_TO_SUPERVISE[n_teacher][n_student]
def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ = "student" , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_=False , lowerCAmelCase_=None , lowerCAmelCase_=None , **lowerCAmelCase_ , )-> Tuple[PreTrainedModel, List[int], List[int]]:
'''simple docstring'''
_UpperCAmelCase : List[Any] = """encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher."""
assert (e is not None) or (d is not None), _msg
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
AutoTokenizer.from_pretrained(lowerCAmelCase_ ).save_pretrained(lowerCAmelCase_ ) # purely for convenience
_UpperCAmelCase : Any = AutoModelForSeqaSeqLM.from_pretrained(lowerCAmelCase_ ).eval()
else:
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ), F'''teacher must be a model or string got type {type(lowerCAmelCase_ )}'''
_UpperCAmelCase : str = teacher.config.to_diff_dict()
try:
_UpperCAmelCase ,_UpperCAmelCase : Optional[int] = teacher.config.encoder_layers, teacher.config.decoder_layers
if e is None:
_UpperCAmelCase : Tuple = teacher_e
if d is None:
_UpperCAmelCase : Dict = teacher_d
init_kwargs.update({"""encoder_layers""": e, """decoder_layers""": d} )
except AttributeError: # T5
if hasattr(teacher.config , """num_encoder_layers""" ):
_UpperCAmelCase ,_UpperCAmelCase : int = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
else:
_UpperCAmelCase ,_UpperCAmelCase : int = teacher.config.num_layers, teacher.config.num_decoder_layers
if e is None:
_UpperCAmelCase : List[str] = teacher_e
if d is None:
_UpperCAmelCase : str = teacher_d
if hasattr(teacher.config , """num_encoder_layers""" ):
init_kwargs.update({"""num_encoder_layers""": e, """num_decoder_layers""": d} )
else:
init_kwargs.update({"""num_layers""": e, """num_decoder_layers""": d} )
# Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
init_kwargs.update(lowerCAmelCase_ )
# Copy weights
_UpperCAmelCase : Any = teacher.config_class(**lowerCAmelCase_ )
_UpperCAmelCase : Optional[Any] = AutoModelForSeqaSeqLM.from_config(lowerCAmelCase_ )
# Start by copying the full teacher state dict this will copy the first N teacher layers to the student.
_UpperCAmelCase : Optional[Any] = student.load_state_dict(teacher.state_dict() , strict=lowerCAmelCase_ )
assert info.missing_keys == [], info.missing_keys # every student key should have a teacher keys.
if copy_first_teacher_layers: # Our copying is done. We just log and save
_UpperCAmelCase ,_UpperCAmelCase : Optional[Any] = list(range(lowerCAmelCase_ ) ), list(range(lowerCAmelCase_ ) )
logger.info(
F'''Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to'''
F''' {save_path}''' )
student.save_pretrained(lowerCAmelCase_ )
return student, e_layers_to_copy, d_layers_to_copy
# Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
if e_layers_to_copy is None:
_UpperCAmelCase : List[int] = pick_layers_to_copy(lowerCAmelCase_ , lowerCAmelCase_ )
if d_layers_to_copy is None:
_UpperCAmelCase : List[int] = pick_layers_to_copy(lowerCAmelCase_ , lowerCAmelCase_ )
try:
if hasattr(
lowerCAmelCase_ , """prophetnet""" ): # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
copy_layers(teacher.prophetnet.encoder.layers , student.prophetnet.encoder.layers , lowerCAmelCase_ )
copy_layers(teacher.prophetnet.decoder.layers , student.prophetnet.decoder.layers , lowerCAmelCase_ )
else:
copy_layers(teacher.model.encoder.layers , student.model.encoder.layers , lowerCAmelCase_ )
copy_layers(teacher.model.decoder.layers , student.model.decoder.layers , lowerCAmelCase_ )
except AttributeError: # For t5, student.model.encoder.layers is called student.encoder.block
copy_layers(teacher.encoder.block , student.encoder.block , lowerCAmelCase_ )
copy_layers(teacher.decoder.block , student.decoder.block , lowerCAmelCase_ )
logger.info(
F'''Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}''' )
_UpperCAmelCase : Dict = {
"""teacher_type""": teacher.config.model_type,
"""copied_encoder_layers""": e_layers_to_copy,
"""copied_decoder_layers""": d_layers_to_copy,
}
student.save_pretrained(lowerCAmelCase_ )
# Save information about copying for easier reproducibility
return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers)
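# Hedged usage sketch (the script name make_student.py is an assumption; fire
# maps the function signature onto CLI arguments):
#
#   python make_student.py facebook/bart-large-cnn student/bart-6-3 --e 6 --d 3
#
# or, from Python (downloads the teacher checkpoint):
#
#   student, e_copied, d_copied = create_student_by_copying_alternating_layers(
#       "facebook/bart-large-cnn", save_path="student/bart-6-3", e=6, d=3
#   )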
| 349 | 1 |
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def remove_ignore_keys_( state_dict )-> None:
    '''simple docstring'''
    ignore_keys = [
"""encoder.version""",
"""decoder.version""",
"""model.encoder.version""",
"""model.decoder.version""",
"""_float_tensor""",
"""decoder.output_projection.weight""",
]
for k in ignore_keys:
        state_dict.pop(k , None )
def make_linear_from_emb( emb )-> nn.Linear:
    '''simple docstring'''
    vocab_size ,emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
return lin_layer
def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_="facebook/mbart-large-en-ro" , lowerCAmelCase_=False , lowerCAmelCase_=False )-> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = torch.load(lowerCAmelCase_ , map_location="""cpu""" )["""model"""]
remove_ignore_keys_(lowerCAmelCase_ )
_UpperCAmelCase : Union[str, Any] = state_dict["""encoder.embed_tokens.weight"""].shape[0]
_UpperCAmelCase : List[Any] = MBartConfig.from_pretrained(lowerCAmelCase_ , vocab_size=lowerCAmelCase_ )
if mbart_aa and finetuned:
_UpperCAmelCase : Union[str, Any] = """relu"""
_UpperCAmelCase : Dict = state_dict["""decoder.embed_tokens.weight"""]
_UpperCAmelCase : Any = MBartForConditionalGeneration(lowerCAmelCase_ )
model.model.load_state_dict(lowerCAmelCase_ )
if finetuned:
_UpperCAmelCase : List[Any] = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
A_ : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""fairseq_path""", type=str, help="""bart.large, bart.large.cnn or a path to a model.pt on local filesystem."""
)
parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument(
"""--hf_config""",
default="""facebook/mbart-large-cc25""",
type=str,
help="""Which huggingface architecture to use: mbart-large""",
)
parser.add_argument("""--mbart_50""", action="""store_true""", help="""whether the model is mMART-50 checkpoint""")
parser.add_argument("""--finetuned""", action="""store_true""", help="""whether the model is a fine-tuned checkpoint""")
A_ : List[Any] = parser.parse_args()
A_ : str = convert_fairseq_mbart_checkpoint_from_disk(
    args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_50=args.mbart_50
)
model.save_pretrained(args.pytorch_dump_folder_path)
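# Example invocation (paths are placeholders; the script file name is an assumption):
#
#   python convert_mbart_original_checkpoint_to_pytorch.py \
#       /path/to/fairseq/model.pt /path/to/hf_dump \
#       --hf_config facebook/mbart-large-cc25 --finetuned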
| 349 |
'''simple docstring'''
def search( list_data , key , left = 0 , right = 0 )-> int:
    '''simple docstring'''
    right = right or len(list_data ) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return search(list_data , key , left + 1 , right - 1 )
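# Quick sanity checks: the search walks inward from both ends, so it needs at
# most len(list_data) / 2 levels of recursion.
assert search([1, 2, 4, 8, 16], 8) == 3
assert search([1, 2, 4, 8, 16], 5) == -1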
if __name__ == "__main__":
import doctest
doctest.testmod()
| 349 | 1 |
'''simple docstring'''
import logging
import os
from .state import PartialState
class MultiProcessAdapter( logging.LoggerAdapter ):
    """simple docstring"""
    @staticmethod
    def _should_log( main_process_only ):
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)
    def log( self ,level ,msg ,*args ,**kwargs ):
        if PartialState._shared_state == {}:
            raise RuntimeError(
                """You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.""" )
        main_process_only = kwargs.pop("""main_process_only""" ,True )
        in_order = kwargs.pop("""in_order""" ,False )
        if self.isEnabledFor(level ):
            if self._should_log(main_process_only ):
                msg ,kwargs = self.process(msg ,kwargs )
                self.logger.log(level ,msg ,*args ,**kwargs )
            elif in_order:
                state = PartialState()
                for i in range(state.num_processes ):
                    if i == state.process_index:
                        msg ,kwargs = self.process(msg ,kwargs )
                        self.logger.log(level ,msg ,*args ,**kwargs )
                    state.wait_for_everyone()
def get_logger( name , log_level = None )-> MultiProcessAdapter:
    '''simple docstring'''
    if log_level is None:
        log_level = os.environ.get("""ACCELERATE_LOG_LEVEL""" , None )
    logger = logging.getLogger(name )
    if log_level is not None:
        logger.setLevel(log_level.upper() )
        logger.root.setLevel(log_level.upper() )
    return MultiProcessAdapter(logger , {} )
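# Typical usage of the adapter above (an Accelerator()/PartialState() must have
# been constructed first, otherwise `log` raises the RuntimeError above):
#
#   logger = get_logger(__name__, log_level="INFO")
#   logger.info("printed once, on the main process only")
#   logger.info("printed by every process", main_process_only=False)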
| 349 |
'''simple docstring'''
from datetime import datetime
import requests
def download_video( url )-> bytes:
    '''simple docstring'''
    base_url = """https://downloadgram.net/wp-json/wppress/video-downloader/video?url="""
    video_url = requests.get(base_url + url ).json()[0]["""urls"""][0]["""src"""]
    return requests.get(video_url ).content
if __name__ == "__main__":
A_ : Union[str, Any] = input("""Enter Video/IGTV url: """).strip()
A_ : Dict = f"""{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"""
with open(file_name, """wb""") as fp:
fp.write(download_video(url))
print(f"""Done. Video saved to disk as {file_name}.""")
| 349 | 1 |
'''simple docstring'''
from __future__ import annotations
def pigeon_sort( array )-> list[int]:
    '''simple docstring'''
    if len(array ) == 0:
        return array
    _min ,_max = min(array ), max(array )
    # Compute the value range covered by the holes.
    holes_range = _max - _min + 1
    holes ,holes_repeat = [0] * holes_range, [0] * holes_range
    # Fill the holes.
    for i in array:
        index = i - _min
        holes[index] = i
        holes_repeat[index] += 1
    # Write the values back into the array in sorted order.
    index = 0
    for i in range(holes_range ):
        while holes_repeat[i] > 0:
            array[index] = holes[i]
            index += 1
            holes_repeat[i] -= 1
    # Return the sorted array.
    return array
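# Quick sanity check; pigeonhole sort allocates max(array) - min(array) + 1
# holes, so it only pays off for dense integer keys.
assert pigeon_sort([8, 3, 2, 7, 4]) == [2, 3, 4, 7, 8]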
if __name__ == "__main__":
import doctest
doctest.testmod()
A_ : Optional[int] = input("""Enter numbers separated by comma:\n""")
A_ : int = [int(x) for x in user_input.split(""",""")]
print(pigeon_sort(unsorted))
| 349 |
'''simple docstring'''
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class lowercase ( unittest.TestCase ):
"""simple docstring"""
    def tearDown( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
    def dummy_image( self ):
_UpperCAmelCase : List[str] = 1
_UpperCAmelCase : List[str] = 3
_UpperCAmelCase : Union[str, Any] = (32, 32)
_UpperCAmelCase : str = floats_tensor((batch_size, num_channels) + sizes ,rng=random.Random(0 ) ).to(a_ )
return image
@property
    def dummy_cond_unet( self ):
torch.manual_seed(0 )
        _UpperCAmelCase : List[str] = UNet2DConditionModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") ,up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") ,cross_attention_dim=32 ,)
return model
@property
    def dummy_vae( self ):
torch.manual_seed(0 )
_UpperCAmelCase : str = AutoencoderKL(
block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] ,up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] ,latent_channels=4 ,)
return model
@property
    def dummy_text_encoder( self ):
torch.manual_seed(0 )
_UpperCAmelCase : Any = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1E-0_5 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1_000 ,)
return CLIPTextModel(a_ )
@property
    def dummy_extractor( self ):
def extract(*a_ ,**a_ ):
class lowercase :
"""simple docstring"""
def __init__( self ) -> Any:
_UpperCAmelCase : str = torch.ones([0] )
def _snake_case ( self ,a_ ) -> Any:
self.pixel_values.to(a_ )
return self
return Out()
return extract
def _snake_case ( self ) -> List[str]:
_UpperCAmelCase : List[str] = """cpu""" # ensure determinism for the device-dependent torch.Generator
_UpperCAmelCase : Union[str, Any] = self.dummy_cond_unet
_UpperCAmelCase : int = DDIMScheduler(
beta_start=0.0_0085 ,beta_end=0.012 ,beta_schedule="""scaled_linear""" ,clip_sample=a_ ,set_alpha_to_one=a_ ,)
_UpperCAmelCase : Optional[int] = self.dummy_vae
_UpperCAmelCase : Optional[int] = self.dummy_text_encoder
_UpperCAmelCase : str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# make sure here that pndm scheduler skips prk
_UpperCAmelCase : int = StableDiffusionPipeline(
unet=a_ ,scheduler=a_ ,vae=a_ ,text_encoder=a_ ,tokenizer=a_ ,safety_checker=a_ ,feature_extractor=self.dummy_extractor ,)
_UpperCAmelCase : Optional[Any] = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
_UpperCAmelCase : Union[str, Any] = """A painting of a squirrel eating a burger"""
_UpperCAmelCase : Optional[int] = torch.Generator(device=a_ ).manual_seed(0 )
_UpperCAmelCase : str = sd_pipe([prompt] ,generator=a_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" )
_UpperCAmelCase : int = output.images
_UpperCAmelCase : Union[str, Any] = torch.Generator(device=a_ ).manual_seed(0 )
_UpperCAmelCase : str = sd_pipe(
[prompt] ,generator=a_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" ,return_dict=a_ ,)[0]
_UpperCAmelCase : str = image[0, -3:, -3:, -1]
_UpperCAmelCase : Dict = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_UpperCAmelCase : Optional[int] = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _snake_case ( self ) -> Any:
_UpperCAmelCase : Any = """cpu""" # ensure determinism for the device-dependent torch.Generator
_UpperCAmelCase : Tuple = self.dummy_cond_unet
_UpperCAmelCase : Optional[int] = PNDMScheduler(skip_prk_steps=a_ )
_UpperCAmelCase : int = self.dummy_vae
_UpperCAmelCase : int = self.dummy_text_encoder
_UpperCAmelCase : Union[str, Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# make sure here that pndm scheduler skips prk
_UpperCAmelCase : str = StableDiffusionPipeline(
unet=a_ ,scheduler=a_ ,vae=a_ ,text_encoder=a_ ,tokenizer=a_ ,safety_checker=a_ ,feature_extractor=self.dummy_extractor ,)
_UpperCAmelCase : str = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
_UpperCAmelCase : int = """A painting of a squirrel eating a burger"""
_UpperCAmelCase : Any = torch.Generator(device=a_ ).manual_seed(0 )
_UpperCAmelCase : List[Any] = sd_pipe([prompt] ,generator=a_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" )
_UpperCAmelCase : Dict = output.images
_UpperCAmelCase : List[Any] = torch.Generator(device=a_ ).manual_seed(0 )
_UpperCAmelCase : Any = sd_pipe(
[prompt] ,generator=a_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" ,return_dict=a_ ,)[0]
_UpperCAmelCase : Optional[int] = image[0, -3:, -3:, -1]
_UpperCAmelCase : int = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_UpperCAmelCase : Union[str, Any] = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _snake_case ( self ) -> Optional[int]:
_UpperCAmelCase : Optional[int] = StableDiffusionPipeline.from_pretrained(
"""hf-internal-testing/tiny-stable-diffusion-lms-pipe""" ,safety_checker=a_ )
assert isinstance(a_ ,a_ )
assert isinstance(pipe.scheduler ,a_ )
assert pipe.safety_checker is None
_UpperCAmelCase : Dict = pipe("""example prompt""" ,num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(a_ )
_UpperCAmelCase : Any = StableDiffusionPipeline.from_pretrained(a_ )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
_UpperCAmelCase : Union[str, Any] = pipe("""example prompt""" ,num_inference_steps=2 ).images[0]
assert image is not None
@unittest.skipIf(torch_device != """cuda""" ,"""This test requires a GPU""" )
def _snake_case ( self ) -> str:
_UpperCAmelCase : Optional[int] = self.dummy_cond_unet
_UpperCAmelCase : str = PNDMScheduler(skip_prk_steps=a_ )
_UpperCAmelCase : List[str] = self.dummy_vae
_UpperCAmelCase : int = self.dummy_text_encoder
_UpperCAmelCase : str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# put models in fp16
_UpperCAmelCase : str = unet.half()
_UpperCAmelCase : List[str] = vae.half()
_UpperCAmelCase : Dict = bert.half()
# make sure here that pndm scheduler skips prk
_UpperCAmelCase : Dict = StableDiffusionPipeline(
unet=a_ ,scheduler=a_ ,vae=a_ ,text_encoder=a_ ,tokenizer=a_ ,safety_checker=a_ ,feature_extractor=self.dummy_extractor ,)
_UpperCAmelCase : List[str] = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
_UpperCAmelCase : str = """A painting of a squirrel eating a burger"""
_UpperCAmelCase : int = sd_pipe([prompt] ,num_inference_steps=2 ,output_type="""np""" ).images
assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class lowercase ( unittest.TestCase ):
"""simple docstring"""
    def tearDown( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _snake_case ( self ) -> str:
_UpperCAmelCase : List[str] = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" ,safety_checker=a_ )
_UpperCAmelCase : Dict = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
_UpperCAmelCase : int = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
_UpperCAmelCase : List[Any] = (
"""portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"""
""" coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"""
""" anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"""
""" children from bahnhof zoo, detailed """
)
_UpperCAmelCase : Any = 4_003_660_346
_UpperCAmelCase : List[Any] = 7
# without safety guidance (sld_guidance_scale = 0)
_UpperCAmelCase : int = torch.manual_seed(a_ )
_UpperCAmelCase : str = sd_pipe(
[prompt] ,generator=a_ ,guidance_scale=a_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=0 ,)
_UpperCAmelCase : str = output.images
_UpperCAmelCase : Optional[int] = image[0, -3:, -3:, -1]
_UpperCAmelCase : List[str] = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        # with safety guidance (strong configuration)
_UpperCAmelCase : List[str] = torch.manual_seed(a_ )
_UpperCAmelCase : Optional[Any] = sd_pipe(
[prompt] ,generator=a_ ,guidance_scale=a_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=2_000 ,sld_warmup_steps=7 ,sld_threshold=0.025 ,sld_momentum_scale=0.5 ,sld_mom_beta=0.7 ,)
_UpperCAmelCase : List[str] = output.images
_UpperCAmelCase : List[str] = image[0, -3:, -3:, -1]
_UpperCAmelCase : List[str] = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _snake_case ( self ) -> int:
_UpperCAmelCase : Any = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" ,safety_checker=a_ )
_UpperCAmelCase : Union[str, Any] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
_UpperCAmelCase : Union[str, Any] = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
_UpperCAmelCase : Any = """padme amidala taking a bath artwork, safe for work, no nudity"""
_UpperCAmelCase : Optional[Any] = 2_734_971_755
_UpperCAmelCase : Optional[int] = 7
_UpperCAmelCase : int = torch.manual_seed(a_ )
_UpperCAmelCase : int = sd_pipe(
[prompt] ,generator=a_ ,guidance_scale=a_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=0 ,)
_UpperCAmelCase : Optional[int] = output.images
_UpperCAmelCase : List[Any] = image[0, -3:, -3:, -1]
_UpperCAmelCase : Optional[int] = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
_UpperCAmelCase : Optional[int] = torch.manual_seed(a_ )
_UpperCAmelCase : int = sd_pipe(
[prompt] ,generator=a_ ,guidance_scale=a_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=2_000 ,sld_warmup_steps=7 ,sld_threshold=0.025 ,sld_momentum_scale=0.5 ,sld_mom_beta=0.7 ,)
_UpperCAmelCase : Union[str, Any] = output.images
_UpperCAmelCase : Any = image[0, -3:, -3:, -1]
_UpperCAmelCase : List[Any] = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _snake_case ( self ) -> Any:
_UpperCAmelCase : Any = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" )
_UpperCAmelCase : List[str] = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
_UpperCAmelCase : Optional[int] = (
"""the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."""
""" leyendecker"""
)
_UpperCAmelCase : Dict = 1_044_355_234
_UpperCAmelCase : int = 12
_UpperCAmelCase : Optional[Any] = torch.manual_seed(a_ )
_UpperCAmelCase : List[str] = sd_pipe(
[prompt] ,generator=a_ ,guidance_scale=a_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=0 ,)
_UpperCAmelCase : List[str] = output.images
_UpperCAmelCase : Union[str, Any] = image[0, -3:, -3:, -1]
_UpperCAmelCase : Dict = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-7
_UpperCAmelCase : Tuple = torch.manual_seed(a_ )
_UpperCAmelCase : Dict = sd_pipe(
[prompt] ,generator=a_ ,guidance_scale=a_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=2_000 ,sld_warmup_steps=7 ,sld_threshold=0.025 ,sld_momentum_scale=0.5 ,sld_mom_beta=0.7 ,)
_UpperCAmelCase : Optional[Any] = output.images
_UpperCAmelCase : Dict = image[0, -3:, -3:, -1]
_UpperCAmelCase : int = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
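# Hedged usage sketch of the pipeline exercised above (model download and a
# CUDA device are assumed; the sld_* values are the "strong" configuration
# from these tests):
from diffusers import StableDiffusionPipelineSafe

pipe = StableDiffusionPipelineSafe.from_pretrained("runwayml/stable-diffusion-v1-5").to("cuda")
image = pipe(
    "portrait photo of an astronaut",
    sld_guidance_scale=2_000,  # strength of the safe-latent-diffusion guidance term
    sld_warmup_steps=7,        # diffusion steps before safety guidance kicks in
    sld_threshold=0.025,
).images[0]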
| 349 | 1 |
'''simple docstring'''
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class lowercase :
"""simple docstring"""
@staticmethod
def _snake_case ( *a_ ,**a_ ) -> Optional[Any]:
pass
@is_pipeline_test
@require_vision
class lowercase ( unittest.TestCase ):
"""simple docstring"""
@require_torch
def _snake_case ( self ) -> Union[str, Any]:
_UpperCAmelCase : List[Any] = pipeline(
model="""hf-internal-testing/tiny-random-clip-zero-shot-image-classification""" ,)
_UpperCAmelCase : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
_UpperCAmelCase : List[Any] = image_classifier(a_ ,candidate_labels=["""a""", """b""", """c"""] )
        # The scores are so close that floating-point error makes the ordering
        # unstable across python and torch versions.
self.assertIn(
nested_simplify(a_ ) ,[
[{"""score""": 0.333, """label""": """a"""}, {"""score""": 0.333, """label""": """b"""}, {"""score""": 0.333, """label""": """c"""}],
[{"""score""": 0.333, """label""": """a"""}, {"""score""": 0.333, """label""": """c"""}, {"""score""": 0.333, """label""": """b"""}],
] ,)
_UpperCAmelCase : Dict = image_classifier([image] * 5 ,candidate_labels=["""A""", """B""", """C"""] ,batch_size=2 )
self.assertEqual(
nested_simplify(a_ ) ,[
[
{"""score""": 0.333, """label""": ANY(a_ )},
{"""score""": 0.333, """label""": ANY(a_ )},
{"""score""": 0.333, """label""": ANY(a_ )},
],
[
{"""score""": 0.333, """label""": ANY(a_ )},
{"""score""": 0.333, """label""": ANY(a_ )},
{"""score""": 0.333, """label""": ANY(a_ )},
],
[
{"""score""": 0.333, """label""": ANY(a_ )},
{"""score""": 0.333, """label""": ANY(a_ )},
{"""score""": 0.333, """label""": ANY(a_ )},
],
[
{"""score""": 0.333, """label""": ANY(a_ )},
{"""score""": 0.333, """label""": ANY(a_ )},
{"""score""": 0.333, """label""": ANY(a_ )},
],
[
{"""score""": 0.333, """label""": ANY(a_ )},
{"""score""": 0.333, """label""": ANY(a_ )},
{"""score""": 0.333, """label""": ANY(a_ )},
],
] ,)
@require_tf
def _snake_case ( self ) -> Tuple:
_UpperCAmelCase : int = pipeline(
model="""hf-internal-testing/tiny-random-clip-zero-shot-image-classification""" ,framework="""tf""" )
_UpperCAmelCase : int = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
_UpperCAmelCase : Any = image_classifier(a_ ,candidate_labels=["""a""", """b""", """c"""] )
self.assertEqual(
nested_simplify(a_ ) ,[{"""score""": 0.333, """label""": """a"""}, {"""score""": 0.333, """label""": """b"""}, {"""score""": 0.333, """label""": """c"""}] ,)
_UpperCAmelCase : List[str] = image_classifier([image] * 5 ,candidate_labels=["""A""", """B""", """C"""] ,batch_size=2 )
self.assertEqual(
nested_simplify(a_ ) ,[
[
{"""score""": 0.333, """label""": ANY(a_ )},
{"""score""": 0.333, """label""": ANY(a_ )},
{"""score""": 0.333, """label""": ANY(a_ )},
],
[
{"""score""": 0.333, """label""": ANY(a_ )},
{"""score""": 0.333, """label""": ANY(a_ )},
{"""score""": 0.333, """label""": ANY(a_ )},
],
[
{"""score""": 0.333, """label""": ANY(a_ )},
{"""score""": 0.333, """label""": ANY(a_ )},
{"""score""": 0.333, """label""": ANY(a_ )},
],
[
{"""score""": 0.333, """label""": ANY(a_ )},
{"""score""": 0.333, """label""": ANY(a_ )},
{"""score""": 0.333, """label""": ANY(a_ )},
],
[
{"""score""": 0.333, """label""": ANY(a_ )},
{"""score""": 0.333, """label""": ANY(a_ )},
{"""score""": 0.333, """label""": ANY(a_ )},
],
] ,)
@slow
@require_torch
def _snake_case ( self ) -> Optional[int]:
_UpperCAmelCase : Union[str, Any] = pipeline(
task="""zero-shot-image-classification""" ,model="""openai/clip-vit-base-patch32""" ,)
# This is an image of 2 cats with remotes and no planes
_UpperCAmelCase : Tuple = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
_UpperCAmelCase : Optional[int] = image_classifier(a_ ,candidate_labels=["""cat""", """plane""", """remote"""] )
self.assertEqual(
nested_simplify(a_ ) ,[
{"""score""": 0.511, """label""": """remote"""},
{"""score""": 0.485, """label""": """cat"""},
{"""score""": 0.004, """label""": """plane"""},
] ,)
_UpperCAmelCase : Optional[int] = image_classifier([image] * 5 ,candidate_labels=["""cat""", """plane""", """remote"""] ,batch_size=2 )
self.assertEqual(
nested_simplify(a_ ) ,[
[
{"""score""": 0.511, """label""": """remote"""},
{"""score""": 0.485, """label""": """cat"""},
{"""score""": 0.004, """label""": """plane"""},
],
]
* 5 ,)
@slow
@require_tf
def _snake_case ( self ) -> List[Any]:
_UpperCAmelCase : Any = pipeline(
task="""zero-shot-image-classification""" ,model="""openai/clip-vit-base-patch32""" ,framework="""tf""" )
# This is an image of 2 cats with remotes and no planes
_UpperCAmelCase : Optional[int] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
_UpperCAmelCase : List[Any] = image_classifier(a_ ,candidate_labels=["""cat""", """plane""", """remote"""] )
self.assertEqual(
nested_simplify(a_ ) ,[
{"""score""": 0.511, """label""": """remote"""},
{"""score""": 0.485, """label""": """cat"""},
{"""score""": 0.004, """label""": """plane"""},
] ,)
_UpperCAmelCase : Dict = image_classifier([image] * 5 ,candidate_labels=["""cat""", """plane""", """remote"""] ,batch_size=2 )
self.assertEqual(
nested_simplify(a_ ) ,[
[
{"""score""": 0.511, """label""": """remote"""},
{"""score""": 0.485, """label""": """cat"""},
{"""score""": 0.004, """label""": """plane"""},
],
]
* 5 ,)
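# Minimal usage of the pipeline exercised above (image URLs are accepted
# directly; scores and ordering follow the slow-test expectations):
from transformers import pipeline

classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
preds = classifier(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    candidate_labels=["cat", "plane", "remote"],
)
# preds is sorted by score, e.g. "remote" (~0.51), then "cat" (~0.49), then "plane"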
| 349 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
A_ : str = {
"""configuration_roberta_prelayernorm""": [
"""ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""RobertaPreLayerNormConfig""",
"""RobertaPreLayerNormOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Optional[Any] = [
"""ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RobertaPreLayerNormForCausalLM""",
"""RobertaPreLayerNormForMaskedLM""",
"""RobertaPreLayerNormForMultipleChoice""",
"""RobertaPreLayerNormForQuestionAnswering""",
"""RobertaPreLayerNormForSequenceClassification""",
"""RobertaPreLayerNormForTokenClassification""",
"""RobertaPreLayerNormModel""",
"""RobertaPreLayerNormPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Optional[int] = [
"""TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRobertaPreLayerNormForCausalLM""",
"""TFRobertaPreLayerNormForMaskedLM""",
"""TFRobertaPreLayerNormForMultipleChoice""",
"""TFRobertaPreLayerNormForQuestionAnswering""",
"""TFRobertaPreLayerNormForSequenceClassification""",
"""TFRobertaPreLayerNormForTokenClassification""",
"""TFRobertaPreLayerNormMainLayer""",
"""TFRobertaPreLayerNormModel""",
"""TFRobertaPreLayerNormPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Optional[Any] = [
"""FlaxRobertaPreLayerNormForCausalLM""",
"""FlaxRobertaPreLayerNormForMaskedLM""",
"""FlaxRobertaPreLayerNormForMultipleChoice""",
"""FlaxRobertaPreLayerNormForQuestionAnswering""",
"""FlaxRobertaPreLayerNormForSequenceClassification""",
"""FlaxRobertaPreLayerNormForTokenClassification""",
"""FlaxRobertaPreLayerNormModel""",
"""FlaxRobertaPreLayerNormPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
A_ : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
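# Hedged sketch of what the lazy wiring above buys you: `import transformers`
# stays cheap, and the heavy torch/tf/flax modeling code is only imported when
# the corresponding attribute is first touched.
#
#   import transformers
#   cfg = transformers.RobertaPreLayerNormConfig()       # config only, no torch needed
#   model_cls = transformers.RobertaPreLayerNormModel    # first access triggers the torch import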
| 349 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
A_ : Dict = logging.get_logger(__name__)
A_ : List[str] = {"""vocab_file""": """spm_char.model"""}
A_ : Dict = {
"""vocab_file""": {
"""microsoft/speecht5_asr""": """https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model""",
"""microsoft/speecht5_tts""": """https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model""",
"""microsoft/speecht5_vc""": """https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model""",
}
}
A_ : Tuple = {
"""microsoft/speecht5_asr""": 1_0_2_4,
"""microsoft/speecht5_tts""": 1_0_2_4,
"""microsoft/speecht5_vc""": 1_0_2_4,
}
class lowercase ( PreTrainedTokenizer ):
"""simple docstring"""
UpperCAmelCase = VOCAB_FILES_NAMES
UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase = ["""input_ids""", """attention_mask"""]
def __init__( self ,a_ ,a_="<s>" ,a_="</s>" ,a_="<unk>" ,a_="<pad>" ,a_ = None ,**a_ ,) -> None:
_UpperCAmelCase : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=a_ ,eos_token=a_ ,unk_token=a_ ,pad_token=a_ ,sp_model_kwargs=self.sp_model_kwargs ,**a_ ,)
_UpperCAmelCase : Optional[Any] = vocab_file
_UpperCAmelCase : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(a_ )
@property
    def vocab_size( self ):
return self.sp_model.get_piece_size()
def _snake_case ( self ) -> int:
_UpperCAmelCase : Union[str, Any] = {self.convert_ids_to_tokens(a_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> Optional[int]:
_UpperCAmelCase : Optional[Any] = self.__dict__.copy()
_UpperCAmelCase : Any = None
return state
def __setstate__( self ,a_ ) -> Any:
_UpperCAmelCase : str = d
# for backward compatibility
if not hasattr(self ,"""sp_model_kwargs""" ):
_UpperCAmelCase : Optional[Any] = {}
_UpperCAmelCase : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _snake_case ( self ,a_ ) -> List[str]:
return self.sp_model.encode(a_ ,out_type=a_ )
def _snake_case ( self ,a_ ) -> Union[str, Any]:
return self.sp_model.piece_to_id(a_ )
def _snake_case ( self ,a_ ) -> Union[str, Any]:
_UpperCAmelCase : Tuple = self.sp_model.IdToPiece(a_ )
return token
def _snake_case ( self ,a_ ) -> int:
_UpperCAmelCase : Optional[int] = []
_UpperCAmelCase : Union[str, Any] = """"""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(a_ ) + token
_UpperCAmelCase : str = []
else:
current_sub_tokens.append(a_ )
out_string += self.sp_model.decode(a_ )
return out_string.strip()
def _snake_case ( self ,a_ ,a_=None ) -> List[int]:
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def _snake_case ( self ,a_ ,a_ = None ,a_ = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=a_ ,token_ids_a=a_ ,already_has_special_tokens=a_ )
_UpperCAmelCase : Any = [1]
if token_ids_a is None:
return ([0] * len(a_ )) + suffix_ones
return ([0] * len(a_ )) + ([0] * len(a_ )) + suffix_ones
def _snake_case ( self ,a_ ,a_ = None ) -> Tuple[str]:
if not os.path.isdir(a_ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
_UpperCAmelCase : Dict = os.path.join(
a_ ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,a_ )
elif not os.path.isfile(self.vocab_file ):
with open(a_ ,"""wb""" ) as fi:
_UpperCAmelCase : int = self.sp_model.serialized_model_proto()
fi.write(a_ )
return (out_vocab_file,)
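# Minimal usage sketch of the tokenizer above (character-level SentencePiece;
# a checkpoint download is assumed):
from transformers import SpeechT5Tokenizer

tokenizer = SpeechT5Tokenizer.from_pretrained("microsoft/speecht5_tts")
ids = tokenizer("Hello world").input_ids   # roughly one piece per character, plus </s>
text = tokenizer.decode(ids, skip_special_tokens=True)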
| 349 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A_ : Union[str, Any] = logging.get_logger(__name__)
A_ : Any = {
"""hustvl/yolos-small""": """https://huggingface.co/hustvl/yolos-small/resolve/main/config.json""",
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class lowercase ( PretrainedConfig ):
"""simple docstring"""
UpperCAmelCase = """yolos"""
def __init__( self ,a_=768 ,a_=12 ,a_=12 ,a_=3_072 ,a_="gelu" ,a_=0.0 ,a_=0.0 ,a_=0.02 ,a_=1E-1_2 ,a_=[512, 864] ,a_=16 ,a_=3 ,a_=True ,a_=100 ,a_=True ,a_=False ,a_=1 ,a_=5 ,a_=2 ,a_=5 ,a_=2 ,a_=0.1 ,**a_ ,) -> List[str]:
super().__init__(**a_ )
_UpperCAmelCase : Optional[Any] = hidden_size
_UpperCAmelCase : Optional[Any] = num_hidden_layers
_UpperCAmelCase : Tuple = num_attention_heads
_UpperCAmelCase : Optional[Any] = intermediate_size
_UpperCAmelCase : Union[str, Any] = hidden_act
_UpperCAmelCase : List[str] = hidden_dropout_prob
_UpperCAmelCase : Optional[int] = attention_probs_dropout_prob
_UpperCAmelCase : List[Any] = initializer_range
_UpperCAmelCase : Union[str, Any] = layer_norm_eps
_UpperCAmelCase : int = image_size
_UpperCAmelCase : Dict = patch_size
_UpperCAmelCase : Tuple = num_channels
_UpperCAmelCase : Optional[Any] = qkv_bias
_UpperCAmelCase : List[Any] = num_detection_tokens
_UpperCAmelCase : Tuple = use_mid_position_embeddings
_UpperCAmelCase : int = auxiliary_loss
# Hungarian matcher
_UpperCAmelCase : Dict = class_cost
_UpperCAmelCase : Dict = bbox_cost
_UpperCAmelCase : Optional[int] = giou_cost
# Loss coefficients
_UpperCAmelCase : int = bbox_loss_coefficient
_UpperCAmelCase : Optional[Any] = giou_loss_coefficient
_UpperCAmelCase : Union[str, Any] = eos_coefficient
class lowercase ( OnnxConfig ):
"""simple docstring"""
UpperCAmelCase = version.parse("""1.11""" )
@property
def _snake_case ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def _snake_case ( self ) -> float:
return 1E-4
@property
def _snake_case ( self ) -> int:
return 12
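# Minimal usage sketch: build a randomly initialised YOLOS detector from this
# config (YolosConfig / YolosForObjectDetection are the public transformers names).
from transformers import YolosConfig, YolosForObjectDetection

config = YolosConfig(image_size=[512, 864], num_detection_tokens=100)
model = YolosForObjectDetection(config)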
| 349 | 1 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_2d_blocks import UNetMidBlock2D, get_down_block, get_up_block
@dataclass
class lowercase ( BaseOutput ):
"""simple docstring"""
UpperCAmelCase = 42
class lowercase ( nn.Module ):
"""simple docstring"""
def __init__( self ,a_=3 ,a_=3 ,a_=("DownEncoderBlock2D",) ,a_=(64,) ,a_=2 ,a_=32 ,a_="silu" ,a_=True ,) -> Dict:
super().__init__()
_UpperCAmelCase : List[Any] = layers_per_block
        _UpperCAmelCase : Tuple = torch.nn.Conv2d(
a_ ,block_out_channels[0] ,kernel_size=3 ,stride=1 ,padding=1 ,)
_UpperCAmelCase : str = None
_UpperCAmelCase : int = nn.ModuleList([] )
# down
_UpperCAmelCase : Tuple = block_out_channels[0]
for i, down_block_type in enumerate(a_ ):
_UpperCAmelCase : Union[str, Any] = output_channel
_UpperCAmelCase : Any = block_out_channels[i]
_UpperCAmelCase : List[Any] = i == len(a_ ) - 1
_UpperCAmelCase : Tuple = get_down_block(
a_ ,num_layers=self.layers_per_block ,in_channels=a_ ,out_channels=a_ ,add_downsample=not is_final_block ,resnet_eps=1E-6 ,downsample_padding=0 ,resnet_act_fn=a_ ,resnet_groups=a_ ,attention_head_dim=a_ ,temb_channels=a_ ,)
self.down_blocks.append(a_ )
# mid
        _UpperCAmelCase : Tuple = UNetMidBlock2D(
in_channels=block_out_channels[-1] ,resnet_eps=1E-6 ,resnet_act_fn=a_ ,output_scale_factor=1 ,resnet_time_scale_shift="""default""" ,attention_head_dim=block_out_channels[-1] ,resnet_groups=a_ ,temb_channels=a_ ,)
# out
_UpperCAmelCase : List[str] = nn.GroupNorm(num_channels=block_out_channels[-1] ,num_groups=a_ ,eps=1E-6 )
_UpperCAmelCase : List[Any] = nn.SiLU()
_UpperCAmelCase : int = 2 * out_channels if double_z else out_channels
        _UpperCAmelCase : Optional[Any] = nn.Conv2d(block_out_channels[-1] ,a_ ,3 ,padding=1 )
_UpperCAmelCase : Optional[Any] = False
def _snake_case ( self ,a_ ) -> Optional[Any]:
_UpperCAmelCase : Any = x
_UpperCAmelCase : int = self.conv_in(a_ )
if self.training and self.gradient_checkpointing:
def create_custom_forward(a_ ):
def custom_forward(*a_ ):
return module(*a_ )
return custom_forward
# down
if is_torch_version(""">=""" ,"""1.11.0""" ):
for down_block in self.down_blocks:
_UpperCAmelCase : Dict = torch.utils.checkpoint.checkpoint(
create_custom_forward(a_ ) ,a_ ,use_reentrant=a_ )
# middle
_UpperCAmelCase : Union[str, Any] = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) ,a_ ,use_reentrant=a_ )
else:
for down_block in self.down_blocks:
_UpperCAmelCase : List[Any] = torch.utils.checkpoint.checkpoint(create_custom_forward(a_ ) ,a_ )
# middle
_UpperCAmelCase : Tuple = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) ,a_ )
else:
# down
for down_block in self.down_blocks:
_UpperCAmelCase : Tuple = down_block(a_ )
# middle
_UpperCAmelCase : Dict = self.mid_block(a_ )
# post-process
_UpperCAmelCase : str = self.conv_norm_out(a_ )
_UpperCAmelCase : Dict = self.conv_act(a_ )
_UpperCAmelCase : Optional[int] = self.conv_out(a_ )
return sample
class lowercase ( nn.Module ):
"""simple docstring"""
def __init__( self ,a_=3 ,a_=3 ,a_=("UpDecoderBlock2D",) ,a_=(64,) ,a_=2 ,a_=32 ,a_="silu" ,a_="group" ,) -> List[Any]:
super().__init__()
_UpperCAmelCase : List[str] = layers_per_block
_UpperCAmelCase : int = nn.Convad(
a_ ,block_out_channels[-1] ,kernel_size=3 ,stride=1 ,padding=1 ,)
_UpperCAmelCase : Dict = None
_UpperCAmelCase : List[str] = nn.ModuleList([] )
_UpperCAmelCase : Optional[Any] = in_channels if norm_type == """spatial""" else None
# mid
_UpperCAmelCase : Union[str, Any] = UNetMidBlockaD(
in_channels=block_out_channels[-1] ,resnet_eps=1E-6 ,resnet_act_fn=a_ ,output_scale_factor=1 ,resnet_time_scale_shift="""default""" if norm_type == """group""" else norm_type ,attention_head_dim=block_out_channels[-1] ,resnet_groups=a_ ,temb_channels=a_ ,)
# up
_UpperCAmelCase : str = list(reversed(a_ ) )
_UpperCAmelCase : str = reversed_block_out_channels[0]
for i, up_block_type in enumerate(a_ ):
_UpperCAmelCase : str = output_channel
_UpperCAmelCase : Optional[int] = reversed_block_out_channels[i]
_UpperCAmelCase : Dict = i == len(a_ ) - 1
_UpperCAmelCase : List[str] = get_up_block(
a_ ,num_layers=self.layers_per_block + 1 ,in_channels=a_ ,out_channels=a_ ,prev_output_channel=a_ ,add_upsample=not is_final_block ,resnet_eps=1E-6 ,resnet_act_fn=a_ ,resnet_groups=a_ ,attention_head_dim=a_ ,temb_channels=a_ ,resnet_time_scale_shift=a_ ,)
self.up_blocks.append(a_ )
_UpperCAmelCase : Union[str, Any] = output_channel
# out
if norm_type == "spatial":
_UpperCAmelCase : Optional[Any] = SpatialNorm(block_out_channels[0] ,a_ )
else:
_UpperCAmelCase : List[str] = nn.GroupNorm(num_channels=block_out_channels[0] ,num_groups=a_ ,eps=1E-6 )
_UpperCAmelCase : Optional[int] = nn.SiLU()
_UpperCAmelCase : Any = nn.Convad(block_out_channels[0] ,a_ ,3 ,padding=1 )
_UpperCAmelCase : List[Any] = False
def _snake_case ( self ,a_ ,a_=None ) -> Any:
_UpperCAmelCase : Optional[Any] = z
_UpperCAmelCase : List[str] = self.conv_in(a_ )
_UpperCAmelCase : Optional[Any] = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
            def create_custom_forward(module ):
                def custom_forward(*inputs ):
                    return module(*inputs )
                return custom_forward
if is_torch_version(""">=""" ,"""1.11.0""" ):
# middle
_UpperCAmelCase : Dict = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) ,a_ ,a_ ,use_reentrant=a_ )
_UpperCAmelCase : Optional[Any] = sample.to(a_ )
# up
for up_block in self.up_blocks:
_UpperCAmelCase : int = torch.utils.checkpoint.checkpoint(
create_custom_forward(a_ ) ,a_ ,a_ ,use_reentrant=a_ )
else:
# middle
_UpperCAmelCase : List[str] = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) ,a_ ,a_ )
_UpperCAmelCase : Dict = sample.to(a_ )
# up
for up_block in self.up_blocks:
_UpperCAmelCase : Union[str, Any] = torch.utils.checkpoint.checkpoint(create_custom_forward(a_ ) ,a_ ,a_ )
else:
# middle
_UpperCAmelCase : List[Any] = self.mid_block(a_ ,a_ )
_UpperCAmelCase : str = sample.to(a_ )
# up
for up_block in self.up_blocks:
_UpperCAmelCase : Union[str, Any] = up_block(a_ ,a_ )
# post-process
if latent_embeds is None:
_UpperCAmelCase : Optional[Any] = self.conv_norm_out(a_ )
else:
_UpperCAmelCase : Optional[Any] = self.conv_norm_out(a_ ,a_ )
_UpperCAmelCase : Optional[Any] = self.conv_act(a_ )
_UpperCAmelCase : List[str] = self.conv_out(a_ )
return sample
class lowercase ( nn.Module ):
"""simple docstring"""
def __init__( self ,a_ ,a_ ,a_ ,a_=None ,a_="random" ,a_=False ,a_=True ) -> List[str]:
super().__init__()
_UpperCAmelCase : Dict = n_e
_UpperCAmelCase : Any = vq_embed_dim
_UpperCAmelCase : Union[str, Any] = beta
_UpperCAmelCase : Tuple = legacy
_UpperCAmelCase : List[Any] = nn.Embedding(self.n_e ,self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e ,1.0 / self.n_e )
_UpperCAmelCase : str = remap
if self.remap is not None:
self.register_buffer("""used""" ,torch.tensor(np.load(self.remap ) ) )
_UpperCAmelCase : Optional[Any] = self.used.shape[0]
_UpperCAmelCase : str = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
_UpperCAmelCase : Tuple = self.re_embed
_UpperCAmelCase : int = self.re_embed + 1
print(
f'''Remapping {self.n_e} indices to {self.re_embed} indices. '''
f'''Using {self.unknown_index} for unknown indices.''' )
else:
_UpperCAmelCase : Any = n_e
_UpperCAmelCase : Dict = sane_index_shape
def _snake_case ( self ,a_ ) -> Tuple:
_UpperCAmelCase : Optional[Any] = inds.shape
assert len(a_ ) > 1
_UpperCAmelCase : str = inds.reshape(ishape[0] ,-1 )
_UpperCAmelCase : List[Any] = self.used.to(a_ )
_UpperCAmelCase : Any = (inds[:, :, None] == used[None, None, ...]).long()
_UpperCAmelCase : List[Any] = match.argmax(-1 )
_UpperCAmelCase : str = match.sum(2 ) < 1
if self.unknown_index == "random":
_UpperCAmelCase : int = torch.randint(0 ,self.re_embed ,size=new[unknown].shape ).to(device=new.device )
else:
_UpperCAmelCase : str = self.unknown_index
return new.reshape(a_ )
def _snake_case ( self ,a_ ) -> Optional[int]:
_UpperCAmelCase : Optional[Any] = inds.shape
assert len(a_ ) > 1
_UpperCAmelCase : int = inds.reshape(ishape[0] ,-1 )
_UpperCAmelCase : Union[str, Any] = self.used.to(a_ )
if self.re_embed > self.used.shape[0]: # extra token
_UpperCAmelCase : Tuple = 0 # simply set to zero
_UpperCAmelCase : Any = torch.gather(used[None, :][inds.shape[0] * [0], :] ,1 ,a_ )
return back.reshape(a_ )
def _snake_case ( self ,a_ ) -> Union[str, Any]:
# reshape z -> (batch, height, width, channel) and flatten
_UpperCAmelCase : Optional[int] = z.permute(0 ,2 ,3 ,1 ).contiguous()
_UpperCAmelCase : Optional[Any] = z.view(-1 ,self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
_UpperCAmelCase : Dict = torch.argmin(torch.cdist(a_ ,self.embedding.weight ) ,dim=1 )
_UpperCAmelCase : List[Any] = self.embedding(a_ ).view(z.shape )
_UpperCAmelCase : List[str] = None
_UpperCAmelCase : Dict = None
# compute loss for embedding
if not self.legacy:
_UpperCAmelCase : List[str] = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
_UpperCAmelCase : str = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
_UpperCAmelCase : Any = z + (z_q - z).detach()
# reshape back to match original input shape
_UpperCAmelCase : Optional[Any] = z_q.permute(0 ,3 ,1 ,2 ).contiguous()
if self.remap is not None:
_UpperCAmelCase : Union[str, Any] = min_encoding_indices.reshape(z.shape[0] ,-1 ) # add batch axis
_UpperCAmelCase : str = self.remap_to_used(a_ )
_UpperCAmelCase : Dict = min_encoding_indices.reshape(-1 ,1 ) # flatten
if self.sane_index_shape:
_UpperCAmelCase : Union[str, Any] = min_encoding_indices.reshape(z_q.shape[0] ,z_q.shape[2] ,z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def _snake_case ( self ,a_ ,a_ ) -> str:
# shape specifying (batch, height, width, channel)
if self.remap is not None:
_UpperCAmelCase : List[Any] = indices.reshape(shape[0] ,-1 ) # add batch axis
_UpperCAmelCase : Dict = self.unmap_to_all(a_ )
_UpperCAmelCase : Dict = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
_UpperCAmelCase : Optional[Any] = self.embedding(a_ )
if shape is not None:
_UpperCAmelCase : int = z_q.view(a_ )
# reshape back to match original input shape
_UpperCAmelCase : str = z_q.permute(0 ,3 ,1 ,2 ).contiguous()
return z_q
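# --- Illustrative aside (not part of the original module) ---------------------
# The quantization above is a nearest-neighbour lookup in a learned codebook
# followed by a straight-through gradient. A minimal self-contained sketch;
# the codebook size and latent shapes below are made up for illustration:
import torch
import torch.nn as nn

_codebook = nn.Embedding(16, 4)  # n_e=16 codes of dimension 4
_z = torch.randn(2, 4, 8, 8)  # latents: (batch, channel, height, width)
_z_flat = _z.permute(0, 2, 3, 1).reshape(-1, 4)  # flatten to (N, vq_embed_dim)
_idx = torch.argmin(torch.cdist(_z_flat, _codebook.weight), dim=1)  # nearest code per vector
_z_q = _codebook(_idx).view(2, 8, 8, 4).permute(0, 3, 1, 2)  # back to image layout
_z_q = _z + (_z_q - _z).detach()  # straight-through estimator: gradients flow to z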
class DiagonalGaussianDistribution ( object ):
"""simple docstring"""
    def __init__( self ,parameters ,deterministic=False ) -> List[str]:
        self.parameters = parameters
        self.mean ,self.logvar = torch.chunk(parameters ,2 ,dim=1 )
        self.logvar = torch.clamp(self.logvar ,-30.0 ,20.0 )
        self.deterministic = deterministic
        self.std = torch.exp(0.5 * self.logvar )
        self.var = torch.exp(self.logvar )
        if self.deterministic:
            self.var = self.std = torch.zeros_like(
                self.mean ,device=self.parameters.device ,dtype=self.parameters.dtype )
def _snake_case ( self ,a_ = None ) -> torch.FloatTensor:
# make sure sample is on the same device as the parameters and has same dtype
        sample = randn_tensor(
            self.mean.shape ,generator=a_ ,device=self.parameters.device ,dtype=self.parameters.dtype )
        x = self.mean + self.std * sample
return x
    def _snake_case ( self ,other=None ) -> List[Any]:
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean ,2 ) + self.var - 1.0 - self.logvar ,dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean ,2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar ,dim=[1, 2, 3] ,)
    def _snake_case ( self ,sample ,dims=[1, 2, 3] ) -> Tuple:
        if self.deterministic:
            return torch.Tensor([0.0] )
        logtwopi = np.log(2.0 * np.pi )
        return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean ,2 ) / self.var ,dim=dims )
def _snake_case ( self ) -> Optional[int]:
return self.mean
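# --- Illustrative aside (standalone) ------------------------------------------
# The reparameterization and KL maths used by the diagonal-Gaussian posterior
# above, as plain functions; shapes are made up:
import torch

def _sample_diag_gaussian(mean: torch.Tensor, logvar: torch.Tensor) -> torch.Tensor:
    logvar = torch.clamp(logvar, -30.0, 20.0)  # same stabilization as the class
    std = torch.exp(0.5 * logvar)
    return mean + std * torch.randn_like(mean)

def _kl_to_standard_normal(mean: torch.Tensor, logvar: torch.Tensor) -> torch.Tensor:
    # 0.5 * sum(mu^2 + var - 1 - logvar) over the non-batch dimensions
    return 0.5 * torch.sum(mean.pow(2) + logvar.exp() - 1.0 - logvar, dim=[1, 2, 3])

_mean = torch.zeros(2, 4, 8, 8)
_logvar = torch.zeros(2, 4, 8, 8)
print(_sample_diag_gaussian(_mean, _logvar).shape, _kl_to_standard_normal(_mean, _logvar))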
| 349 |
'''simple docstring'''
import unittest
from knapsack import greedy_knapsack as kp
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def _snake_case ( self ) -> Optional[Any]:
_UpperCAmelCase : Any = [10, 20, 30, 40, 50, 60]
_UpperCAmelCase : Dict = [2, 4, 6, 8, 10, 12]
_UpperCAmelCase : Optional[int] = 100
self.assertEqual(kp.calc_profit(a_ ,a_ ,a_ ) ,210 )
def _snake_case ( self ) -> Union[str, Any]:
        self.assertRaisesRegex(ValueError ,"""max_weight must greater than zero.""" )
def _snake_case ( self ) -> Any:
        self.assertRaisesRegex(ValueError ,"""Weight can not be negative.""" )
def _snake_case ( self ) -> Optional[Any]:
        self.assertRaisesRegex(ValueError ,"""Profit can not be negative.""" )
def _snake_case ( self ) -> Dict:
        self.assertRaisesRegex(ValueError ,"""max_weight must greater than zero.""" )
def _snake_case ( self ) -> Tuple:
        self.assertRaisesRegex(
            IndexError ,"""The length of profit and weight must be same.""" )
if __name__ == "__main__":
unittest.main()
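# --- Illustrative aside (not part of the module under test) -------------------
# A minimal greedy fractional-knapsack helper with the same call shape as the
# `kp.calc_profit` exercised above (profit list, weight list, max weight). The
# error messages mirror the regexes in the tests; everything else is a sketch,
# not the real implementation.
def _sketch_calc_profit(profit: list, weight: list, max_weight: int) -> float:
    if max_weight <= 0:
        raise ValueError("max_weight must greater than zero.")
    if any(w < 0 for w in weight):
        raise ValueError("Weight can not be negative.")
    if any(p < 0 for p in profit):
        raise ValueError("Profit can not be negative.")
    if len(profit) != len(weight):
        raise IndexError("The length of profit and weight must be same.")
    # take items in decreasing profit/weight ratio, splitting the last one
    order = sorted(range(len(profit)), key=lambda i: profit[i] / weight[i], reverse=True)
    used, gain = 0, 0.0
    for i in order:
        if used + weight[i] <= max_weight:
            used += weight[i]
            gain += profit[i]
        else:
            gain += (max_weight - used) / weight[i] * profit[i]
            break
    return gain

assert _sketch_calc_profit([10, 20, 30, 40, 50, 60], [2, 4, 6, 8, 10, 12], 100) == 210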
| 349 | 1 |
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), """src"""))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="""ignore""", category=FutureWarning)
def pytest_configure( config )-> Optional[int]:
'''simple docstring'''
config.addinivalue_line(
"""markers""" , """is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested""" )
config.addinivalue_line(
"""markers""" , """is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested""" )
config.addinivalue_line("""markers""" , """is_pipeline_test: mark test to run only when pipelines are tested""" )
config.addinivalue_line("""markers""" , """is_staging_test: mark test to run only in the staging environment""" )
config.addinivalue_line("""markers""" , """accelerate_tests: mark test that require accelerate""" )
config.addinivalue_line("""markers""" , """tool_tests: mark the tool tests that are run on their specific schedule""" )
def pytest_addoption( parser )-> Optional[int]:
    '''simple docstring'''
    from transformers.testing_utils import pytest_addoption_shared
    pytest_addoption_shared(parser )
def pytest_terminal_summary( terminalreporter )-> Optional[int]:
    '''simple docstring'''
    from transformers.testing_utils import pytest_terminal_summary_main
    make_reports = terminalreporter.config.getoption("""--make-reports""" )
    if make_reports:
        pytest_terminal_summary_main(terminalreporter , id=make_reports )
def pytest_sessionfinish( session , exitstatus )-> List[str]:
    '''simple docstring'''
    if exitstatus == 5:
        session.exitstatus = 0
# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag("""IGNORE_RESULT""")
OutputChecker = doctest.OutputChecker
class CustomOutputChecker ( OutputChecker ):
"""simple docstring"""
    def check_output( self ,want ,got ,optionflags ) -> List[Any]:
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self ,want ,got ,optionflags )
doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
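# --- Illustrative aside (not part of the original conftest) -------------------
# How a custom doctest option flag like IGNORE_RESULT works end to end:
# register the flag, then honor it in an OutputChecker subclass. A minimal
# standalone sketch, not the transformers test harness itself.
import doctest

_IGNORE = doctest.register_optionflag("IGNORE_RESULT")  # idempotent re-registration

class IgnoreResultChecker(doctest.OutputChecker):
    def check_output(self, want, got, optionflags):
        # treat any output as matching when the flag is set on the example
        if _IGNORE & optionflags:
            return True
        return super().check_output(want, got, optionflags)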
| 349 |
'''simple docstring'''
from __future__ import annotations
import math
def prime_sieve( num )-> list[int]:
    '''simple docstring'''
    if num <= 0:
        msg = F'''{num}: Invalid input, please enter a positive integer.'''
        raise ValueError(msg )
    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num ) )
    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start )
            # Set multiples of start be False
            for i in range(start * start , num + 1 , start ):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1
    for j in range(end + 1 , num + 1 ):
        if sieve[j] is True:
            prime.append(j )
    return prime
if __name__ == "__main__":
print(prime_sieve(int(input("""Enter a positive integer: """).strip())))
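# --- Illustrative aside --------------------------------------------------------
# Cross-check the sieve against a brute-force primality test:
def _is_prime(n: int) -> bool:
    return n > 1 and all(n % d for d in range(2, int(n**0.5) + 1))

assert prime_sieve(100) == [n for n in range(2, 101) if _is_prime(n)]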
| 349 | 1 |
'''simple docstring'''
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainer's args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others and second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# as the first dimension has 2 options and the second 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is (as it's normally
# pytorch-version dependent). That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a train:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results.
#
# By default it'll use the lowest result as the baseline to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
nan = float("""nan""")
class Tee :
    """simple docstring"""
    def __init__( self ,a_ ) -> int:
        self.stdout = sys.stdout
        self.file = open(a_ ,"""a""" )
    def __getattr__( self ,a_ ) -> Any:
        return getattr(self.stdout ,a_ )
    def write( self ,a_ ) -> List[Any]:
        self.stdout.write(a_ )
        # strip tqdm codes
        self.file.write(re.sub(r"""^.*\r""" ,"""""" ,a_ ,0 ,re.M ) )
def get_original_command( max_width=80 , full_python_path=False )-> Union[str, Any]:
    '''simple docstring'''
    cmd = []
    # deal with critical env vars
    env_keys = ["""CUDA_VISIBLE_DEVICES"""]
    for key in env_keys:
        val = os.environ.get(key , None )
        if val is not None:
            cmd.append(F'''{key}={val}''' )
    # python executable (not always needed if the script is executable)
    python = sys.executable if full_python_path else sys.executable.split("""/""" )[-1]
    cmd.append(python )
    # now the normal args
    cmd += list(map(shlex.quote , sys.argv ) )
    # split up into up to MAX_WIDTH lines with shell multi-line escapes
    lines = []
    current_line = """"""
    while len(cmd ) > 0:
        current_line += F'''{cmd.pop(0 )} '''
        if len(cmd ) == 0 or len(current_line ) + len(cmd[0] ) + 1 > max_width - 1:
            lines.append(current_line )
            current_line = """"""
    return "\\\n".join(lines )
def get_base_command( args , output_dir )-> str:
    '''simple docstring'''
    # unwrap multi-line input
    args.base_cmd = re.sub(r"""[\\\n]+""" , """ """ , args.base_cmd )
    # remove --output_dir if any and set our own
    args.base_cmd = re.sub(r"""--output_dir\s+[^\s]+""" , """""" , args.base_cmd )
    args.base_cmd += F''' --output_dir {output_dir}'''
    # ensure we have --overwrite_output_dir
    args.base_cmd = re.sub(r"""--overwrite_output_dir\s+""" , """""" , args.base_cmd )
    args.base_cmd += " --overwrite_output_dir"
    return [sys.executable] + shlex.split(args.base_cmd )
def process_run_single( id , cmd , variation , output_dir , target_metric_key , metric_keys , verbose )-> Union[str, Any]:
    '''simple docstring'''
    if 0:
        import random
        from time import sleep
        sleep(0 )
        return dict(
            {k: random.uniform(0 , 100 ) for k in metric_keys} , **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.22222222] )} , )
    result = subprocess.run(cmd , capture_output=True , text=True )
    if verbose:
        print("""STDOUT""" , result.stdout )
        print("""STDERR""" , result.stderr )
    # save the streams
    prefix = variation.replace(""" """ , """-""" )
    with open(Path(output_dir ) / F'''log.{prefix}.stdout.txt''' , """w""" ) as f:
        f.write(result.stdout )
    with open(Path(output_dir ) / F'''log.{prefix}.stderr.txt''' , """w""" ) as f:
        f.write(result.stderr )
    if result.returncode != 0:
        if verbose:
            print("""failed""" )
        return {target_metric_key: nan}
    with io.open(F'''{output_dir}/all_results.json''' , """r""" , encoding="""utf-8""" ) as f:
        metrics = json.load(f )
    # filter out just the keys we want
    return {k: v for k, v in metrics.items() if k in metric_keys}
def process_run( id , cmd , variation_key , variation , longest_variation_len , target_metric_key , report_metric_keys , repeat_times , output_dir , verbose , )-> List[str]:
    '''simple docstring'''
    metrics = []
    results = []
    preamble = F'''{id}: {variation:<{longest_variation_len}}'''
    outcome = F'''{preamble}: '''
    metric_keys = set(report_metric_keys + [target_metric_key] )
    for i in tqdm(range(repeat_times ) , desc=preamble , leave=False ):
        single_run_metrics = process_run_single(
            id , cmd , variation , output_dir , target_metric_key , metric_keys , verbose )
        result = single_run_metrics[target_metric_key]
        if not math.isnan(result ):
            metrics.append(single_run_metrics )
            results.append(result )
            outcome += "✓"
        else:
            outcome += "✘"
    outcome = F'''\33[2K\r{outcome}'''
    if len(metrics ) > 0:
        mean_metrics = {k: fmean([x[k] for x in metrics] ) for k in metrics[0].keys()}
        mean_target = round(mean_metrics[target_metric_key] , 2 )
        results_str = F'''{outcome} {mean_target}'''
        if len(results ) > 1:
            results_str += F''' {tuple(round(x , 2 ) for x in results )}'''
        print(results_str )
        mean_metrics[variation_key] = variation
        return mean_metrics
    else:
        print(outcome )
        return {variation_key: variation, target_metric_key: nan}
def get_versions( )-> List[str]:
    '''simple docstring'''
    properties = torch.cuda.get_device_properties(torch.device("""cuda""" ) )
return F'''
Datetime : {datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S' )}
Software:
transformers: {transformers.__version__}
torch : {torch.__version__}
cuda : {torch.version.cuda}
python : {platform.python_version()}
Hardware:
{torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**30:0.2f}GB
'''
def process_results( results , target_metric_key , report_metric_keys , base_variation , output_dir )-> List[str]:
    '''simple docstring'''
    df = pd.DataFrame(results )
    variation_key = """variation"""
    diff_key = """diff_%"""
    sentinel_value = nan
    if base_variation is not None and len(df[df[variation_key] == base_variation] ):
        # this may still return nan
        sentinel_value = df.loc[df[variation_key] == base_variation][target_metric_key].item()
    if math.isnan(sentinel_value ):
        # as a fallback, use the minimal value as the sentinel
        sentinel_value = df.loc[df[target_metric_key] != nan][target_metric_key].min()
    # create diff column if possible
    if not math.isnan(sentinel_value ):
        df[diff_key] = df.apply(
            lambda r : round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value )
            if not math.isnan(r[target_metric_key] )
            else 0 , axis="""columns""" , )
    # re-order columns
    cols = [variation_key, target_metric_key, diff_key, *report_metric_keys]
    df = df.reindex(cols , axis="""columns""" ) # reorder cols
    # capitalize
    df = df.rename(str.capitalize , axis="""columns""" )
    # make the cols as narrow as possible
    df_github = df.rename(lambda c : c.replace("""_""" , """<br>""" ) , axis="""columns""" )
    df_console = df.rename(lambda c : c.replace("""_""" , """\n""" ) , axis="""columns""" )
    report = ["""""", """Copy between the cut-here-lines and paste as is to github or a forum"""]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results:", df_github.to_markdown(index=False , floatfmt=""".2f""" )]
    report += ["```"]
    report += ["*** Setup:", get_versions()]
    report += ["*** The benchmark command line was:", get_original_command()]
    report += ["```"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results (console):", df_console.to_markdown(index=False , floatfmt=""".2f""" )]
    print("""\n\n""".join(report ) )
def main( )-> Union[str, Any]:
    '''simple docstring'''
    parser = argparse.ArgumentParser()
    parser.add_argument(
        """--base-cmd""" , default=None , type=str , required=True , help="""Base cmd""" , )
    parser.add_argument(
        """--variations""" , default=None , type=str , nargs="""+""" , required=True , help="""Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'""" , )
    parser.add_argument(
        """--base-variation""" , default=None , type=str , help="""Baseline variation to compare to. if None the minimal target value will be used to compare against""" , )
    parser.add_argument(
        """--target-metric-key""" , default=None , type=str , required=True , help="""Target metric key in output_dir/all_results.json, e.g., train_samples_per_second""" , )
    parser.add_argument(
        """--report-metric-keys""" , default="""""" , type=str , help="""Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., 'train_loss train_samples'""" , )
    parser.add_argument(
        """--repeat-times""" , default=1 , type=int , help="""How many times to re-run each variation - an average will be reported""" , )
    parser.add_argument(
        """--output_dir""" , default="""output_benchmark""" , type=str , help="""The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked""" , )
    parser.add_argument(
        """--verbose""" , default=False , action="""store_true""" , help="""Whether to show the outputs of each run or just the benchmark progress""" , )
    args = parser.parse_args()
    output_dir = args.output_dir
    Path(output_dir ).mkdir(exist_ok=True )
    base_cmd = get_base_command(args , output_dir )
    # split each dimension into its --foo variations
    dims = [list(map(str.strip , re.split(r"""\|""" , x ) ) ) for x in args.variations]
    # build a cartesian product of dimensions and convert those back into cmd-line arg strings,
    # while stripping white space for inputs that were empty
    variations = list(map(str.strip , map(""" """.join , itertools.product(*dims ) ) ) )
    longest_variation_len = max(len(x ) for x in variations )
    # split wanted keys
    report_metric_keys = args.report_metric_keys.split()
    # capture prints into a log file for convenience
    report_fn = F'''benchmark-report-{datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S' )}.txt'''
    print(F'''\nNote: each run\'s output is also logged under {output_dir}/log.*.std*.txt''' )
    print(F'''and this script\'s output is also piped into {report_fn}''' )
    sys.stdout = Tee(report_fn )
    print(F'''\n*** Running {len(variations )} benchmarks:''' )
    print(F'''Base command: {' '.join(base_cmd )}''' )
    variation_key = """variation"""
    results = []
    for id, variation in enumerate(tqdm(variations , desc="""Total completion: """ , leave=False ) ):
        cmd = base_cmd + variation.split()
        results.append(
            process_run(
                id + 1 , cmd , variation_key , variation , longest_variation_len , args.target_metric_key , report_metric_keys , args.repeat_times , output_dir , args.verbose , ) )
    process_results(results , args.target_metric_key , report_metric_keys , args.base_variation , output_dir )
if __name__ == "__main__":
main()
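# --- Illustrative aside (standalone) ------------------------------------------
# The core trick of the driver above is a cartesian product over the
# per-dimension variations; a minimal sketch with made-up inputs:
import itertools
import re

demo_variations = ["--tf32 0|--tf32 1", "|--fp16|--bf16"]
demo_dims = [list(map(str.strip, re.split(r"\|", v))) for v in demo_variations]
demo_combos = [" ".join(parts).strip() for parts in itertools.product(*demo_dims)]
# -> ['--tf32 0', '--tf32 0 --fp16', '--tf32 0 --bf16', '--tf32 1', '--tf32 1 --fp16', '--tf32 1 --bf16']
assert len(demo_combos) == 6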
| 349 |
'''simple docstring'''
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class SparkDatasetReader ( AbstractDatasetReader ):
"""simple docstring"""
    def __init__( self ,df ,split = None ,features = None ,streaming = True ,cache_dir = None ,keep_in_memory = False ,working_dir = None ,load_from_cache_file = True ,file_format = "arrow" ,**kwargs ,) -> str:
        super().__init__(
            split=split ,features=features ,cache_dir=cache_dir ,keep_in_memory=keep_in_memory ,streaming=streaming ,**kwargs ,)
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df ,features=features ,cache_dir=cache_dir ,working_dir=working_dir ,**kwargs ,)
    def read( self ) -> int:
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split )
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode ,file_format=self._file_format ,)
        return self.builder.as_dataset(split=self.split )
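# --- Illustrative aside (hedged usage sketch) ----------------------------------
# How the reader above is typically driven; the session and dataframe names are
# made up, and the calls assume a standard local pyspark setup rather than
# documenting an exact API:
#
#     from pyspark.sql import SparkSession
#     spark = SparkSession.builder.master("local[2]").appName("demo").getOrCreate()
#     df = spark.createDataFrame([("a", 1), ("b", 2)], schema="text string, label int")
#     ds = SparkDatasetReader(df, cache_dir="/tmp/ds_cache").read()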
| 349 | 1 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class KarrasVeOutput ( BaseOutput ):
    """simple docstring"""
    prev_sample: torch.FloatTensor
    derivative: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
class KarrasVeScheduler ( SchedulerMixin , ConfigMixin ):
"""simple docstring"""
UpperCAmelCase = 2
@register_to_config
    def __init__( self ,sigma_min = 0.02 ,sigma_max = 100 ,s_noise = 1.007 ,s_churn = 80 ,s_min = 0.05 ,s_max = 50 ,) -> Dict:
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max
        # setable values
        self.num_inference_steps: int = None
        self.timesteps: np.IntTensor = None
        self.schedule: torch.FloatTensor = None # sigma(t_i)
    def _snake_case ( self ,sample ,timestep = None ) -> torch.FloatTensor:
return sample
    def _snake_case ( self ,num_inference_steps ,device = None ) -> List[Any]:
        self.num_inference_steps = num_inference_steps
        timesteps = np.arange(0 ,self.num_inference_steps )[::-1].copy()
        self.timesteps = torch.from_numpy(timesteps ).to(device )
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in self.timesteps
        ]
        self.schedule = torch.tensor(schedule ,dtype=torch.float32 ,device=device )
    def _snake_case ( self ,sample ,sigma ,generator = None ) -> Tuple[torch.FloatTensor, float]:
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / self.num_inference_steps ,2**0.5 - 1 )
        else:
            gamma = 0
        # sample eps ~ N(0, S_noise^2 * I)
        eps = self.config.s_noise * randn_tensor(sample.shape ,generator=generator ).to(sample.device )
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
    def _snake_case ( self ,model_output ,sigma_hat ,sigma_prev ,sample_hat ,return_dict = True ,) -> Union[KarrasVeOutput, Tuple]:
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative
        if not return_dict:
            return (sample_prev, derivative)
        return KarrasVeOutput(
            prev_sample=sample_prev ,derivative=derivative ,pred_original_sample=pred_original_sample )
    def _snake_case ( self ,model_output ,sigma_hat ,sigma_prev ,sample_hat ,sample_prev ,derivative ,return_dict = True ,) -> Union[KarrasVeOutput, Tuple]:
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
        if not return_dict:
            return (sample_prev, derivative)
        return KarrasVeOutput(
            prev_sample=sample_prev ,derivative=derivative ,pred_original_sample=pred_original_sample )
    def _snake_case ( self ,original_samples ,noise ,timesteps ) -> Optional[Any]:
raise NotImplementedError()
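# --- Illustrative aside (standalone) ------------------------------------------
# The schedule computed in `set_timesteps` above interpolates geometrically
# between the extremes, exactly as written in the code above:
import numpy as np

def karras_ve_schedule(sigma_min: float, sigma_max: float, num_steps: int) -> np.ndarray:
    i = np.arange(num_steps)
    return sigma_max**2 * (sigma_min**2 / sigma_max**2) ** (i / (num_steps - 1))

print(karras_ve_schedule(0.02, 100.0, 5))  # decays from sigma_max**2 to sigma_min**2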
| 349 |
'''simple docstring'''
__version__ = """0.21.0"""
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
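# --- Illustrative aside (hedged) -----------------------------------------------
# Typical downstream use of the API re-exported above:
#
#     from accelerate import Accelerator
#     accelerator = Accelerator()
#     model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)
#     accelerator.backward(loss)  # replaces loss.backward()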
| 349 | 1 |
'''simple docstring'''
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def data_handling( data )-> tuple:
    '''simple docstring'''
    return (data["data"], data["target"])
def xgboost( features , target , test_features )-> np.ndarray:
    '''simple docstring'''
    xgb = XGBRegressor(verbosity=0 , random_state=42 )
    xgb.fit(features , target )
    # Predict target for test data
    predictions = xgb.predict(test_features )
    predictions = predictions.reshape(len(predictions ) , 1 )
    return predictions
def main( )-> None:
    '''simple docstring'''
    california = fetch_california_housing()
    data ,target = data_handling(california )
    x_train ,x_test ,y_train ,y_test = train_test_split(
        data , target , test_size=0.25 , random_state=1 )
    predictions = xgboost(x_train , y_train , x_test )
    # Error printing
    print(F'''Mean Absolute Error : {mean_absolute_error(y_test , predictions )}''' )
    print(F'''Mean Square Error : {mean_squared_error(y_test , predictions )}''' )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
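# --- Illustrative aside (standalone) ------------------------------------------
# A self-contained toy run of the same estimator for quick sanity checks;
# the synthetic data and shapes below are made up:
import numpy as np
from xgboost import XGBRegressor

rng = np.random.default_rng(0)
X = rng.normal(size=(200, 3))
y = X @ np.array([1.0, -2.0, 0.5]) + rng.normal(scale=0.1, size=200)
toy_model = XGBRegressor(verbosity=0, random_state=42).fit(X, y)
print(toy_model.predict(X[:5]))  # predictions should track y[:5] closely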
| 349 |
'''simple docstring'''
from argparse import ArgumentParser
from .env import EnvironmentCommand
def main( )-> None:
    '''simple docstring'''
    parser = ArgumentParser("""Diffusers CLI tool""" , usage="""diffusers-cli <command> [<args>]""" )
    commands_parser = parser.add_subparsers(help="""diffusers-cli command helpers""" )
    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser )
    # Let's go
    args = parser.parse_args()
    if not hasattr(args , """func""" ):
        parser.print_help()
        exit(1 )
    # Run
    service = args.func(args )
    service.run()
if __name__ == "__main__":
main()
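# --- Illustrative aside --------------------------------------------------------
# This `main()` backs the console entry point, e.g.:
#
#     $ diffusers-cli env   # prints environment info via EnvironmentCommand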
| 349 | 1 |
'''simple docstring'''
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
english_letter_freq = {
"""E""": 12.70,
"""T""": 9.06,
"""A""": 8.17,
"""O""": 7.51,
"""I""": 6.97,
"""N""": 6.75,
"""S""": 6.33,
"""H""": 6.09,
"""R""": 5.99,
"""D""": 4.25,
"""L""": 4.03,
"""C""": 2.78,
"""U""": 2.76,
"""M""": 2.41,
"""W""": 2.36,
"""F""": 2.23,
"""G""": 2.02,
"""Y""": 1.97,
"""P""": 1.93,
"""B""": 1.29,
"""V""": 0.98,
"""K""": 0.77,
"""J""": 0.15,
"""X""": 0.15,
"""Q""": 0.10,
"""Z""": 0.07,
}
ETAOIN = """ETAOINSHRDLCUMWFGYPBVKJXQZ"""
LETTERS = """ABCDEFGHIJKLMNOPQRSTUVWXYZ"""
def get_letter_count( message )-> dict[str, int]:
    '''simple docstring'''
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        if letter in LETTERS:
            letter_count[letter] += 1
    return letter_count
def get_item_at_index_zero( x )-> str:
    '''simple docstring'''
    return x[0]
def get_frequency_order( message )-> str:
    '''simple docstring'''
    letter_to_freq = get_letter_count(message )
    freq_to_letter: dict[int, list[str]] = {
        freq: [] for letter, freq in letter_to_freq.items()
    }
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter )
    freq_to_letter_str: dict[int, str] = {}
    for freq in freq_to_letter:
        freq_to_letter[freq].sort(key=ETAOIN.find , reverse=True )
        freq_to_letter_str[freq] = """""".join(freq_to_letter[freq] )
    freq_pairs = list(freq_to_letter_str.items() )
    freq_pairs.sort(key=get_item_at_index_zero , reverse=True )
    freq_order: list[str] = [freq_pair[1] for freq_pair in freq_pairs]
    return "".join(freq_order )
def english_freq_match_score( message )-> int:
    '''simple docstring'''
    freq_order = get_frequency_order(message )
    match_score = 0
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1
    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1
    return match_score
if __name__ == "__main__":
import doctest
doctest.testmod()
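# --- Illustrative aside --------------------------------------------------------
# Quick demonstration of the helpers above; the match score is an integer in
# [0, 12] (six most-common plus six least-common letters compared):
_sample = "Now is the time for all good men to come to the aid of their country."
print(get_frequency_order(_sample))
print(english_freq_match_score(_sample))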
| 349 |
'''simple docstring'''
import math
def jump_search( arr , x )-> int:
    '''simple docstring'''
    n = len(arr )
    step = int(math.floor(math.sqrt(n ) ) )
    prev = 0
    while arr[min(step , n ) - 1] < x:
        prev = step
        step += int(math.floor(math.sqrt(n ) ) )
        if prev >= n:
            return -1
    while arr[prev] < x:
        prev = prev + 1
        if prev == min(step , n ):
            return -1
    if arr[prev] == x:
        return prev
    return -1
if __name__ == "__main__":
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    arr = [int(item) for item in user_input.split(""",""")]
    x = int(input("""Enter the number to be searched:\n"""))
    res = jump_search(arr, x)
if res == -1:
print("""Number not found!""")
else:
print(f"""Number {x} is at index {res}""")
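# --- Illustrative aside --------------------------------------------------------
# Jump search probes roughly sqrt(n) block boundaries before scanning linearly:
assert jump_search([0, 1, 2, 3, 4, 10, 55], 55) == 6
assert jump_search([0, 1, 2, 3, 4, 10, 55], 7) == -1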
| 349 | 1 |
'''simple docstring'''
def is_ip_va_address_valid( ip_va_address )-> bool:
    '''simple docstring'''
    octets = [int(i ) for i in ip_va_address.split(""".""" ) if i.isdigit()]
    return len(octets ) == 4 and all(0 <= int(octet ) <= 254 for octet in octets )
if __name__ == "__main__":
    ip = input().strip()
    valid_or_invalid = """valid""" if is_ip_va_address_valid(ip) else """invalid"""
print(f"""{ip} is a {valid_or_invalid} IP v4 address.""")
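# --- Illustrative aside --------------------------------------------------------
# Quick checks against the validator above (note it follows the source in
# rejecting octet values above 254):
assert is_ip_va_address_valid("192.168.0.23") is True
assert is_ip_va_address_valid("192.256.15.8") is False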
| 349 |
'''simple docstring'''
import argparse
import copy
def generate_neighbours( path )-> Dict:
    '''simple docstring'''
    _UpperCAmelCase : Dict = {}
    with open(path ) as f:
for line in f:
if line.split()[0] not in dict_of_neighbours:
_UpperCAmelCase : Optional[int] = []
_list.append([line.split()[1], line.split()[2]] )
_UpperCAmelCase : List[str] = _list
else:
dict_of_neighbours[line.split()[0]].append(
[line.split()[1], line.split()[2]] )
if line.split()[1] not in dict_of_neighbours:
_UpperCAmelCase : List[str] = []
_list.append([line.split()[0], line.split()[2]] )
_UpperCAmelCase : Optional[int] = _list
else:
dict_of_neighbours[line.split()[1]].append(
[line.split()[0], line.split()[2]] )
return dict_of_neighbours
def generate_first_solution( path , dict_of_neighbours )-> List[Any]:
    '''simple docstring'''
    with open(path ) as f:
_UpperCAmelCase : List[Any] = f.read(1 )
_UpperCAmelCase : int = start_node
_UpperCAmelCase : List[Any] = []
_UpperCAmelCase : Dict = start_node
_UpperCAmelCase : Any = 0
while visiting not in first_solution:
_UpperCAmelCase : Optional[int] = 10000
for k in dict_of_neighbours[visiting]:
if int(k[1] ) < int(lowerCAmelCase_ ) and k[0] not in first_solution:
_UpperCAmelCase : Optional[int] = k[1]
_UpperCAmelCase : List[str] = k[0]
first_solution.append(lowerCAmelCase_ )
_UpperCAmelCase : Optional[int] = distance_of_first_solution + int(lowerCAmelCase_ )
_UpperCAmelCase : Dict = best_node
first_solution.append(lowerCAmelCase_ )
_UpperCAmelCase : List[str] = 0
for k in dict_of_neighbours[first_solution[-2]]:
if k[0] == start_node:
break
position += 1
_UpperCAmelCase : int = (
distance_of_first_solution
+ int(dict_of_neighbours[first_solution[-2]][position][1] )
- 10000
)
return first_solution, distance_of_first_solution
def find_neighborhood( solution , dict_of_neighbours )-> int:
'''simple docstring'''
_UpperCAmelCase : int = []
for n in solution[1:-1]:
_UpperCAmelCase : Tuple = solution.index(lowerCAmelCase_ )
for kn in solution[1:-1]:
_UpperCAmelCase : int = solution.index(lowerCAmelCase_ )
if n == kn:
continue
_UpperCAmelCase : Tuple = copy.deepcopy(lowerCAmelCase_ )
_UpperCAmelCase : Union[str, Any] = kn
_UpperCAmelCase : List[str] = n
_UpperCAmelCase : Optional[int] = 0
for k in _tmp[:-1]:
_UpperCAmelCase : List[str] = _tmp[_tmp.index(lowerCAmelCase_ ) + 1]
for i in dict_of_neighbours[k]:
if i[0] == next_node:
_UpperCAmelCase : Dict = distance + int(i[1] )
_tmp.append(lowerCAmelCase_ )
if _tmp not in neighborhood_of_solution:
neighborhood_of_solution.append(_tmp )
_UpperCAmelCase : Dict = len(neighborhood_of_solution[0] ) - 1
    neighborhood_of_solution.sort(key=lambda x : x[index_of_last_item_in_the_list] )
return neighborhood_of_solution
def tabu_search( first_solution , distance_of_first_solution , dict_of_neighbours , iters , size )-> int:
'''simple docstring'''
_UpperCAmelCase : List[Any] = 1
_UpperCAmelCase : Optional[Any] = first_solution
_UpperCAmelCase : List[Any] = []
_UpperCAmelCase : List[Any] = distance_of_first_solution
_UpperCAmelCase : Dict = solution
while count <= iters:
_UpperCAmelCase : Any = find_neighborhood(lowerCAmelCase_ , lowerCAmelCase_ )
_UpperCAmelCase : Dict = 0
_UpperCAmelCase : Optional[Any] = neighborhood[index_of_best_solution]
_UpperCAmelCase : Optional[Any] = len(lowerCAmelCase_ ) - 1
_UpperCAmelCase : Optional[Any] = False
while not found:
_UpperCAmelCase : Tuple = 0
while i < len(lowerCAmelCase_ ):
if best_solution[i] != solution[i]:
_UpperCAmelCase : Any = best_solution[i]
_UpperCAmelCase : str = solution[i]
break
_UpperCAmelCase : int = i + 1
if [first_exchange_node, second_exchange_node] not in tabu_list and [
second_exchange_node,
first_exchange_node,
] not in tabu_list:
tabu_list.append([first_exchange_node, second_exchange_node] )
_UpperCAmelCase : Tuple = True
_UpperCAmelCase : List[Any] = best_solution[:-1]
_UpperCAmelCase : str = neighborhood[index_of_best_solution][best_cost_index]
if cost < best_cost:
_UpperCAmelCase : Tuple = cost
_UpperCAmelCase : List[Any] = solution
else:
_UpperCAmelCase : Any = index_of_best_solution + 1
_UpperCAmelCase : Dict = neighborhood[index_of_best_solution]
if len(lowerCAmelCase_ ) >= size:
tabu_list.pop(0 )
_UpperCAmelCase : Optional[Any] = count + 1
return best_solution_ever, best_cost
def main( args=None )-> Optional[int]:
    '''simple docstring'''
    dict_of_neighbours = generate_neighbours(args.File )
    first_solution ,distance_of_first_solution = generate_first_solution(
        args.File , dict_of_neighbours )
    best_sol ,best_cost = tabu_search(
        first_solution , distance_of_first_solution , dict_of_neighbours , args.Iterations , args.Size , )
    print(F'''Best solution: {best_sol}, with total distance: {best_cost}.''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="""Tabu Search""")
parser.add_argument(
"""-f""",
"""--File""",
type=str,
help="""Path to the file containing the data""",
required=True,
)
parser.add_argument(
"""-i""",
"""--Iterations""",
type=int,
help="""How many iterations the algorithm should perform""",
required=True,
)
parser.add_argument(
"""-s""", """--Size""", type=int, help="""Size of the tabu list""", required=True
)
# Pass the arguments to main method
main(parser.parse_args())
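# --- Illustrative aside (standalone) ------------------------------------------
# The tabu mechanism above reduces to a bounded memory of recent swaps that may
# not be re-applied; a minimal sketch with made-up node labels:
_tabu: list[list[str]] = []
_TABU_SIZE = 5

def _try_move(a: str, b: str) -> bool:
    if [a, b] in _tabu or [b, a] in _tabu:
        return False  # the swap is tabu
    _tabu.append([a, b])
    if len(_tabu) > _TABU_SIZE:
        _tabu.pop(0)  # forget the oldest move
    return True

assert _try_move("a", "b") is True
assert _try_move("b", "a") is False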
| 349 | 1 |
'''simple docstring'''
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example of using Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_fold_dataloaders( accelerator , dataset , train_idxs , valid_idxs , batch_size = 16 )-> Optional[int]:
    '''simple docstring'''
    tokenizer = AutoTokenizer.from_pretrained("""bert-base-cased""" )
    datasets = DatasetDict(
        {
            """train""": dataset["""train"""].select(train_idxs ),
            """validation""": dataset["""train"""].select(valid_idxs ),
            """test""": dataset["""validation"""],
        } )
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=True , max_length=None )
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function , batched=True , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("""label""" , """labels""" )
    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples , padding="""longest""" , max_length=max_length , pad_to_multiple_of=pad_to_multiple_of , return_tensors="""pt""" , )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["""train"""] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets["""validation"""] , shuffle=False , collate_fn=collate_fn , batch_size=EVAL_BATCH_SIZE )
    test_dataloader = DataLoader(
        tokenized_datasets["""test"""] , shuffle=False , collate_fn=collate_fn , batch_size=EVAL_BATCH_SIZE )
    return train_dataloader, eval_dataloader, test_dataloader
def training_function( config , args )-> Optional[int]:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = []
# Download the dataset
_UpperCAmelCase : Dict = load_dataset("""glue""" , """mrpc""" )
# Create our splits
_UpperCAmelCase : Optional[Any] = StratifiedKFold(n_splits=int(args.num_folds ) )
# Initialize accelerator
_UpperCAmelCase : Union[str, Any] = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_UpperCAmelCase : Dict = config["""lr"""]
_UpperCAmelCase : List[Any] = int(config["""num_epochs"""] )
_UpperCAmelCase : str = int(config["""seed"""] )
_UpperCAmelCase : List[Any] = int(config["""batch_size"""] )
_UpperCAmelCase : int = evaluate.load("""glue""" , """mrpc""" )
# If the batch size is too big we use gradient accumulation
_UpperCAmelCase : List[Any] = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
_UpperCAmelCase : Dict = batch_size // MAX_GPU_BATCH_SIZE
_UpperCAmelCase : Tuple = MAX_GPU_BATCH_SIZE
set_seed(lowerCAmelCase_ )
# New Code #
# Create our folds:
_UpperCAmelCase : Any = kfold.split(np.zeros(datasets["""train"""].num_rows ) , datasets["""train"""]["""label"""] )
_UpperCAmelCase : Tuple = []
# Iterate over them
for i, (train_idxs, valid_idxs) in enumerate(lowerCAmelCase_ ):
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase : Union[str, Any] = get_fold_dataloaders(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
_UpperCAmelCase : Tuple = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=lowerCAmelCase_ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
_UpperCAmelCase : List[Any] = model.to(accelerator.device )
# Instantiate optimizer
_UpperCAmelCase : int = AdamW(params=model.parameters() , lr=lowerCAmelCase_ )
# Instantiate scheduler
_UpperCAmelCase : Dict = get_linear_schedule_with_warmup(
optimizer=lowerCAmelCase_ , num_warmup_steps=100 , num_training_steps=(len(lowerCAmelCase_ ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase : Any = accelerator.prepare(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# Now we train the model
for epoch in range(lowerCAmelCase_ ):
model.train()
for step, batch in enumerate(lowerCAmelCase_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
_UpperCAmelCase : Union[str, Any] = model(**lowerCAmelCase_ )
_UpperCAmelCase : Dict = outputs.loss
_UpperCAmelCase : int = loss / gradient_accumulation_steps
accelerator.backward(lowerCAmelCase_ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(lowerCAmelCase_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
_UpperCAmelCase : List[str] = model(**lowerCAmelCase_ )
_UpperCAmelCase : List[Any] = outputs.logits.argmax(dim=-1 )
_UpperCAmelCase ,_UpperCAmelCase : Union[str, Any] = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
metric.add_batch(
predictions=lowerCAmelCase_ , references=lowerCAmelCase_ , )
_UpperCAmelCase : List[Any] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'''epoch {epoch}:''' , lowerCAmelCase_ )
# New Code #
# We also run predictions on the test set at the very end
_UpperCAmelCase : Tuple = []
for step, batch in enumerate(lowerCAmelCase_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
_UpperCAmelCase : List[Any] = model(**lowerCAmelCase_ )
_UpperCAmelCase : Any = outputs.logits
_UpperCAmelCase ,_UpperCAmelCase : List[Any] = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
fold_predictions.append(predictions.cpu() )
if i == 0:
# We need all of the test predictions
test_references.append(references.cpu() )
# Use accelerator.print to print only on the main process.
test_predictions.append(torch.cat(lowerCAmelCase_ , dim=0 ) )
# We now need to release all our memory and get rid of the current model, optimizer, etc
accelerator.free_memory()
# New Code #
# Finally we check the accuracy of our folded results:
_UpperCAmelCase : List[Any] = torch.cat(lowerCAmelCase_ , dim=0 )
_UpperCAmelCase : Union[str, Any] = torch.stack(lowerCAmelCase_ , dim=0 ).sum(dim=0 ).div(int(args.num_folds ) ).argmax(dim=-1 )
_UpperCAmelCase : List[str] = metric.compute(predictions=lowerCAmelCase_ , references=lowerCAmelCase_ )
accelerator.print("""Average test metrics from all folds:""" , lowerCAmelCase_ )
def main( )-> Any:
    '''simple docstring'''
    parser = argparse.ArgumentParser(description="""Simple example of training script.""" )
    parser.add_argument(
        """--mixed_precision""" , type=str , default=None , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
        """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
        """and an Nvidia Ampere GPU.""" , )
    parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
    # New Code #
    parser.add_argument("""--num_folds""" , type=int , default=3 , help="""The number of splits to perform across the dataset""" )
    args = parser.parse_args()
    config = {"""lr""": 2e-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
    training_function(config , args )
if __name__ == "__main__":
main()
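# --- Illustrative aside (standalone) ------------------------------------------
# The fold-ensembling step above is a soft vote: average (or sum) the logits
# from each fold's model, then argmax. Shapes below are made up:
import torch

_fold_logits = [torch.randn(4, 2) for _ in range(3)]  # 3 folds, 4 examples, 2 classes
_avg = torch.stack(_fold_logits, dim=0).mean(dim=0)
_preds = _avg.argmax(dim=-1)
print(_preds)  # one class index per example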
| 349 |
'''simple docstring'''
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge :
    """simple docstring"""
    destination_vertex: int
    weight: int
class lowercase :
"""simple docstring"""
    def __init__( self ,a_ ) -> List[str]:
        self._graph: list[list[Edge]] = [[] for _ in range(a_ )]
        self._size = a_
    def __getitem__( self ,vertex ) -> Iterator[Edge]:
        return iter(self._graph[vertex] )
@property
    def size( self ) -> List[Any]:
return self._size
    def _snake_case ( self ,from_vertex ,to_vertex ,weight ) -> Tuple:
        if weight not in (0, 1):
            raise ValueError("""Edge weight must be either 0 or 1.""" )
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("""Vertex indexes must be in [0; size).""" )
        self._graph[from_vertex].append(Edge(to_vertex ,weight ) )
    def _snake_case ( self ,start_vertex ,finish_vertex ) -> int | None:
        queue = deque([start_vertex] )
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0
        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue
            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance ,int )
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex )
                else:
                    queue.append(edge.destination_vertex )
        if distances[finish_vertex] is None:
            raise ValueError("""No path from start_vertex to finish_vertex.""" )
        return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
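    # Illustrative usage (not in the original): the zero-weight edge keeps the
    # shortest distance at 1 even though the path uses two edges.
    graph = AdjacencyList(3 )
    graph.add_edge(0 , 1 , 0 )
    graph.add_edge(1 , 2 , 1 )
    print(graph.get_shortest_path(0 , 2 ) )  # -> 1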
| 349 | 1 |
'''simple docstring'''
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def snake_case_ ( )-> Union[str, Any]:
'''simple docstring'''
raise RuntimeError("""CUDA out of memory.""" )
class ModelForTest(nn.Module ):
    """simple docstring"""

    def __init__(self ) -> None:
        super().__init__()
        self.linear1 = nn.Linear(3 , 4 )
        self.batchnorm = nn.BatchNorm1d(4 )
        self.linear2 = nn.Linear(4 , 5 )

    def forward(self , x ):
        return self.linear2(self.batchnorm(self.linear1(x ) ) )
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def _snake_case ( self ) -> Tuple:
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128 )
        def mock_training_loop_function(batch_size ):
            nonlocal batch_sizes
            batch_sizes.append(batch_size )
            if batch_size != 8:
                raise_fake_out_of_memory()

        mock_training_loop_function()
        self.assertListEqual(batch_sizes , [128, 64, 32, 16, 8] )
def _snake_case ( self ) -> List[str]:
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128 )
        def mock_training_loop_function(batch_size , arg1 ):
            nonlocal batch_sizes
            batch_sizes.append(batch_size )
            if batch_size != 8:
                raise_fake_out_of_memory()
            return batch_size, arg1

        bs, arg1 = mock_training_loop_function("""hello""" )
        self.assertListEqual(batch_sizes , [128, 64, 32, 16, 8] )
        self.assertListEqual([bs, arg1] , [8, """hello"""] )
def _snake_case ( self ) -> Any:
@find_executable_batch_size(starting_batch_size=0 )
def mock_training_loop_function(a_ ):
pass
        with self.assertRaises(RuntimeError ) as cm:
mock_training_loop_function()
self.assertIn("""No executable batch size found, reached zero.""" ,cm.exception.args[0] )
def _snake_case ( self ) -> Optional[int]:
@find_executable_batch_size(starting_batch_size=16 )
        def mock_training_loop_function(batch_size ):
            if batch_size > 0:
                raise_fake_out_of_memory()
            pass
        with self.assertRaises(RuntimeError ) as cm:
mock_training_loop_function()
self.assertIn("""No executable batch size found, reached zero.""" ,cm.exception.args[0] )
def _snake_case ( self ) -> List[Any]:
        @find_executable_batch_size(starting_batch_size=128 )
        def f(batch_size , arg1 , arg2 ):
            if batch_size != 8:
                raise_fake_out_of_memory()

        with self.assertRaises(TypeError ) as cm:
            f(128 , """hello""" , """world""" )
self.assertIn("""Batch size was passed into `f`""" ,cm.exception.args[0] )
self.assertIn("""`f(arg1='hello', arg2='world')""" ,cm.exception.args[0] )
def _snake_case ( self ) -> List[str]:
@find_executable_batch_size(starting_batch_size=16 )
def mock_training_loop_function(a_ ):
raise ValueError("""Oops, we had an error!""" )
        with self.assertRaises(ValueError ) as cm:
mock_training_loop_function()
self.assertIn("""Oops, we had an error!""" ,cm.exception.args[0] )
@require_cuda
def _snake_case ( self ) -> List[str]:
        starting_memory = torch.cuda.memory_allocated()
        model = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated() , starting_memory )
        model = release_memory(model )
        self.assertEqual(torch.cuda.memory_allocated() , starting_memory )
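# Illustrative sketch (not part of the test suite): the retry-and-halve pattern
# that `find_executable_batch_size` exercises above, written out by hand; the
# helper name `_demo_find_batch_size` is hypothetical.
def _demo_find_batch_size(start=128 ):
    batch_size = start
    while batch_size > 0:
        try:
            if batch_size > 8:
                raise_fake_out_of_memory()  # simulate a CUDA OOM for large sizes
            return batch_size  # first batch size that "fits"
        except RuntimeError:
            batch_size //= 2  # halve and retry, as the decorator does
    raise RuntimeError("""No executable batch size found, reached zero.""" )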
| 349 |
'''simple docstring'''
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform cross-validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUs (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
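# New Code (illustrative) #
# A minimal sketch, not part of the original script, of what StratifiedKFold
# yields: per-fold train/validation index arrays whose class ratios match the
# full label array. The toy labels and the helper name below are assumptions.
def _demo_stratified_split():
    labels = np.array([0, 0, 0, 1, 1, 1] )
    for train_idx, valid_idx in StratifiedKFold(n_splits=3 ).split(np.zeros(len(labels ) ) , labels ):
        print(train_idx , valid_idx )  # each validation fold holds one sample of each class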
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 16 )-> Optional[int]:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained("""bert-base-cased""" )
_UpperCAmelCase : str = DatasetDict(
{
"""train""": dataset["""train"""].select(lowerCAmelCase_ ),
"""validation""": dataset["""train"""].select(lowerCAmelCase_ ),
"""test""": dataset["""validation"""],
} )
def tokenize_function(lowerCAmelCase_ ):
# max_length=None => use the model max length (it's actually the default)
_UpperCAmelCase : List[str] = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowerCAmelCase_ , max_length=lowerCAmelCase_ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
_UpperCAmelCase : Optional[int] = datasets.map(
lowerCAmelCase_ , batched=lowerCAmelCase_ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
_UpperCAmelCase : List[Any] = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(lowerCAmelCase_ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
_UpperCAmelCase : Tuple = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want the padded length to be a round multiple of 8/16 (see the sketch after this function)
if accelerator.mixed_precision == "fp8":
_UpperCAmelCase : List[str] = 16
elif accelerator.mixed_precision != "no":
_UpperCAmelCase : Any = 8
else:
_UpperCAmelCase : Dict = None
return tokenizer.pad(
lowerCAmelCase_ , padding="""longest""" , max_length=lowerCAmelCase_ , pad_to_multiple_of=lowerCAmelCase_ , return_tensors="""pt""" , )
# Instantiate dataloaders.
_UpperCAmelCase : Union[str, Any] = DataLoader(
tokenized_datasets["""train"""] , shuffle=lowerCAmelCase_ , collate_fn=lowerCAmelCase_ , batch_size=lowerCAmelCase_ )
_UpperCAmelCase : Union[str, Any] = DataLoader(
tokenized_datasets["""validation"""] , shuffle=lowerCAmelCase_ , collate_fn=lowerCAmelCase_ , batch_size=lowerCAmelCase_ )
_UpperCAmelCase : Dict = DataLoader(
tokenized_datasets["""test"""] , shuffle=lowerCAmelCase_ , collate_fn=lowerCAmelCase_ , batch_size=lowerCAmelCase_ )
return train_dataloader, eval_dataloader, test_dataloader
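# Illustrative (not in the original script): `pad_to_multiple_of` in collate_fn
# rounds the padded length up to the next multiple, keeping tensor shapes
# friendly to fp16/bf16 kernels. The helper name below is hypothetical.
def _demo_round_up(length , multiple ):
    return ((length + multiple - 1) // multiple) * multiple  # e.g. 37, 8 -> 40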
def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ )-> Optional[int]:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = []
# Download the dataset
_UpperCAmelCase : Dict = load_dataset("""glue""" , """mrpc""" )
# Create our splits
_UpperCAmelCase : Optional[Any] = StratifiedKFold(n_splits=int(args.num_folds ) )
# Initialize accelerator
_UpperCAmelCase : Union[str, Any] = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_UpperCAmelCase : Dict = config["""lr"""]
_UpperCAmelCase : List[Any] = int(config["""num_epochs"""] )
_UpperCAmelCase : str = int(config["""seed"""] )
_UpperCAmelCase : List[Any] = int(config["""batch_size"""] )
_UpperCAmelCase : int = evaluate.load("""glue""" , """mrpc""" )
# If the batch size is too big we use gradient accumulation
_UpperCAmelCase : List[Any] = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
_UpperCAmelCase : Dict = batch_size // MAX_GPU_BATCH_SIZE
_UpperCAmelCase : Tuple = MAX_GPU_BATCH_SIZE
set_seed(lowerCAmelCase_ )
# New Code #
# Create our folds:
_UpperCAmelCase : Any = kfold.split(np.zeros(datasets["""train"""].num_rows ) , datasets["""train"""]["""label"""] )
_UpperCAmelCase : Tuple = []
# Iterate over them
for i, (train_idxs, valid_idxs) in enumerate(lowerCAmelCase_ ):
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase : Union[str, Any] = get_fold_dataloaders(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , )
        # Instantiate the model (we build the model here so that the seed also controls new weight initialization)
_UpperCAmelCase : Tuple = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=lowerCAmelCase_ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
_UpperCAmelCase : List[Any] = model.to(accelerator.device )
# Instantiate optimizer
_UpperCAmelCase : int = AdamW(params=model.parameters() , lr=lowerCAmelCase_ )
# Instantiate scheduler
_UpperCAmelCase : Dict = get_linear_schedule_with_warmup(
optimizer=lowerCAmelCase_ , num_warmup_steps=100 , num_training_steps=(len(lowerCAmelCase_ ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase : Any = accelerator.prepare(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# Now we train the model
for epoch in range(lowerCAmelCase_ ):
model.train()
for step, batch in enumerate(lowerCAmelCase_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
_UpperCAmelCase : Union[str, Any] = model(**lowerCAmelCase_ )
_UpperCAmelCase : Dict = outputs.loss
_UpperCAmelCase : int = loss / gradient_accumulation_steps
accelerator.backward(lowerCAmelCase_ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(lowerCAmelCase_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
_UpperCAmelCase : List[str] = model(**lowerCAmelCase_ )
_UpperCAmelCase : List[Any] = outputs.logits.argmax(dim=-1 )
_UpperCAmelCase ,_UpperCAmelCase : Union[str, Any] = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
metric.add_batch(
predictions=lowerCAmelCase_ , references=lowerCAmelCase_ , )
_UpperCAmelCase : List[Any] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'''epoch {epoch}:''' , lowerCAmelCase_ )
# New Code #
# We also run predictions on the test set at the very end
_UpperCAmelCase : Tuple = []
for step, batch in enumerate(lowerCAmelCase_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
_UpperCAmelCase : List[Any] = model(**lowerCAmelCase_ )
_UpperCAmelCase : Any = outputs.logits
_UpperCAmelCase ,_UpperCAmelCase : List[Any] = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
fold_predictions.append(predictions.cpu() )
if i == 0:
                # We only need the gathered test references once; they are identical across folds
                test_references.append(references.cpu() )
            # Concatenate this fold's batch predictions into a single tensor.
            test_predictions.append(torch.cat(lowerCAmelCase_ , dim=0 ) )
            # We now need to release all our memory and get rid of the current model, optimizer, etc.
accelerator.free_memory()
# New Code #
# Finally we check the accuracy of our folded results:
_UpperCAmelCase : List[Any] = torch.cat(lowerCAmelCase_ , dim=0 )
_UpperCAmelCase : Union[str, Any] = torch.stack(lowerCAmelCase_ , dim=0 ).sum(dim=0 ).div(int(args.num_folds ) ).argmax(dim=-1 )
_UpperCAmelCase : List[str] = metric.compute(predictions=lowerCAmelCase_ , references=lowerCAmelCase_ )
accelerator.print("""Average test metrics from all folds:""" , lowerCAmelCase_ )
def snake_case_ ( )-> Any:
'''simple docstring'''
_UpperCAmelCase : List[str] = argparse.ArgumentParser(description="""Simple example of training script.""" )
parser.add_argument(
"""--mixed_precision""" , type=lowerCAmelCase_ , default=lowerCAmelCase_ , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" , )
parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
# New Code #
parser.add_argument("""--num_folds""" , type=lowerCAmelCase_ , default=3 , help="""The number of splits to perform across the dataset""" )
_UpperCAmelCase : Optional[int] = parser.parse_args()
_UpperCAmelCase : Tuple = {"""lr""": 2e-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
training_function(lowerCAmelCase_ , lowerCAmelCase_ )
if __name__ == "__main__":
main()
| 349 | 1 |
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def _snake_case ( self ) -> Union[str, Any]:
_UpperCAmelCase : Tuple = tempfile.mkdtemp()
_UpperCAmelCase : Optional[int] = BlipImageProcessor()
_UpperCAmelCase : Dict = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-BertModel""" )
_UpperCAmelCase : List[str] = BlipProcessor(a_ ,a_ )
processor.save_pretrained(self.tmpdirname )
def _snake_case ( self ,**a_ ) -> Any:
return AutoProcessor.from_pretrained(self.tmpdirname ,**a_ ).tokenizer
def _snake_case ( self ,**a_ ) -> Tuple:
return AutoProcessor.from_pretrained(self.tmpdirname ,**a_ ).image_processor
def _snake_case ( self ) -> Tuple:
shutil.rmtree(self.tmpdirname )
def _snake_case ( self ) -> Dict:
_UpperCAmelCase : List[Any] = [np.random.randint(255 ,size=(3, 30, 400) ,dtype=np.uinta )]
_UpperCAmelCase : List[str] = [Image.fromarray(np.moveaxis(a_ ,0 ,-1 ) ) for x in image_inputs]
return image_inputs
def _snake_case ( self ) -> List[str]:
_UpperCAmelCase : List[str] = BlipProcessor(tokenizer=self.get_tokenizer() ,image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_UpperCAmelCase : str = self.get_tokenizer(bos_token="""(BOS)""" ,eos_token="""(EOS)""" )
_UpperCAmelCase : int = self.get_image_processor(do_normalize=a_ ,padding_value=1.0 )
_UpperCAmelCase : str = BlipProcessor.from_pretrained(
self.tmpdirname ,bos_token="""(BOS)""" ,eos_token="""(EOS)""" ,do_normalize=a_ ,padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer ,a_ )
self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor ,a_ )
def _snake_case ( self ) -> Dict:
_UpperCAmelCase : Any = self.get_image_processor()
_UpperCAmelCase : int = self.get_tokenizer()
_UpperCAmelCase : str = BlipProcessor(tokenizer=a_ ,image_processor=a_ )
_UpperCAmelCase : int = self.prepare_image_inputs()
_UpperCAmelCase : Union[str, Any] = image_processor(a_ ,return_tensors="""np""" )
_UpperCAmelCase : Any = processor(images=a_ ,return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1E-2 )
def _snake_case ( self ) -> Union[str, Any]:
_UpperCAmelCase : List[str] = self.get_image_processor()
_UpperCAmelCase : List[Any] = self.get_tokenizer()
_UpperCAmelCase : Any = BlipProcessor(tokenizer=a_ ,image_processor=a_ )
_UpperCAmelCase : int = """lower newer"""
_UpperCAmelCase : Tuple = processor(text=a_ )
_UpperCAmelCase : Tuple = tokenizer(a_ ,return_token_type_ids=a_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] ,encoded_processor[key] )
def _snake_case ( self ) -> int:
_UpperCAmelCase : Optional[Any] = self.get_image_processor()
_UpperCAmelCase : Any = self.get_tokenizer()
_UpperCAmelCase : List[str] = BlipProcessor(tokenizer=a_ ,image_processor=a_ )
_UpperCAmelCase : List[str] = """lower newer"""
_UpperCAmelCase : Tuple = self.prepare_image_inputs()
_UpperCAmelCase : List[str] = processor(text=a_ ,images=a_ )
self.assertListEqual(list(inputs.keys() ) ,["""pixel_values""", """input_ids""", """attention_mask"""] )
# test if it raises when no input is passed
with pytest.raises(a_ ):
processor()
def _snake_case ( self ) -> Union[str, Any]:
_UpperCAmelCase : Union[str, Any] = self.get_image_processor()
_UpperCAmelCase : Any = self.get_tokenizer()
_UpperCAmelCase : Union[str, Any] = BlipProcessor(tokenizer=a_ ,image_processor=a_ )
_UpperCAmelCase : Optional[int] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_UpperCAmelCase : Optional[Any] = processor.batch_decode(a_ )
_UpperCAmelCase : Tuple = tokenizer.batch_decode(a_ )
self.assertListEqual(a_ ,a_ )
def _snake_case ( self ) -> int:
_UpperCAmelCase : Optional[Any] = self.get_image_processor()
_UpperCAmelCase : str = self.get_tokenizer()
_UpperCAmelCase : Optional[int] = BlipProcessor(tokenizer=a_ ,image_processor=a_ )
_UpperCAmelCase : Union[str, Any] = """lower newer"""
_UpperCAmelCase : Optional[int] = self.prepare_image_inputs()
_UpperCAmelCase : List[str] = processor(text=a_ ,images=a_ )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) ,["""pixel_values""", """input_ids""", """attention_mask"""] )
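# Illustrative sketch (not part of the tests): conceptually, BlipProcessor fans
# text out to its tokenizer and images out to its image processor, then merges
# the two encodings. The helper name `_demo_process` is hypothetical.
def _demo_process(tokenizer , image_processor , text=None , images=None ):
    if text is None and images is None:
        raise ValueError("""You have to specify either text or images.""" )
    encoding = {}
    if images is not None:
        encoding.update(image_processor(images , return_tensors="""np""" ) )
    if text is not None:
        encoding.update(tokenizer(text ) )
    return encoding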
| 349 |
'''simple docstring'''
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
logger = logging.getLogger(__name__)
class lowercase ( _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase = """sequence-classification"""
def __init__( self ,a_ ) -> Dict:
if type(a_ ) == dict:
_UpperCAmelCase : Tuple = Namespace(**a_ )
_UpperCAmelCase : Optional[int] = glue_output_modes[hparams.task]
_UpperCAmelCase : Union[str, Any] = glue_tasks_num_labels[hparams.task]
super().__init__(a_ ,a_ ,self.mode )
def _snake_case ( self ,**a_ ) -> Optional[Any]:
return self.model(**a_ )
def _snake_case ( self ,a_ ,a_ ) -> Optional[Any]:
_UpperCAmelCase : Optional[Any] = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
_UpperCAmelCase : Any = batch[2] if self.config.model_type in ["""bert""", """xlnet""", """albert"""] else None
_UpperCAmelCase : Any = self(**a_ )
_UpperCAmelCase : int = outputs[0]
_UpperCAmelCase : Any = self.trainer.lr_schedulers[0]["""scheduler"""]
_UpperCAmelCase : Any = {"""loss""": loss, """rate""": lr_scheduler.get_last_lr()[-1]}
return {"loss": loss, "log": tensorboard_logs}
def _snake_case ( self ) -> int:
_UpperCAmelCase : Optional[int] = self.hparams
_UpperCAmelCase : int = processors[args.task]()
_UpperCAmelCase : str = processor.get_labels()
for mode in ["train", "dev"]:
_UpperCAmelCase : Tuple = self._feature_file(a_ )
if os.path.exists(a_ ) and not args.overwrite_cache:
logger.info("""Loading features from cached file %s""" ,a_ )
else:
logger.info("""Creating features from dataset file at %s""" ,args.data_dir )
_UpperCAmelCase : List[Any] = (
processor.get_dev_examples(args.data_dir )
if mode == """dev"""
else processor.get_train_examples(args.data_dir )
)
_UpperCAmelCase : Union[str, Any] = convert_examples_to_features(
a_ ,self.tokenizer ,max_length=args.max_seq_length ,label_list=self.labels ,output_mode=args.glue_output_mode ,)
logger.info("""Saving features into cached file %s""" ,a_ )
torch.save(a_ ,a_ )
def _snake_case ( self ,a_ ,a_ ,a_ = False ) -> DataLoader:
_UpperCAmelCase : Union[str, Any] = """dev""" if mode == """test""" else mode
_UpperCAmelCase : Tuple = self._feature_file(a_ )
logger.info("""Loading features from cached file %s""" ,a_ )
_UpperCAmelCase : Union[str, Any] = torch.load(a_ )
_UpperCAmelCase : List[str] = torch.tensor([f.input_ids for f in features] ,dtype=torch.long )
_UpperCAmelCase : Tuple = torch.tensor([f.attention_mask for f in features] ,dtype=torch.long )
_UpperCAmelCase : str = torch.tensor([f.token_type_ids for f in features] ,dtype=torch.long )
if self.hparams.glue_output_mode == "classification":
_UpperCAmelCase : Optional[int] = torch.tensor([f.label for f in features] ,dtype=torch.long )
elif self.hparams.glue_output_mode == "regression":
_UpperCAmelCase : str = torch.tensor([f.label for f in features] ,dtype=torch.float )
return DataLoader(
TensorDataset(a_ ,a_ ,a_ ,a_ ) ,batch_size=a_ ,shuffle=a_ ,)
def _snake_case ( self ,a_ ,a_ ) -> Any:
_UpperCAmelCase : Any = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
_UpperCAmelCase : int = batch[2] if self.config.model_type in ["""bert""", """xlnet""", """albert"""] else None
_UpperCAmelCase : List[str] = self(**a_ )
_UpperCAmelCase ,_UpperCAmelCase : Optional[int] = outputs[:2]
_UpperCAmelCase : List[str] = logits.detach().cpu().numpy()
_UpperCAmelCase : Union[str, Any] = inputs["""labels"""].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def _snake_case ( self ,a_ ) -> tuple:
_UpperCAmelCase : Optional[int] = torch.stack([x["""val_loss"""] for x in outputs] ).mean().detach().cpu().item()
_UpperCAmelCase : Any = np.concatenate([x["""pred"""] for x in outputs] ,axis=0 )
if self.hparams.glue_output_mode == "classification":
_UpperCAmelCase : int = np.argmax(a_ ,axis=1 )
elif self.hparams.glue_output_mode == "regression":
_UpperCAmelCase : Union[str, Any] = np.squeeze(a_ )
_UpperCAmelCase : str = np.concatenate([x["""target"""] for x in outputs] ,axis=0 )
_UpperCAmelCase : Tuple = [[] for _ in range(out_label_ids.shape[0] )]
_UpperCAmelCase : Optional[int] = [[] for _ in range(out_label_ids.shape[0] )]
_UpperCAmelCase : Optional[int] = {**{"""val_loss""": val_loss_mean}, **compute_metrics(self.hparams.task ,a_ ,a_ )}
_UpperCAmelCase : Dict = dict(results.items() )
_UpperCAmelCase : Any = results
return ret, preds_list, out_label_list
def _snake_case ( self ,a_ ) -> dict:
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase : Dict = self._eval_end(a_ )
_UpperCAmelCase : List[Any] = ret["""log"""]
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def _snake_case ( self ,a_ ) -> dict:
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase : str = self._eval_end(a_ )
_UpperCAmelCase : List[Any] = ret["""log"""]
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def _snake_case ( a_ ,a_ ) -> Any:
BaseTransformer.add_model_specific_args(a_ ,a_ )
parser.add_argument(
"""--max_seq_length""" ,default=128 ,type=a_ ,help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) ,)
parser.add_argument(
"""--task""" ,default="""""" ,type=a_ ,required=a_ ,help="""The GLUE task to run""" ,)
parser.add_argument(
"""--gpus""" ,default=0 ,type=a_ ,help="""The number of GPUs allocated for this, it is by default 0 meaning none""" ,)
parser.add_argument(
"""--overwrite_cache""" ,action="""store_true""" ,help="""Overwrite the cached training and evaluation sets""" )
return parser
def snake_case_ ( )-> Tuple:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = argparse.ArgumentParser()
add_generic_args(lowerCAmelCase_ , os.getcwd() )
_UpperCAmelCase : Optional[int] = GLUETransformer.add_model_specific_args(lowerCAmelCase_ , os.getcwd() )
_UpperCAmelCase : Optional[int] = parser.parse_args()
# If output_dir not provided, a folder will be generated in pwd
if args.output_dir is None:
_UpperCAmelCase : Optional[int] = os.path.join(
"""./results""" , F'''{args.task}_{time.strftime('%Y%m%d_%H%M%S' )}''' , )
os.makedirs(args.output_dir )
_UpperCAmelCase : int = GLUETransformer(lowerCAmelCase_ )
_UpperCAmelCase : Any = generic_train(lowerCAmelCase_ , lowerCAmelCase_ )
# Optionally, predict on dev set and write to output_dir
if args.do_predict:
_UpperCAmelCase : int = sorted(glob.glob(os.path.join(args.output_dir , """checkpoint-epoch=*.ckpt""" ) , recursive=lowerCAmelCase_ ) )
_UpperCAmelCase : int = model.load_from_checkpoint(checkpoints[-1] )
return trainer.test(lowerCAmelCase_ )
if __name__ == "__main__":
main()
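# Illustrative sketch (not part of the script): the cache-or-compute pattern
# used in `prepare_data` above, in minimal form; `_demo_cached` is a
# hypothetical name.
def _demo_cached(cached_features_file , compute_features ):
    if os.path.exists(cached_features_file ):
        return torch.load(cached_features_file )
    features = compute_features()
    torch.save(features , cached_features_file )
    return features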
| 349 | 1 |
'''simple docstring'''
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def _snake_case ( self ) -> Dict:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _snake_case ( self ) -> Optional[int]:
_UpperCAmelCase : List[str] = 1
_UpperCAmelCase : List[str] = 3
_UpperCAmelCase : Union[str, Any] = (32, 32)
_UpperCAmelCase : str = floats_tensor((batch_size, num_channels) + sizes ,rng=random.Random(0 ) ).to(a_ )
return image
@property
def _snake_case ( self ) -> List[Any]:
torch.manual_seed(0 )
_UpperCAmelCase : List[str] = UNetaDConditionModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") ,up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") ,cross_attention_dim=32 ,)
return model
@property
def _snake_case ( self ) -> Optional[int]:
torch.manual_seed(0 )
_UpperCAmelCase : str = AutoencoderKL(
block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] ,up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] ,latent_channels=4 ,)
return model
@property
def _snake_case ( self ) -> Dict:
torch.manual_seed(0 )
_UpperCAmelCase : Any = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1E-0_5 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1_000 ,)
return CLIPTextModel(a_ )
@property
def _snake_case ( self ) -> Union[str, Any]:
def extract(*a_ ,**a_ ):
class lowercase :
"""simple docstring"""
def __init__( self ) -> Any:
_UpperCAmelCase : str = torch.ones([0] )
def _snake_case ( self ,a_ ) -> Any:
self.pixel_values.to(a_ )
return self
return Out()
return extract
def _snake_case ( self ) -> List[str]:
_UpperCAmelCase : List[str] = """cpu""" # ensure determinism for the device-dependent torch.Generator
_UpperCAmelCase : Union[str, Any] = self.dummy_cond_unet
_UpperCAmelCase : int = DDIMScheduler(
beta_start=0.0_0085 ,beta_end=0.012 ,beta_schedule="""scaled_linear""" ,clip_sample=a_ ,set_alpha_to_one=a_ ,)
_UpperCAmelCase : Optional[int] = self.dummy_vae
_UpperCAmelCase : Optional[int] = self.dummy_text_encoder
_UpperCAmelCase : str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# make sure here that pndm scheduler skips prk
_UpperCAmelCase : int = StableDiffusionPipeline(
unet=a_ ,scheduler=a_ ,vae=a_ ,text_encoder=a_ ,tokenizer=a_ ,safety_checker=a_ ,feature_extractor=self.dummy_extractor ,)
_UpperCAmelCase : Optional[Any] = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
_UpperCAmelCase : Union[str, Any] = """A painting of a squirrel eating a burger"""
_UpperCAmelCase : Optional[int] = torch.Generator(device=a_ ).manual_seed(0 )
_UpperCAmelCase : str = sd_pipe([prompt] ,generator=a_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" )
_UpperCAmelCase : int = output.images
_UpperCAmelCase : Union[str, Any] = torch.Generator(device=a_ ).manual_seed(0 )
_UpperCAmelCase : str = sd_pipe(
[prompt] ,generator=a_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" ,return_dict=a_ ,)[0]
_UpperCAmelCase : str = image[0, -3:, -3:, -1]
_UpperCAmelCase : Dict = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_UpperCAmelCase : Optional[int] = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _snake_case ( self ) -> Any:
_UpperCAmelCase : Any = """cpu""" # ensure determinism for the device-dependent torch.Generator
_UpperCAmelCase : Tuple = self.dummy_cond_unet
_UpperCAmelCase : Optional[int] = PNDMScheduler(skip_prk_steps=a_ )
_UpperCAmelCase : int = self.dummy_vae
_UpperCAmelCase : int = self.dummy_text_encoder
_UpperCAmelCase : Union[str, Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# make sure here that pndm scheduler skips prk
_UpperCAmelCase : str = StableDiffusionPipeline(
unet=a_ ,scheduler=a_ ,vae=a_ ,text_encoder=a_ ,tokenizer=a_ ,safety_checker=a_ ,feature_extractor=self.dummy_extractor ,)
_UpperCAmelCase : str = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
_UpperCAmelCase : int = """A painting of a squirrel eating a burger"""
_UpperCAmelCase : Any = torch.Generator(device=a_ ).manual_seed(0 )
_UpperCAmelCase : List[Any] = sd_pipe([prompt] ,generator=a_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" )
_UpperCAmelCase : Dict = output.images
_UpperCAmelCase : List[Any] = torch.Generator(device=a_ ).manual_seed(0 )
_UpperCAmelCase : Any = sd_pipe(
[prompt] ,generator=a_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" ,return_dict=a_ ,)[0]
_UpperCAmelCase : Optional[int] = image[0, -3:, -3:, -1]
_UpperCAmelCase : int = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_UpperCAmelCase : Union[str, Any] = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _snake_case ( self ) -> Optional[int]:
_UpperCAmelCase : Optional[int] = StableDiffusionPipeline.from_pretrained(
"""hf-internal-testing/tiny-stable-diffusion-lms-pipe""" ,safety_checker=a_ )
assert isinstance(a_ ,a_ )
assert isinstance(pipe.scheduler ,a_ )
assert pipe.safety_checker is None
_UpperCAmelCase : Dict = pipe("""example prompt""" ,num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(a_ )
_UpperCAmelCase : Any = StableDiffusionPipeline.from_pretrained(a_ )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
_UpperCAmelCase : Union[str, Any] = pipe("""example prompt""" ,num_inference_steps=2 ).images[0]
assert image is not None
@unittest.skipIf(torch_device != """cuda""" ,"""This test requires a GPU""" )
def _snake_case ( self ) -> str:
_UpperCAmelCase : Optional[int] = self.dummy_cond_unet
_UpperCAmelCase : str = PNDMScheduler(skip_prk_steps=a_ )
_UpperCAmelCase : List[str] = self.dummy_vae
_UpperCAmelCase : int = self.dummy_text_encoder
_UpperCAmelCase : str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# put models in fp16
_UpperCAmelCase : str = unet.half()
_UpperCAmelCase : List[str] = vae.half()
_UpperCAmelCase : Dict = bert.half()
# make sure here that pndm scheduler skips prk
_UpperCAmelCase : Dict = StableDiffusionPipeline(
unet=a_ ,scheduler=a_ ,vae=a_ ,text_encoder=a_ ,tokenizer=a_ ,safety_checker=a_ ,feature_extractor=self.dummy_extractor ,)
_UpperCAmelCase : List[str] = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
_UpperCAmelCase : str = """A painting of a squirrel eating a burger"""
_UpperCAmelCase : int = sd_pipe([prompt] ,num_inference_steps=2 ,output_type="""np""" ).images
assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def _snake_case ( self ) -> Any:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _snake_case ( self ) -> str:
_UpperCAmelCase : List[str] = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" ,safety_checker=a_ )
_UpperCAmelCase : Dict = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
_UpperCAmelCase : int = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
_UpperCAmelCase : List[Any] = (
"""portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"""
""" coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"""
""" anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"""
""" children from bahnhof zoo, detailed """
)
_UpperCAmelCase : Any = 4_003_660_346
_UpperCAmelCase : List[Any] = 7
# without safety guidance (sld_guidance_scale = 0)
_UpperCAmelCase : int = torch.manual_seed(a_ )
_UpperCAmelCase : str = sd_pipe(
[prompt] ,generator=a_ ,guidance_scale=a_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=0 ,)
_UpperCAmelCase : str = output.images
_UpperCAmelCase : Optional[int] = image[0, -3:, -3:, -1]
_UpperCAmelCase : List[str] = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        # with safety guidance (strong configuration)
_UpperCAmelCase : List[str] = torch.manual_seed(a_ )
_UpperCAmelCase : Optional[Any] = sd_pipe(
[prompt] ,generator=a_ ,guidance_scale=a_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=2_000 ,sld_warmup_steps=7 ,sld_threshold=0.025 ,sld_momentum_scale=0.5 ,sld_mom_beta=0.7 ,)
_UpperCAmelCase : List[str] = output.images
_UpperCAmelCase : List[str] = image[0, -3:, -3:, -1]
_UpperCAmelCase : List[str] = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _snake_case ( self ) -> int:
_UpperCAmelCase : Any = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" ,safety_checker=a_ )
_UpperCAmelCase : Union[str, Any] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
_UpperCAmelCase : Union[str, Any] = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
_UpperCAmelCase : Any = """padme amidala taking a bath artwork, safe for work, no nudity"""
_UpperCAmelCase : Optional[Any] = 2_734_971_755
_UpperCAmelCase : Optional[int] = 7
_UpperCAmelCase : int = torch.manual_seed(a_ )
_UpperCAmelCase : int = sd_pipe(
[prompt] ,generator=a_ ,guidance_scale=a_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=0 ,)
_UpperCAmelCase : Optional[int] = output.images
_UpperCAmelCase : List[Any] = image[0, -3:, -3:, -1]
_UpperCAmelCase : Optional[int] = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
_UpperCAmelCase : Optional[int] = torch.manual_seed(a_ )
_UpperCAmelCase : int = sd_pipe(
[prompt] ,generator=a_ ,guidance_scale=a_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=2_000 ,sld_warmup_steps=7 ,sld_threshold=0.025 ,sld_momentum_scale=0.5 ,sld_mom_beta=0.7 ,)
_UpperCAmelCase : Union[str, Any] = output.images
_UpperCAmelCase : Any = image[0, -3:, -3:, -1]
_UpperCAmelCase : List[Any] = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _snake_case ( self ) -> Any:
_UpperCAmelCase : Any = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" )
_UpperCAmelCase : List[str] = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
_UpperCAmelCase : Optional[int] = (
"""the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."""
""" leyendecker"""
)
_UpperCAmelCase : Dict = 1_044_355_234
_UpperCAmelCase : int = 12
_UpperCAmelCase : Optional[Any] = torch.manual_seed(a_ )
_UpperCAmelCase : List[str] = sd_pipe(
[prompt] ,generator=a_ ,guidance_scale=a_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=0 ,)
_UpperCAmelCase : List[str] = output.images
_UpperCAmelCase : Union[str, Any] = image[0, -3:, -3:, -1]
_UpperCAmelCase : Dict = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-7
_UpperCAmelCase : Tuple = torch.manual_seed(a_ )
_UpperCAmelCase : Dict = sd_pipe(
[prompt] ,generator=a_ ,guidance_scale=a_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=2_000 ,sld_warmup_steps=7 ,sld_threshold=0.025 ,sld_momentum_scale=0.5 ,sld_mom_beta=0.7 ,)
_UpperCAmelCase : Optional[Any] = output.images
_UpperCAmelCase : Dict = image[0, -3:, -3:, -1]
_UpperCAmelCase : int = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
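# Illustrative sketch (not part of the tests): the golden-slice pattern used
# throughout — compare a small corner of the generated image against stored
# reference values. The helper name below is hypothetical.
def _demo_slice_close(image , expected_slice , atol=1E-2 ):
    image_slice = image[0, -3:, -3:, -1]  # 3x3 corner of the last channel
    return np.abs(image_slice.flatten() - expected_slice ).max() < atol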
| 349 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A_ : List[Any] = logging.get_logger(__name__)
A_ : Union[str, Any] = {
"""junnyu/roformer_chinese_small""": """https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json""",
"""junnyu/roformer_chinese_base""": """https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json""",
"""junnyu/roformer_chinese_char_small""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json"""
),
"""junnyu/roformer_chinese_char_base""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json"""
),
"""junnyu/roformer_small_discriminator""": (
"""https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json"""
),
"""junnyu/roformer_small_generator""": (
"""https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json"""
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class lowercase ( _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase = """roformer"""
def __init__( self ,a_=50_000 ,a_=None ,a_=768 ,a_=12 ,a_=12 ,a_=3_072 ,a_="gelu" ,a_=0.1 ,a_=0.1 ,a_=1_536 ,a_=2 ,a_=0.02 ,a_=1E-1_2 ,a_=0 ,a_=False ,a_=True ,**a_ ,) -> Tuple:
super().__init__(pad_token_id=a_ ,**a_ )
_UpperCAmelCase : List[Any] = vocab_size
_UpperCAmelCase : str = hidden_size if embedding_size is None else embedding_size
_UpperCAmelCase : List[Any] = hidden_size
_UpperCAmelCase : str = num_hidden_layers
_UpperCAmelCase : Optional[Any] = num_attention_heads
_UpperCAmelCase : Optional[Any] = hidden_act
_UpperCAmelCase : str = intermediate_size
_UpperCAmelCase : Optional[Any] = hidden_dropout_prob
_UpperCAmelCase : Any = attention_probs_dropout_prob
_UpperCAmelCase : Optional[int] = max_position_embeddings
_UpperCAmelCase : Any = type_vocab_size
_UpperCAmelCase : Tuple = initializer_range
_UpperCAmelCase : Dict = layer_norm_eps
_UpperCAmelCase : Optional[int] = rotary_value
_UpperCAmelCase : Any = use_cache
class lowercase ( _lowerCamelCase ):
"""simple docstring"""
@property
def _snake_case ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
_UpperCAmelCase : Optional[Any] = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
_UpperCAmelCase : List[Any] = {0: """batch""", 1: """sequence"""}
_UpperCAmelCase : Tuple = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
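# Illustrative (not part of this file): the ONNX input mapping produced above
# for the default task — every input gets dynamic batch/sequence axes. The
# helper name below is hypothetical.
def _demo_onnx_inputs():
    dynamic_axis = {0: """batch""", 1: """sequence"""}
    return OrderedDict(
        [
            ("""input_ids""", dynamic_axis),
            ("""attention_mask""", dynamic_axis),
            ("""token_type_ids""", dynamic_axis),
        ] )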
| 349 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"""configuration_swiftformer""": [
"""SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""SwiftFormerConfig""",
"""SwiftFormerOnnxConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_swiftformer"""] = [
"""SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SwiftFormerForImageClassification""",
"""SwiftFormerModel""",
"""SwiftFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
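# Illustrative sketch (not part of this module): `_LazyModule` defers the heavy
# imports above until an attribute is first accessed. The standalone helper
# below (a hypothetical name) shows the same resolution idea in miniature,
# e.g. _demo_lazy_resolve("sqrt")(9.0) -> 3.0.
def _demo_lazy_resolve(name ):
    import importlib

    structure = {"""math""": ["""sqrt"""]}
    for module_name, names in structure.items():
        if name in names:
            return getattr(importlib.import_module(module_name ) , name )
    raise AttributeError(name )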
| 349 |
'''simple docstring'''
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class lowercase ( _lowerCamelCase ):
"""simple docstring"""
@slow
@require_torch
def _snake_case ( self ) -> Union[str, Any]:
_UpperCAmelCase : Tuple = EncoderDecoderModel.from_encoder_decoder_pretrained("""prajjwal1/bert-tiny""" ,"""prajjwal1/bert-tiny""" )
_UpperCAmelCase : List[Any] = BertTokenizer.from_pretrained("""bert-base-uncased""" )
_UpperCAmelCase : List[Any] = bertabert.config.encoder.vocab_size
_UpperCAmelCase : Optional[int] = tokenizer.sep_token_id
_UpperCAmelCase : Union[str, Any] = tokenizer.cls_token_id
_UpperCAmelCase : str = 128
_UpperCAmelCase : List[str] = datasets.load_dataset("""cnn_dailymail""" ,"""3.0.0""" ,split="""train[:1%]""" )
_UpperCAmelCase : Union[str, Any] = datasets.load_dataset("""cnn_dailymail""" ,"""3.0.0""" ,split="""validation[:1%]""" )
_UpperCAmelCase : Any = train_dataset.select(range(32 ) )
_UpperCAmelCase : Any = val_dataset.select(range(16 ) )
_UpperCAmelCase : List[Any] = 4
def _map_to_encoder_decoder_inputs(a_ ):
# Tokenizer will automatically set [BOS] <text> [EOS]
_UpperCAmelCase : int = tokenizer(batch["""article"""] ,padding="""max_length""" ,truncation=a_ ,max_length=512 )
_UpperCAmelCase : Tuple = tokenizer(batch["""highlights"""] ,padding="""max_length""" ,truncation=a_ ,max_length=128 )
_UpperCAmelCase : int = inputs.input_ids
_UpperCAmelCase : Union[str, Any] = inputs.attention_mask
_UpperCAmelCase : Union[str, Any] = outputs.input_ids
_UpperCAmelCase : Dict = outputs.input_ids.copy()
_UpperCAmelCase : Dict = [
[-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["""labels"""]
]
_UpperCAmelCase : Optional[int] = outputs.attention_mask
assert all(len(a_ ) == 512 for x in inputs.input_ids )
assert all(len(a_ ) == 128 for x in outputs.input_ids )
return batch
def _compute_metrics(a_ ):
_UpperCAmelCase : Optional[int] = pred.label_ids
_UpperCAmelCase : Optional[int] = pred.predictions
# all unnecessary tokens are removed
_UpperCAmelCase : Union[str, Any] = tokenizer.batch_decode(a_ ,skip_special_tokens=a_ )
_UpperCAmelCase : str = tokenizer.batch_decode(a_ ,skip_special_tokens=a_ )
_UpperCAmelCase : Tuple = sum([int(pred_str[i] == label_str[i] ) for i in range(len(a_ ) )] ) / len(a_ )
return {"accuracy": accuracy}
# map train dataset
_UpperCAmelCase : Union[str, Any] = train_dataset.map(
_map_to_encoder_decoder_inputs ,batched=a_ ,batch_size=a_ ,remove_columns=["""article""", """highlights"""] ,)
train_dataset.set_format(
type="""torch""" ,columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] ,)
# same for validation dataset
_UpperCAmelCase : List[str] = val_dataset.map(
_map_to_encoder_decoder_inputs ,batched=a_ ,batch_size=a_ ,remove_columns=["""article""", """highlights"""] ,)
val_dataset.set_format(
type="""torch""" ,columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] ,)
_UpperCAmelCase : Optional[int] = self.get_auto_remove_tmp_dir()
_UpperCAmelCase : List[str] = SeqaSeqTrainingArguments(
output_dir=a_ ,per_device_train_batch_size=a_ ,per_device_eval_batch_size=a_ ,predict_with_generate=a_ ,evaluation_strategy="""steps""" ,do_train=a_ ,do_eval=a_ ,warmup_steps=0 ,eval_steps=2 ,logging_steps=2 ,)
# instantiate trainer
_UpperCAmelCase : int = SeqaSeqTrainer(
model=a_ ,args=a_ ,compute_metrics=_compute_metrics ,train_dataset=a_ ,eval_dataset=a_ ,tokenizer=a_ ,)
# start training
trainer.train()
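# Illustrative (not part of the test): label tokens equal to the pad id are
# replaced by -100 above so that the cross-entropy loss ignores padding
# positions. The helper name below is hypothetical.
def _demo_mask_pad_labels(labels , pad_token_id ):
    return [[-100 if token == pad_token_id else token for token in seq] for seq in labels]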
| 349 | 1 |
'''simple docstring'''
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def snake_case_ ( lowerCAmelCase_ = "isbn/0140328726" )-> dict:
'''simple docstring'''
_UpperCAmelCase : List[Any] = olid.strip().strip("""/""" ) # Remove leading/trailing whitespace & slashes
if new_olid.count("""/""" ) != 1:
_UpperCAmelCase : List[Any] = F'''{olid} is not a valid Open Library olid'''
raise ValueError(lowerCAmelCase_ )
return requests.get(F'''https://openlibrary.org/{new_olid}.json''' ).json()
def snake_case_ ( lowerCAmelCase_ )-> dict:
'''simple docstring'''
_UpperCAmelCase : Dict = {
"""title""": """Title""",
"""publish_date""": """Publish date""",
"""authors""": """Authors""",
"""number_of_pages""": """Number of pages:""",
"""first_sentence""": """First sentence""",
"""isbn_10""": """ISBN (10)""",
"""isbn_13""": """ISBN (13)""",
}
_UpperCAmelCase : Optional[int] = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
_UpperCAmelCase : Optional[int] = [
get_openlibrary_data(author["""key"""] )["""name"""] for author in data["""Authors"""]
]
_UpperCAmelCase : str = data["""First sentence"""]["""value"""]
for key, value in data.items():
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
_UpperCAmelCase : List[str] = """, """.join(lowerCAmelCase_ )
return data
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
    isbn = input("""\nEnter the ISBN code to search (or 'quit' to stop): """).strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (1_0, 1_3) or not isbn.isdigit():
print(f"""Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.""")
continue
print(f"""\nSearching Open Library for ISBN: {isbn}...\n""")
try:
        book_summary = summarize_book(get_openlibrary_data(f"""isbn/{isbn}"""))
print("""\n""".join(f"""{key}: {value}""" for key, value in book_summary.items()))
except JSONDecodeError: # Workaround for requests.exceptions.RequestException:
print(f"""Sorry, there are no results for ISBN: {isbn}.""")
| 349 |
'''simple docstring'''
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
AXIS_A = 6_378_137.0
AXIS_B = 6_356_752.314_245
EQUATORIAL_RADIUS = 6_378_137
def lamberts_ellipsoidal_distance( lat1 , lon1 , lat2 , lon2 )-> float:
    '''simple docstring'''
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    # Parametric latitudes
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    b_lat1 = atan((1 - flattening) * tan(radians(lat1 ) ) )
    b_lat2 = atan((1 - flattening) * tan(radians(lat2 ) ) )
    # Compute central angle between two points
    # using haversine theta. sigma = haversine_distance / equatorial radius
    sigma = haversine_distance(lat1 , lon1 , lat2 , lon2 ) / EQUATORIAL_RADIUS
    # Intermediate P and Q values
    p_value = (b_lat1 + b_lat2) / 2
    q_value = (b_lat2 - b_lat1) / 2
    # Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2(P) * cos^2(Q) / cos^2(sigma / 2)
    x_numerator = (sin(p_value ) ** 2) * (cos(q_value ) ** 2)
    x_denominator = cos(sigma / 2 ) ** 2
    x_value = (sigma - sin(sigma )) * (x_numerator / x_denominator)
    # Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2(P) * sin^2(Q) / sin^2(sigma / 2)
    y_numerator = (cos(p_value ) ** 2) * (sin(q_value ) ** 2)
    y_denominator = sin(sigma / 2 ) ** 2
    y_value = (sigma + sin(sigma )) * (y_numerator / y_denominator)
    return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
if __name__ == "__main__":
import doctest
doctest.testmod()
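    # Illustrative usage (not in the original): approximate San Francisco ->
    # New York distance in metres; the coordinates below are assumptions.
    SAN_FRANCISCO = (37.774856, -122.424227)
    NEW_YORK = (40.713019, -74.012647)
    print(lamberts_ellipsoidal_distance(SAN_FRANCISCO[0], SAN_FRANCISCO[1], NEW_YORK[0], NEW_YORK[1]))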
| 349 | 1 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
A_ : str = logging.get_logger(__name__)
def snake_case_ ( lowerCAmelCase_ )-> Any:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = SwinConfig(
embed_dim=192 , depths=(2, 2, 18, 2) , num_heads=(6, 12, 24, 48) , window_size=12 , out_features=["""stage2""", """stage3""", """stage4"""] , )
_UpperCAmelCase : Optional[int] = DetaConfig(
backbone_config=lowerCAmelCase_ , num_queries=900 , encoder_ffn_dim=2048 , decoder_ffn_dim=2048 , num_feature_levels=5 , assign_first_stage=lowerCAmelCase_ , with_box_refine=lowerCAmelCase_ , two_stage=lowerCAmelCase_ , )
# set labels
_UpperCAmelCase : Optional[Any] = """huggingface/label-files"""
if "o365" in model_name:
_UpperCAmelCase : Union[str, Any] = 366
_UpperCAmelCase : Tuple = """object365-id2label.json"""
else:
_UpperCAmelCase : Any = 91
_UpperCAmelCase : str = """coco-detection-id2label.json"""
_UpperCAmelCase : Optional[Any] = num_labels
_UpperCAmelCase : Tuple = json.load(open(cached_download(hf_hub_url(lowerCAmelCase_ , lowerCAmelCase_ , repo_type="""dataset""" ) ) , """r""" ) )
_UpperCAmelCase : Optional[int] = {int(lowerCAmelCase_ ): v for k, v in idalabel.items()}
_UpperCAmelCase : Optional[Any] = idalabel
_UpperCAmelCase : str = {v: k for k, v in idalabel.items()}
return config
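# Illustrative helper (not in the original excerpt): a (src, dest) list like
# the one built below is typically applied to a checkpoint's state dict as
# follows; `_demo_apply_renames` is a hypothetical name.
def _demo_apply_renames(state_dict , rename_keys ):
    for src, dest in rename_keys:
        if src in state_dict:
            state_dict[dest] = state_dict.pop(src )
    return state_dict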
def snake_case_ ( lowerCAmelCase_ )-> Optional[int]:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = []
# stem
# fmt: off
rename_keys.append(("""backbone.0.body.patch_embed.proj.weight""", """model.backbone.model.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""backbone.0.body.patch_embed.proj.bias""", """model.backbone.model.embeddings.patch_embeddings.projection.bias""") )
rename_keys.append(("""backbone.0.body.patch_embed.norm.weight""", """model.backbone.model.embeddings.norm.weight""") )
rename_keys.append(("""backbone.0.body.patch_embed.norm.bias""", """model.backbone.model.embeddings.norm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.norm1.weight''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.norm1.bias''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.norm2.weight''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.norm2.bias''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((F'''backbone.0.body.layers.{i}.downsample.reduction.weight''', F'''model.backbone.model.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.downsample.norm.weight''', F'''model.backbone.model.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.downsample.norm.bias''', F'''model.backbone.model.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append(("""backbone.0.body.norm1.weight""", """model.backbone.model.hidden_states_norms.stage2.weight""") )
rename_keys.append(("""backbone.0.body.norm1.bias""", """model.backbone.model.hidden_states_norms.stage2.bias""") )
rename_keys.append(("""backbone.0.body.norm2.weight""", """model.backbone.model.hidden_states_norms.stage3.weight""") )
rename_keys.append(("""backbone.0.body.norm2.bias""", """model.backbone.model.hidden_states_norms.stage3.bias""") )
rename_keys.append(("""backbone.0.body.norm3.weight""", """model.backbone.model.hidden_states_norms.stage4.weight""") )
rename_keys.append(("""backbone.0.body.norm3.bias""", """model.backbone.model.hidden_states_norms.stage4.bias""") )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight''', F'''model.encoder.layers.{i}.self_attn.sampling_offsets.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias''', F'''model.encoder.layers.{i}.self_attn.sampling_offsets.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.attention_weights.weight''', F'''model.encoder.layers.{i}.self_attn.attention_weights.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.attention_weights.bias''', F'''model.encoder.layers.{i}.self_attn.attention_weights.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.value_proj.weight''', F'''model.encoder.layers.{i}.self_attn.value_proj.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.value_proj.bias''', F'''model.encoder.layers.{i}.self_attn.value_proj.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.output_proj.weight''', F'''model.encoder.layers.{i}.self_attn.output_proj.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.output_proj.bias''', F'''model.encoder.layers.{i}.self_attn.output_proj.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.weight''', F'''model.encoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.bias''', F'''model.encoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''model.encoder.layers.{i}.fc1.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''model.encoder.layers.{i}.fc1.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''model.encoder.layers.{i}.fc2.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''model.encoder.layers.{i}.fc2.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.weight''', F'''model.encoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''model.encoder.layers.{i}.final_layer_norm.bias''') )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight''', F'''model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias''', F'''model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.attention_weights.weight''', F'''model.decoder.layers.{i}.encoder_attn.attention_weights.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.attention_weights.bias''', F'''model.decoder.layers.{i}.encoder_attn.attention_weights.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.value_proj.weight''', F'''model.decoder.layers.{i}.encoder_attn.value_proj.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.value_proj.bias''', F'''model.decoder.layers.{i}.encoder_attn.value_proj.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.output_proj.weight''', F'''model.decoder.layers.{i}.encoder_attn.output_proj.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.output_proj.bias''', F'''model.decoder.layers.{i}.encoder_attn.output_proj.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.weight''', F'''model.decoder.layers.{i}.encoder_attn_layer_norm.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.bias''', F'''model.decoder.layers.{i}.encoder_attn_layer_norm.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', F'''model.decoder.layers.{i}.self_attn.out_proj.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''model.decoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.norm2.weight''', F'''model.decoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.norm2.bias''', F'''model.decoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''model.decoder.layers.{i}.fc1.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''model.decoder.layers.{i}.fc1.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''model.decoder.layers.{i}.fc2.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''model.decoder.layers.{i}.fc2.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.weight''', F'''model.decoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''model.decoder.layers.{i}.final_layer_norm.bias''') )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    """Pop `old` from the state dict and re-insert its value under `new`."""
    val = dct.pop(old)
    dct[new] = val
def read_in_swin_q_k_v(state_dict, backbone_config):
    """Split each fused Swin qkv projection into separate query/key/value entries."""
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
def read_in_decoder_q_k_v(state_dict, config):
    """Split each fused decoder self-attention in_proj into q/k/v projections."""
    hidden_size = config.d_model
    for i in range(config.decoder_layers):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
def prepare_img():
    """Load the standard COCO cats image used to verify the conversion."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deta_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    """Copy/paste/tweak the original DETA weights into the HuggingFace DETA structure."""
    config = get_deta_config(model_name)
    # load original state dict
    if model_name == "deta-swin-large":
        checkpoint_path = hf_hub_download(repo_id="nielsr/deta-checkpoints", filename="adet_swin_ft.pth")
    elif model_name == "deta-swin-large-o365":
        checkpoint_path = hf_hub_download(repo_id="jozhang97/deta-swin-l-o365", filename="deta_swin_pt_o365.pth")
    else:
        raise ValueError(f"Model name {model_name} not supported")
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    # print the original state dict
    for name, param in state_dict.items():
        print(name, param.shape)
    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)
    # fix some prefixes
    for key in state_dict.copy().keys():
        if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer.decoder", "model.decoder")] = val
        if "input_proj" in key:
            val = state_dict.pop(key)
            state_dict["model." + key] = val
        if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer", "model")] = val
    # finally, create HuggingFace model and load state dict
    model = DetaForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    device = "cuda" if torch.cuda.is_available() else "cpu"
    model.to(device)
    # load image processor
    processor = DetaImageProcessor(format="coco_detection")
    # verify our conversion on the image
    img = prepare_img()
    encoding = processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values.to(device))
    # verify logits
    print("Logits:", outputs.logits[0, :3, :3])
    print("Boxes:", outputs.pred_boxes[0, :3, :3])
    if model_name == "deta-swin-large":
        expected_logits = torch.tensor(
            [[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]]
        )
        expected_boxes = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]])
    elif model_name == "deta-swin-large-o365":
        expected_logits = torch.tensor(
            [[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]]
        )
        expected_boxes = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]])
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits.to(device), atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes.to(device), atol=1e-4)
    print("Everything ok!")
    if pytorch_dump_folder_path:
        # Save model and processor
        logger.info(f"Saving PyTorch model and processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)
    # Push to hub
    if push_to_hub:
        print("Pushing model and processor to hub...")
        model.push_to_hub(f"jozhang97/{model_name}")
        processor.push_to_hub(f"jozhang97/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_name",
        type=str,
        default="deta-swin-large",
        choices=["deta-swin-large", "deta-swin-large-o365"],
        help="Name of the model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        help="Path to the folder to output PyTorch model.",
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )
    args = parser.parse_args()
    convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
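# Aside (not part of the original script): the read_in_*_q_k_v helpers above all
# rely on the fused qkv projection storing the query, key and value rows stacked
# on top of each other. A minimal self-contained sketch of that slicing, with a
# made-up `dim`:
import torch

_dim = 4
_fused = torch.randn(3 * _dim, _dim)  # rows: [query; key; value]
_q, _k, _v = _fused[:_dim], _fused[_dim : 2 * _dim], _fused[-_dim:]
assert torch.equal(torch.cat([_q, _k, _v]), _fused)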
| 349 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
def trapezoidal_area(
    fnc: Callable[[float], float],
    x_start: float,
    x_end: float,
    steps: int = 100,
) -> float:
    """Approximate the area between fnc and the x axis on [x_start, x_end] with the trapezoidal rule."""
    x1 = x_start
    fx1 = fnc(x_start)
    area = 0.0
    for _ in range(steps):
        # Approximates small segments of the curve as linear and solves
        # for the trapezoidal area
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        area += abs(fx2 + fx1) * (x2 - x1) / 2
        # Increment step
        x1 = x2
        fx1 = fx2
    return area
if __name__ == "__main__":
    def f(x: float) -> float:
        return x**3 + x**2
    print("f(x) = x^3 + x^2")
    print("The area between the curve, x = -5, x = 5 and the x axis is:")
    i = 10
    while i <= 100_000:
        print(f"with {i} steps: {trapezoidal_area(f, -5, 5, i)}")
        i *= 10
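    # Sanity check (illustrative, not in the original): the exact area between
    # f and the x axis on [-5, 5] is 938/3 ~= 312.67, which the approximation
    # should approach as `steps` grows.
    assert abs(trapezoidal_area(f, -5, 5, 100_000) - 938 / 3) < 1e-3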
| 349 | 1 |
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
    from transformers import AutoProcessor, Blip2Processor, BlipImageProcessor, GPT2Tokenizer, PreTrainedTokenizerFast
@require_vision
class Blip2ProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = BlipImageProcessor()
        tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")
        processor = Blip2Processor(image_processor, tokenizer)
        processor.save_pretrained(self.tmpdirname)
    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer
    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor
    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def prepare_image_inputs(self):
        """Prepare a list of random PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_additional_features(self):
        processor = Blip2Processor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = Blip2Processor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)
    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, return_token_type_ids=False)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)
    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
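# Sketch of the round trip these tests exercise (illustrative; kept as a comment
# so importing the test module stays side-effect free):
#   processor = Blip2Processor(BlipImageProcessor(), GPT2Tokenizer.from_pretrained(
#       "hf-internal-testing/tiny-random-GPT2Model"))
#   inputs = processor(text="lower newer", images=[<PIL image>])
#   list(inputs.keys())  # -> ["pixel_values", "input_ids", "attention_mask"]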
| 349 |
'''simple docstring'''
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """Parse the command line options for the TPU launch helper."""
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
        )
    )
    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")
    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )
    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)
    return parser.parse_args()
def main():
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)
    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]
    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
if __name__ == "__main__":
    main()
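# Typical invocation (illustrative; `train.py` and its flags are placeholders,
# only --num_cores and the positional script path belong to this launcher):
#
#   python xla_spawn.py --num_cores 8 train.py --model_name_or_path bert-base-cased
#
# The training script is imported as a module, so it must expose a `_mp_fn`
# entry point for xmp.spawn to call on each TPU core.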
| 349 | 1 |
'''simple docstring'''
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def get_dataset() -> Dataset:
    """Build a tiny three-file dataset with two near-duplicate entries."""
    data_dict = {
        "repo_name": ["test_repo1", "test_repo2", "test_repo3"],
        "path": ["test_1.py", "test_2.py", "unit_test.py"],
        "content": ["a " * 20, "a " * 30, "b " * 7],
    }
    dataset = Dataset.from_dict(data_dict)
    return dataset
class MakeDuplicateClustersTest(TestCase):
    def test_make_duplicate_clusters(self):
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds, 0.85)
        self.assertEqual(len(duplicate_clusters[0]), 2)
    def test_deduplicate_dataset(self):
        ds = get_dataset()
        ds_filter, duplicate_clusters = deduplicate_dataset(ds)
        self.assertEqual(len(ds_filter), 2)
        print(duplicate_clusters)
        self.assertEqual(duplicate_clusters[0][0]["copies"], 2)
        self.assertEqual(duplicate_clusters[0][0]["is_extreme"], True)
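def test_jaccard_intuition():
    # Toy illustration (not part of minhash_deduplication): MinHash approximates
    # the Jaccard similarity of token sets, which is what the 0.85 threshold
    # above is compared against.
    def jaccard(a: str, b: str) -> float:
        sa, sb = set(a.split()), set(b.split())
        return len(sa & sb) / len(sa | sb)
    # "a " * 20 and "a " * 30 share their whole vocabulary -> similarity 1.0
    assert jaccard("a " * 20, "a " * 30) == 1.0
    assert jaccard("a " * 20, "b " * 7) == 0.0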
| 349 |
'''simple docstring'''
def remove_digit(num: int) -> int:
    """Return the largest number obtainable by removing exactly one digit from num."""
    if not isinstance(num, int):
        raise TypeError("only integers accepted as input")
    num_str = str(abs(num))
    num_transpositions = [list(num_str) for _ in range(len(num_str))]
    for index in range(len(num_str)):
        num_transpositions[index].pop(index)
    return max(int("".join(transposition)) for transposition in num_transpositions)
if __name__ == "__main__":
__import__("""doctest""").testmod()
| 349 | 1 |
'''simple docstring'''
from __future__ import annotations
import bisect
def bisect_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    """Locate the first index at which `item` can be inserted while keeping order."""
    if hi < 0:
        hi = len(sorted_collection)
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid
    return lo
def bisect_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    """Locate the index just past the last occurrence of `item`."""
    if hi < 0:
        hi = len(sorted_collection)
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid
    return lo
def insort_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    sorted_collection.insert(bisect_left(sorted_collection, item, lo, hi), item)
def insort_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    sorted_collection.insert(bisect_right(sorted_collection, item, lo, hi), item)
def binary_search(sorted_collection: list[int], item: int) -> int | None:
    """Classic iterative binary search; returns the index of `item` or None."""
    left = 0
    right = len(sorted_collection) - 1
    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None
def binary_search_std_lib(sorted_collection: list[int], item: int) -> int | None:
    """Same search implemented on top of the standard-library bisect module."""
    index = bisect.bisect_left(sorted_collection, item)
    if index != len(sorted_collection) and sorted_collection[index] == item:
        return index
    return None
def binary_search_by_recursion(sorted_collection: list[int], item: int, left: int, right: int) -> int | None:
    if right < left:
        return None
    midpoint = left + (right - left) // 2
    if sorted_collection[midpoint] == item:
        return midpoint
    elif sorted_collection[midpoint] > item:
        return binary_search_by_recursion(sorted_collection, item, left, midpoint - 1)
    else:
        return binary_search_by_recursion(sorted_collection, item, midpoint + 1, right)
if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = sorted(int(item) for item in user_input.split(","))
    target = int(input("Enter a single number to be found in the list:\n"))
    result = binary_search(collection, target)
    if result is None:
        print(f"{target} was not found in {collection}.")
    else:
        print(f"{target} was found at position {result} in {collection}.")
| 349 |
'''simple docstring'''
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
logger = logging.get_logger(__name__)
def copy_layers(src_layers: nn.ModuleList, dest_layers: nn.ModuleList, layers_to_copy: List[int]) -> None:
    """Overwrite dest_layers with the selected src_layers, matched position by position."""
    layers_to_copy = nn.ModuleList([src_layers[i] for i in layers_to_copy])
    assert len(dest_layers) == len(layers_to_copy), f"{len(dest_layers)} != {len(layers_to_copy)}"
    dest_layers.load_state_dict(layers_to_copy.state_dict())
LAYERS_TO_COPY = {
    # maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
    # 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
    12: {
        1: [0],  # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
        2: [0, 6],
        3: [0, 6, 11],
        4: [0, 4, 8, 11],
        6: [0, 2, 4, 7, 9, 11],
        9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
        12: list(range(12)),
    },
    16: {  # maps num layers in student -> which teacher layers to copy
        1: [0],
        2: [0, 15],
        3: [0, 8, 15],
        4: [0, 5, 10, 15],
        6: [0, 3, 6, 9, 12, 15],
        8: [0, 2, 4, 6, 8, 10, 12, 15],
        9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
        12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
        16: list(range(16)),
    },
    6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
LAYERS_TO_SUPERVISE = {
    # maps num layers in student -> which teacher layers to copy.
    6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
    12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
    16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}
def pick_layers_to_copy(n_student, n_teacher):
    try:
        val = LAYERS_TO_COPY[n_teacher][n_student]
        return val
    except KeyError:
        if n_student != n_teacher:
            warnings.warn(
                f"no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first"
                f" {n_student}"
            )
        return list(range(n_student))
def get_layers_to_supervise(n_student, n_teacher) -> List[int]:
    """Decide which teacher layers supervise which student layers."""
    if n_student > n_teacher:
        raise ValueError(f"Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}")
    elif n_teacher == n_student:
        return list(range(n_teacher))
    elif n_student == 1:
        return [n_teacher - 1]
    else:
        return LAYERS_TO_SUPERVISE[n_teacher][n_student]
def create_student_by_copying_alternating_layers(
    teacher: Union[str, PreTrainedModel],
    save_path: Union[str, Path] = "student",
    e: Union[int, None] = None,
    d: Union[int, None] = None,
    copy_first_teacher_layers=False,
    e_layers_to_copy=None,
    d_layers_to_copy=None,
    **extra_config_kwargs,
) -> Tuple[PreTrainedModel, List[int], List[int]]:
    """Make a student by copying alternating layers from the teacher, saving it to save_path."""
    _msg = "encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher."
    assert (e is not None) or (d is not None), _msg
    if isinstance(teacher, str):
        AutoTokenizer.from_pretrained(teacher).save_pretrained(save_path)  # purely for convenience
        teacher = AutoModelForSeq2SeqLM.from_pretrained(teacher).eval()
    else:
        assert isinstance(teacher, PreTrainedModel), f"teacher must be a model or string got type {type(teacher)}"
    init_kwargs = teacher.config.to_diff_dict()
    try:
        teacher_e, teacher_d = teacher.config.encoder_layers, teacher.config.decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        init_kwargs.update({"encoder_layers": e, "decoder_layers": d})
    except AttributeError:  # T5
        if hasattr(teacher.config, "num_encoder_layers"):
            teacher_e, teacher_d = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
        else:
            teacher_e, teacher_d = teacher.config.num_layers, teacher.config.num_decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        if hasattr(teacher.config, "num_encoder_layers"):
            init_kwargs.update({"num_encoder_layers": e, "num_decoder_layers": d})
        else:
            init_kwargs.update({"num_layers": e, "num_decoder_layers": d})
    # Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
    init_kwargs.update(extra_config_kwargs)
    # Copy weights
    student_cfg = teacher.config_class(**init_kwargs)
    student = AutoModelForSeq2SeqLM.from_config(student_cfg)
    # Start by copying the full teacher state dict this will copy the first N teacher layers to the student.
    info = student.load_state_dict(teacher.state_dict(), strict=False)
    assert info.missing_keys == [], info.missing_keys  # every student key should have a teacher key.
    if copy_first_teacher_layers:  # Our copying is done. We just log and save
        e_layers_to_copy, d_layers_to_copy = list(range(e)), list(range(d))
        logger.info(
            f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to"
            f" {save_path}"
        )
        student.save_pretrained(save_path)
        return student, e_layers_to_copy, d_layers_to_copy
    # Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
    if e_layers_to_copy is None:
        e_layers_to_copy: List[int] = pick_layers_to_copy(e, teacher_e)
    if d_layers_to_copy is None:
        d_layers_to_copy: List[int] = pick_layers_to_copy(d, teacher_d)
    try:
        if hasattr(
            teacher, "prophetnet"
        ):  # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
            copy_layers(teacher.prophetnet.encoder.layers, student.prophetnet.encoder.layers, e_layers_to_copy)
            copy_layers(teacher.prophetnet.decoder.layers, student.prophetnet.decoder.layers, d_layers_to_copy)
        else:
            copy_layers(teacher.model.encoder.layers, student.model.encoder.layers, e_layers_to_copy)
            copy_layers(teacher.model.decoder.layers, student.model.decoder.layers, d_layers_to_copy)
    except AttributeError:  # For t5, student.model.encoder.layers is called student.encoder.block
        copy_layers(teacher.encoder.block, student.encoder.block, e_layers_to_copy)
        copy_layers(teacher.decoder.block, student.decoder.block, d_layers_to_copy)
    logger.info(
        f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}"
    )
    # Save information about copying for easier reproducibility
    student.config.init_metadata = {
        "teacher_type": teacher.config.model_type,
        "copied_encoder_layers": e_layers_to_copy,
        "copied_decoder_layers": d_layers_to_copy,
    }
    student.save_pretrained(save_path)
    return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers)
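# Tiny self-contained sketch of what copy_layers does (layer count and sizes are
# made up for the demo; call _demo_copy_layers() manually to check it):
def _demo_copy_layers():
    teacher_layers = nn.ModuleList([nn.Linear(4, 4) for _ in range(6)])
    student_layers = nn.ModuleList([nn.Linear(4, 4) for _ in range(3)])
    copy_layers(teacher_layers, student_layers, [0, 2, 5])
    # each student layer now holds the weights of the picked teacher layer
    for s, t in zip(student_layers, [0, 2, 5]):
        assert (s.weight == teacher_layers[t].weight).all()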
| 349 | 1 |
'''simple docstring'''
def prime_sieve_eratosthenes(num: int) -> list[int]:
    """Return every prime up to and including num using the sieve of Eratosthenes."""
    if num <= 0:
        raise ValueError("Input must be a positive integer")
    primes = [True] * (num + 1)
    p = 2
    while p * p <= num:
        if primes[p]:
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1
    return [prime for prime in range(2, num + 1) if primes[prime]]
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    user_num = int(input("Enter a positive integer: ").strip())
    print(prime_sieve_eratosthenes(user_num))
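    # Expected behaviour (illustrative): all primes up to 30.
    assert prime_sieve_eratosthenes(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]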
| 349 |
'''simple docstring'''
def search(list_data: list, key: int, left: int = 0, right: int = 0) -> int:
    """Search `key` in `list_data` from both ends at once, recursing inward; -1 if absent."""
    right = right or len(list_data) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return search(list_data, key, left + 1, right - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
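    # Illustrative checks of the two-pointer recursion (not in the original):
    assert search([1, 2, 3, 4, 5], 4) == 3
    assert search([1, 2, 3, 4, 5], 9) == -1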
| 349 | 1 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
logger = logging.getLogger(__name__)
@dataclass
class InputExample:
    """A single training/test example for token classification."""
    guid: str
    words: List[str]
    labels: Optional[List[str]]
@dataclass
class InputFeatures:
    """A single set of features of data; property names match the model input names."""
    input_ids: List[int]
    attention_mask: List[int]
    token_type_ids: Optional[List[int]] = None
    label_ids: Optional[List[int]] = None
class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"
class TokenClassificationTask:
    @staticmethod
    def read_examples_from_file(data_dir, mode: Union[Split, str]) -> List[InputExample]:
        raise NotImplementedError
    @staticmethod
    def get_labels(path: str) -> List[str]:
        raise NotImplementedError
    @staticmethod
    def convert_examples_to_features(
        examples: List[InputExample],
        label_list: List[str],
        max_seq_length: int,
        tokenizer: PreTrainedTokenizer,
        cls_token_at_end=False,
        cls_token="[CLS]",
        cls_token_segment_id=1,
        sep_token="[SEP]",
        sep_token_extra=False,
        pad_on_left=False,
        pad_token=0,
        pad_token_segment_id=0,
        pad_token_label_id=-100,
        sequence_a_segment_id=0,
        mask_padding_with_zero=True,
    ) -> List[InputFeatures]:
        label_map = {label: i for i, label in enumerate(label_list)}
        features = []
        for ex_index, example in enumerate(examples):
            if ex_index % 10_000 == 0:
                logger.info("Writing example %d of %d", ex_index, len(examples))
            tokens = []
            label_ids = []
            for word, label in zip(example.words, example.labels):
                word_tokens = tokenizer.tokenize(word)
                # bert-base-multilingual-cased sometimes output "nothing ([]) when calling tokenize with just a space.
                if len(word_tokens) > 0:
                    tokens.extend(word_tokens)
                    # Use the real label id for the first token of the word, and padding ids for the remaining tokens
                    label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens) - 1))
            # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
            special_tokens_count = tokenizer.num_special_tokens_to_add()
            if len(tokens) > max_seq_length - special_tokens_count:
                tokens = tokens[: (max_seq_length - special_tokens_count)]
                label_ids = label_ids[: (max_seq_length - special_tokens_count)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
            tokens += [sep_token]
            label_ids += [pad_token_label_id]
            if sep_token_extra:
                # roberta uses an extra separator b/w pairs of sentences
                tokens += [sep_token]
                label_ids += [pad_token_label_id]
            segment_ids = [sequence_a_segment_id] * len(tokens)
            if cls_token_at_end:
                tokens += [cls_token]
                label_ids += [pad_token_label_id]
                segment_ids += [cls_token_segment_id]
            else:
                tokens = [cls_token] + tokens
                label_ids = [pad_token_label_id] + label_ids
                segment_ids = [cls_token_segment_id] + segment_ids
            input_ids = tokenizer.convert_tokens_to_ids(tokens)
            # The mask has 1 for real tokens and 0 for padding tokens. Only real
            # tokens are attended to.
            input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
            # Zero-pad up to the sequence length.
            padding_length = max_seq_length - len(input_ids)
            if pad_on_left:
                input_ids = ([pad_token] * padding_length) + input_ids
                input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
                segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
                label_ids = ([pad_token_label_id] * padding_length) + label_ids
            else:
                input_ids += [pad_token] * padding_length
                input_mask += [0 if mask_padding_with_zero else 1] * padding_length
                segment_ids += [pad_token_segment_id] * padding_length
                label_ids += [pad_token_label_id] * padding_length
            assert len(input_ids) == max_seq_length
            assert len(input_mask) == max_seq_length
            assert len(segment_ids) == max_seq_length
            assert len(label_ids) == max_seq_length
            if ex_index < 5:
                logger.info("*** Example ***")
                logger.info("guid: %s", example.guid)
                logger.info("tokens: %s", " ".join([str(x) for x in tokens]))
                logger.info("input_ids: %s", " ".join([str(x) for x in input_ids]))
                logger.info("input_mask: %s", " ".join([str(x) for x in input_mask]))
                logger.info("segment_ids: %s", " ".join([str(x) for x in segment_ids]))
                logger.info("label_ids: %s", " ".join([str(x) for x in label_ids]))
            if "token_type_ids" not in tokenizer.model_input_names:
                segment_ids = None
            features.append(
                InputFeatures(
                    input_ids=input_ids, attention_mask=input_mask, token_type_ids=segment_ids, label_ids=label_ids
                )
            )
        return features
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
    class TokenClassificationDataset(Dataset):
        """PyTorch dataset that converts the examples once and caches the features on disk."""
        features: List[InputFeatures]
        pad_token_label_id: int = nn.CrossEntropyLoss().ignore_index
        # Use cross entropy ignore_index as padding label id so that only
        # real label ids contribute to the loss later.
        def __init__(
            self, token_classification_task: TokenClassificationTask, data_dir: str, tokenizer: PreTrainedTokenizer,
            labels: List[str], model_type: str, max_seq_length: Optional[int] = None,
            overwrite_cache=False, mode: Split = Split.train,
        ):
            # Load data features from cache or dataset file
            cached_features_file = os.path.join(
                data_dir, "cached_{}_{}_{}".format(mode.value, tokenizer.__class__.__name__, str(max_seq_length))
            )
            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"Loading features from cached file {cached_features_file}")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"Creating features from dataset file at {data_dir}")
                    examples = token_classification_task.read_examples_from_file(data_dir, mode)
                    # TODO clean up all this to leverage built-in features of tokenizers
                    self.features = token_classification_task.convert_examples_to_features(
                        examples, labels, max_seq_length, tokenizer,
                        cls_token_at_end=bool(model_type in ["xlnet"]), cls_token=tokenizer.cls_token,
                        cls_token_segment_id=2 if model_type in ["xlnet"] else 0, sep_token=tokenizer.sep_token,
                        sep_token_extra=False, pad_on_left=bool(tokenizer.padding_side == "left"),
                        pad_token=tokenizer.pad_token_id, pad_token_segment_id=tokenizer.pad_token_type_id,
                        pad_token_label_id=self.pad_token_label_id,
                    )
                    logger.info(f"Saving features into cached file {cached_features_file}")
                    torch.save(self.features, cached_features_file)
        def __len__(self):
            return len(self.features)
        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]
if is_tf_available():
import tensorflow as tf
    class TFTokenClassificationDataset:
        """TensorFlow dataset built from a generator over the converted features."""
        features: List[InputFeatures]
        pad_token_label_id: int = -100
        # Use cross entropy ignore_index as padding label id so that only
        # real label ids contribute to the loss later.
        def __init__(
            self, token_classification_task: TokenClassificationTask, data_dir: str, tokenizer: PreTrainedTokenizer,
            labels: List[str], model_type: str, max_seq_length: Optional[int] = None,
            overwrite_cache=False, mode: Split = Split.train,
        ):
            examples = token_classification_task.read_examples_from_file(data_dir, mode)
            # TODO clean up all this to leverage built-in features of tokenizers
            self.features = token_classification_task.convert_examples_to_features(
                examples, labels, max_seq_length, tokenizer,
                cls_token_at_end=bool(model_type in ["xlnet"]), cls_token=tokenizer.cls_token,
                cls_token_segment_id=2 if model_type in ["xlnet"] else 0, sep_token=tokenizer.sep_token,
                sep_token_extra=False, pad_on_left=bool(tokenizer.padding_side == "left"),
                pad_token=tokenizer.pad_token_id, pad_token_segment_id=tokenizer.pad_token_type_id,
                pad_token_label_id=self.pad_token_label_id,
            )
            def gen():
                for ex in self.features:
                    if ex.token_type_ids is None:
                        yield (
                            {"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
                            ex.label_ids,
                        )
                    else:
                        yield (
                            {
                                "input_ids": ex.input_ids,
                                "attention_mask": ex.attention_mask,
                                "token_type_ids": ex.token_type_ids,
                            },
                            ex.label_ids,
                        )
            if "token_type_ids" not in tokenizer.model_input_names:
                self.dataset = tf.data.Dataset.from_generator(
                    gen,
                    ({"input_ids": tf.int32, "attention_mask": tf.int32}, tf.int64),
                    (
                        {"input_ids": tf.TensorShape([None]), "attention_mask": tf.TensorShape([None])},
                        tf.TensorShape([None]),
                    ),
                )
            else:
                self.dataset = tf.data.Dataset.from_generator(
                    gen,
                    ({"input_ids": tf.int32, "attention_mask": tf.int32, "token_type_ids": tf.int32}, tf.int64),
                    (
                        {
                            "input_ids": tf.TensorShape([None]),
                            "attention_mask": tf.TensorShape([None]),
                            "token_type_ids": tf.TensorShape([None]),
                        },
                        tf.TensorShape([None]),
                    ),
                )
        def get_dataset(self):
            self.dataset = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features)))
            return self.dataset
        def __len__(self):
            return len(self.features)
        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]
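# Toy illustration (hypothetical subword splits, no real tokenizer involved) of
# the label alignment rule used in convert_examples_to_features: only the first
# subword of each word keeps its label, the rest get pad_token_label_id so the
# loss skips them.
if __name__ == "__main__":
    word_pieces = {"John": ["John"], "Johanson": ["Johan", "##son"]}
    label_map = {"B-PER": 0, "I-PER": 1}
    pad_label = -100
    tokens, label_ids = [], []
    for word, label in [("John", "B-PER"), ("Johanson", "I-PER")]:
        pieces = word_pieces[word]
        tokens.extend(pieces)
        label_ids.extend([label_map[label]] + [pad_label] * (len(pieces) - 1))
    assert tokens == ["John", "Johan", "##son"]
    assert label_ids == [0, 1, -100]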
| 349 |
'''simple docstring'''
from datetime import datetime
import requests
def download_video(url: str) -> bytes:
    """Resolve the direct video source for an Instagram/IGTV url and return its bytes."""
    base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
    video_url = requests.get(base_url + url).json()[0]["urls"][0]["src"]
    return requests.get(video_url).content
if __name__ == "__main__":
    url = input("Enter Video/IGTV url: ").strip()
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"
with open(file_name, """wb""") as fp:
fp.write(download_video(url))
print(f"""Done. Video saved to disk as {file_name}.""")
| 349 | 1 |