| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 81-54k | int64 0-721 | stringlengths 91-41.9k | int64 0-699 | int64 0-1 |
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class __snake_case ( _lowerCamelCase ,_lowerCamelCase ):
@register_to_config
def __init__( self , __UpperCamelCase = 128 , __UpperCamelCase = 256 , __UpperCamelCase = 2000.0 , __UpperCamelCase = 768 , __UpperCamelCase = 12 , __UpperCamelCase = 12 , __UpperCamelCase = 64 , __UpperCamelCase = 2048 , __UpperCamelCase = 0.1 , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__()
snake_case__ : Dict = nn.Sequential(
nn.Linear(__UpperCamelCase , d_model * 4 , bias=__UpperCamelCase ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=__UpperCamelCase ) , nn.SiLU() , )
snake_case__ : Tuple = nn.Embedding(__UpperCamelCase , __UpperCamelCase )
snake_case__ : int = False
snake_case__ : Optional[Any] = nn.Linear(__UpperCamelCase , __UpperCamelCase , bias=__UpperCamelCase )
snake_case__ : int = nn.Dropout(p=__UpperCamelCase )
snake_case__ : Dict = nn.ModuleList()
for lyr_num in range(__UpperCamelCase ):
# FiLM conditional T5 decoder
snake_case__ : int = DecoderLayer(d_model=__UpperCamelCase , d_kv=__UpperCamelCase , num_heads=__UpperCamelCase , d_ff=__UpperCamelCase , dropout_rate=__UpperCamelCase )
self.decoders.append(__UpperCamelCase )
snake_case__ : Dict = TaLayerNorm(__UpperCamelCase )
snake_case__ : Optional[Any] = nn.Dropout(p=__UpperCamelCase )
snake_case__ : List[Any] = nn.Linear(__UpperCamelCase , __UpperCamelCase , bias=__UpperCamelCase )
def __a ( self , __UpperCamelCase , __UpperCamelCase ) -> Dict:
'''simple docstring'''
snake_case__ : Dict = torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) )
return mask.unsqueeze(-3 )
def __a ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> str:
'''simple docstring'''
snake_case__ : Dict = decoder_input_tokens.shape
assert decoder_noise_time.shape == (batch,)
# decoder_noise_time is in [0, 1), so rescale to expected timing range.
snake_case__ : List[Any] = get_timestep_embedding(
decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype )
snake_case__ : Optional[Any] = self.conditioning_emb(__UpperCamelCase ).unsqueeze(1 )
assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
snake_case__ : int = decoder_input_tokens.shape[1]
# If we want to use relative positions for audio context, we can just offset
# this sequence by the length of encodings_and_masks.
snake_case__ : Dict = torch.broadcast_to(
torch.arange(__UpperCamelCase , device=decoder_input_tokens.device ) , (batch, seq_length) , )
snake_case__ : Union[str, Any] = self.position_encoding(__UpperCamelCase )
snake_case__ : Union[str, Any] = self.continuous_inputs_projection(__UpperCamelCase )
inputs += position_encodings
snake_case__ : Any = self.dropout(__UpperCamelCase )
# decoder: No padding present.
snake_case__ : Dict = torch.ones(
decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype )
# Translate encoding masks to encoder-decoder masks.
snake_case__ : Dict = [(x, self.encoder_decoder_mask(__UpperCamelCase , __UpperCamelCase )) for x, y in encodings_and_masks]
# cross attend style: concat encodings
snake_case__ : Optional[int] = torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 )
snake_case__ : Tuple = torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 )
for lyr in self.decoders:
snake_case__ : Optional[Any] = lyr(
__UpperCamelCase , conditioning_emb=__UpperCamelCase , encoder_hidden_states=__UpperCamelCase , encoder_attention_mask=__UpperCamelCase , )[0]
snake_case__ : Union[str, Any] = self.decoder_norm(__UpperCamelCase )
snake_case__ : Optional[int] = self.post_dropout(__UpperCamelCase )
snake_case__ : int = self.spec_out(__UpperCamelCase )
return spec_out
class __snake_case ( nn.Module ):
def __init__( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=1E-6 ) -> Dict:
'''simple docstring'''
super().__init__()
snake_case__ : Optional[Any] = nn.ModuleList()
# cond self attention: layer 0
self.layer.append(
TaLayerSelfAttentionCond(d_model=__UpperCamelCase , d_kv=__UpperCamelCase , num_heads=__UpperCamelCase , dropout_rate=__UpperCamelCase ) )
# cross attention: layer 1
self.layer.append(
TaLayerCrossAttention(
d_model=__UpperCamelCase , d_kv=__UpperCamelCase , num_heads=__UpperCamelCase , dropout_rate=__UpperCamelCase , layer_norm_epsilon=__UpperCamelCase , ) )
# Film Cond MLP + dropout: last layer
self.layer.append(
TaLayerFFCond(d_model=__UpperCamelCase , d_ff=__UpperCamelCase , dropout_rate=__UpperCamelCase , layer_norm_epsilon=__UpperCamelCase ) )
def __a ( self , __UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=None , ) -> Dict:
'''simple docstring'''
snake_case__ : Any = self.layer[0](
__UpperCamelCase , conditioning_emb=__UpperCamelCase , attention_mask=__UpperCamelCase , )
if encoder_hidden_states is not None:
snake_case__ : Optional[Any] = torch.where(encoder_attention_mask > 0 , 0 , -1E10 ).to(
encoder_hidden_states.dtype )
snake_case__ : List[Any] = self.layer[1](
__UpperCamelCase , key_value_states=__UpperCamelCase , attention_mask=__UpperCamelCase , )
# Apply Film Conditional Feed Forward layer
snake_case__ : str = self.layer[-1](__UpperCamelCase , __UpperCamelCase )
return (hidden_states,)
class __snake_case ( nn.Module ):
def __init__( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Union[str, Any]:
'''simple docstring'''
super().__init__()
snake_case__ : Union[str, Any] = TaLayerNorm(__UpperCamelCase )
snake_case__ : Any = TaFiLMLayer(in_features=d_model * 4 , out_features=__UpperCamelCase )
snake_case__ : Optional[int] = Attention(query_dim=__UpperCamelCase , heads=__UpperCamelCase , dim_head=__UpperCamelCase , out_bias=__UpperCamelCase , scale_qk=__UpperCamelCase )
snake_case__ : Dict = nn.Dropout(__UpperCamelCase )
def __a ( self , __UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase=None , ) -> Optional[Any]:
'''simple docstring'''
snake_case__ : List[str] = self.layer_norm(__UpperCamelCase )
if conditioning_emb is not None:
snake_case__ : List[str] = self.FiLMLayer(__UpperCamelCase , __UpperCamelCase )
# Self-attention block
snake_case__ : Union[str, Any] = self.attention(__UpperCamelCase )
snake_case__ : Optional[Any] = hidden_states + self.dropout(__UpperCamelCase )
return hidden_states
class __snake_case ( nn.Module ):
def __init__( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Union[str, Any]:
'''simple docstring'''
super().__init__()
snake_case__ : Any = Attention(query_dim=__UpperCamelCase , heads=__UpperCamelCase , dim_head=__UpperCamelCase , out_bias=__UpperCamelCase , scale_qk=__UpperCamelCase )
snake_case__ : Dict = TaLayerNorm(__UpperCamelCase , eps=__UpperCamelCase )
snake_case__ : Union[str, Any] = nn.Dropout(__UpperCamelCase )
def __a ( self , __UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase=None , ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : Union[str, Any] = self.layer_norm(__UpperCamelCase )
snake_case__ : Optional[Any] = self.attention(
__UpperCamelCase , encoder_hidden_states=__UpperCamelCase , attention_mask=attention_mask.squeeze(1 ) , )
snake_case__ : Tuple = hidden_states + self.dropout(__UpperCamelCase )
return layer_output
class __snake_case ( nn.Module ):
def __init__( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> int:
'''simple docstring'''
super().__init__()
snake_case__ : Optional[int] = TaDenseGatedActDense(d_model=__UpperCamelCase , d_ff=__UpperCamelCase , dropout_rate=__UpperCamelCase )
snake_case__ : Dict = TaFiLMLayer(in_features=d_model * 4 , out_features=__UpperCamelCase )
snake_case__ : Dict = TaLayerNorm(__UpperCamelCase , eps=__UpperCamelCase )
snake_case__ : int = nn.Dropout(__UpperCamelCase )
def __a ( self , __UpperCamelCase , __UpperCamelCase=None ) -> Dict:
'''simple docstring'''
snake_case__ : Optional[int] = self.layer_norm(__UpperCamelCase )
if conditioning_emb is not None:
snake_case__ : Optional[int] = self.film(__UpperCamelCase , __UpperCamelCase )
snake_case__ : Tuple = self.DenseReluDense(__UpperCamelCase )
snake_case__ : Tuple = hidden_states + self.dropout(__UpperCamelCase )
return hidden_states
class __snake_case ( nn.Module ):
def __init__( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> int:
'''simple docstring'''
super().__init__()
snake_case__ : str = nn.Linear(__UpperCamelCase , __UpperCamelCase , bias=__UpperCamelCase )
snake_case__ : int = nn.Linear(__UpperCamelCase , __UpperCamelCase , bias=__UpperCamelCase )
snake_case__ : Optional[Any] = nn.Linear(__UpperCamelCase , __UpperCamelCase , bias=__UpperCamelCase )
snake_case__ : int = nn.Dropout(__UpperCamelCase )
snake_case__ : Tuple = NewGELUActivation()
def __a ( self , __UpperCamelCase ) -> List[Any]:
'''simple docstring'''
snake_case__ : Tuple = self.act(self.wi_0(__UpperCamelCase ) )
snake_case__ : Dict = self.wi_1(__UpperCamelCase )
snake_case__ : List[Any] = hidden_gelu * hidden_linear
snake_case__ : Union[str, Any] = self.dropout(__UpperCamelCase )
snake_case__ : Tuple = self.wo(__UpperCamelCase )
return hidden_states
class __snake_case ( nn.Module ):
def __init__( self , __UpperCamelCase , __UpperCamelCase=1E-6 ) -> str:
'''simple docstring'''
super().__init__()
snake_case__ : Tuple = nn.Parameter(torch.ones(__UpperCamelCase ) )
snake_case__ : Dict = eps
def __a ( self , __UpperCamelCase ) -> List[str]:
'''simple docstring'''
snake_case__ : Tuple = hidden_states.to(torch.float32 ).pow(2 ).mean(-1 , keepdim=True )
snake_case__ : List[str] = hidden_states * torch.rsqrt(variance + self.variance_epsilon )
# convert into half-precision if necessary
if self.weight.dtype in [torch.float16, torch.bfloat16]:
snake_case__ : Tuple = hidden_states.to(self.weight.dtype )
return self.weight * hidden_states
class __snake_case ( nn.Module ):
def __a ( self , __UpperCamelCase ) -> torch.Tensor:
'''simple docstring'''
return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.0_4_4_7_1_5 * torch.pow(__UpperCamelCase , 3.0 )) ))
class __snake_case ( nn.Module ):
def __init__( self , __UpperCamelCase , __UpperCamelCase ) -> Optional[int]:
'''simple docstring'''
super().__init__()
snake_case__ : int = nn.Linear(__UpperCamelCase , out_features * 2 , bias=__UpperCamelCase )
def __a ( self , __UpperCamelCase , __UpperCamelCase ) -> Optional[Any]:
'''simple docstring'''
snake_case__ : Any = self.scale_bias(__UpperCamelCase )
snake_case__ : List[Any] = torch.chunk(__UpperCamelCase , 2 , -1 )
snake_case__ : Optional[int] = x * (1 + scale) + shift
return x
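# Illustrative sketch (not from the original file) of the FiLM conditioning
# implemented by the film layer above: a linear projection predicts per-channel
# (scale, shift) from a conditioning embedding, and the input is modulated as
# x * (1 + scale) + shift. "MiniFiLM" is a hypothetical name for this demo.
import torch
from torch import nn

class MiniFiLM(nn.Module):
    def __init__(self, cond_features: int, num_channels: int) -> None:
        super().__init__()
        # One projection produces both scale and shift, hence "* 2".
        self.scale_bias = nn.Linear(cond_features, num_channels * 2, bias=False)

    def forward(self, x: torch.Tensor, cond: torch.Tensor) -> torch.Tensor:
        scale, shift = torch.chunk(self.scale_bias(cond), 2, dim=-1)
        return x * (1 + scale) + shift

# Sanity check: with zeroed conditioning weights, FiLM reduces to the identity.
film = MiniFiLM(cond_features=4, num_channels=8)
nn.init.zeros_(film.scale_bias.weight)
x = torch.randn(2, 3, 8)
cond = torch.randn(2, 1, 4)
assert torch.allclose(film(x, cond), x)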
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase__ : Dict = logging.get_logger(__name__)
lowerCAmelCase__ : int = {
'''sail/poolformer_s12''': '''https://huggingface.co/sail/poolformer_s12/resolve/main/config.json''',
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}
class __snake_case ( _lowerCamelCase ):
__lowerCamelCase = """poolformer"""
def __init__( self , __UpperCamelCase=3 , __UpperCamelCase=16 , __UpperCamelCase=16 , __UpperCamelCase=3 , __UpperCamelCase=4.0 , __UpperCamelCase=[2, 2, 6, 2] , __UpperCamelCase=[64, 128, 320, 512] , __UpperCamelCase=[7, 3, 3, 3] , __UpperCamelCase=[4, 2, 2, 2] , __UpperCamelCase=[2, 1, 1, 1] , __UpperCamelCase=4 , __UpperCamelCase=0.0 , __UpperCamelCase="gelu" , __UpperCamelCase=True , __UpperCamelCase=1E-5 , __UpperCamelCase=0.0_2 , **__UpperCamelCase , ) -> Any:
'''simple docstring'''
snake_case__ : List[str] = num_channels
snake_case__ : Dict = patch_size
snake_case__ : Optional[int] = stride
snake_case__ : str = padding
snake_case__ : List[str] = pool_size
snake_case__ : List[Any] = hidden_sizes
snake_case__ : List[Any] = mlp_ratio
snake_case__ : Union[str, Any] = depths
snake_case__ : Dict = patch_sizes
snake_case__ : Dict = strides
snake_case__ : Dict = num_encoder_blocks
snake_case__ : Union[str, Any] = drop_path_rate
snake_case__ : List[str] = hidden_act
snake_case__ : Optional[Any] = use_layer_scale
snake_case__ : int = layer_scale_init_value
snake_case__ : Dict = initializer_range
super().__init__(**__UpperCamelCase )
class __snake_case ( _lowerCamelCase ):
__lowerCamelCase = version.parse("""1.11""" )
@property
def __a ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def __a ( self ) -> float:
'''simple docstring'''
return 2E-3
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse

from .config import config_command_parser
from .config_args import default_config_file, load_config_from_file  # noqa: F401
from .default import default_command_parser
from .update import update_command_parser


def get_config_parser(subparsers=None):
    parent_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)
    # The main config parser
    config_parser = config_command_parser(subparsers)
    # The subparser to add commands to
    subcommands = config_parser.add_subparsers(title="subcommands", dest="subcommand")
    # Then add other parsers with the parent parser
    default_command_parser(subcommands, parents=[parent_parser])
    update_command_parser(subcommands, parents=[parent_parser])
    return config_parser


def main():
    config_parser = get_config_parser()
    args = config_parser.parse_args()
    if not hasattr(args, "func"):
        config_parser.print_help()
        exit(1)
    # Run
    args.func(args)


if __name__ == "__main__":
    main()
import numpy as np
import qiskit


def bb84(key_len: int = 8, seed: int | None = None) -> str:
    # Set up the random number generator.
    rng = np.random.default_rng(seed=seed)
    # Roughly 25% of the qubits will contribute to the key.
    # So we take more than we need.
    num_qubits = 6 * key_len
    # Measurement basis for Alice's qubits.
    alice_basis = rng.integers(2, size=num_qubits)
    # The set of states Alice will prepare.
    alice_state = rng.integers(2, size=num_qubits)
    # Measurement basis for Bob's qubits.
    bob_basis = rng.integers(2, size=num_qubits)

    # Quantum Circuit to simulate BB84
    bb84_circ = qiskit.QuantumCircuit(num_qubits, name="BB84")

    # Alice prepares her qubits according to rules above.
    for index, _ in enumerate(alice_basis):
        if alice_state[index] == 1:
            bb84_circ.x(index)
        if alice_basis[index] == 1:
            bb84_circ.h(index)
    bb84_circ.barrier()

    # Bob measures the received qubits according to rules above.
    for index, _ in enumerate(bob_basis):
        if bob_basis[index] == 1:
            bb84_circ.h(index)

    bb84_circ.barrier()
    bb84_circ.measure_all()

    # Simulate the quantum circuit.
    sim = qiskit.Aer.get_backend("aer_simulator")
    # We only need to run one shot because the key is unique.
    # Multiple shots will produce the same key.
    job = qiskit.execute(bb84_circ, sim, shots=1, seed_simulator=seed)
    # Returns the result of measurement.
    result = job.result().get_counts(bb84_circ).most_frequent()

    # Extracting the generated key from the simulation results.
    # Only keep measurement results where Alice and Bob chose the same basis.
    gen_key = "".join(
        result_bit
        for alice_basis_bit, bob_basis_bit, result_bit in zip(alice_basis, bob_basis, result)
        if alice_basis_bit == bob_basis_bit
    )

    # Get final key. Pad with 0 if too short, otherwise truncate.
    key = gen_key[:key_len] if len(gen_key) >= key_len else gen_key.ljust(key_len, "0")
    return key


if __name__ == "__main__":
    print(f"The generated key is : {bb84(8, seed=0)}")

    from doctest import testmod

    testmod()
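# Illustrative sketch (not from the original file) of the classical sifting
# step that the circuit above simulates, shown without qiskit: Alice and Bob
# keep only the positions where their randomly chosen bases agree, which
# happens with probability 1/2 per qubit. That is why 6 * key_len qubits are
# prepared for a key_len-bit key: the expected sifted length, 3 * key_len,
# leaves a wide safety margin.
import numpy as np

rng = np.random.default_rng(seed=0)
num_qubits = 48
alice_basis = rng.integers(2, size=num_qubits)
bob_basis = rng.integers(2, size=num_qubits)
matching = alice_basis == bob_basis
print(f"bases agree at {matching.sum()} of {num_qubits} positions")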
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class __snake_case ( unittest.TestCase ):
def __init__( self , __UpperCamelCase , __UpperCamelCase=7 , __UpperCamelCase=3 , __UpperCamelCase=18 , __UpperCamelCase=30 , __UpperCamelCase=400 , __UpperCamelCase=True , __UpperCamelCase=None , __UpperCamelCase=True , __UpperCamelCase=None , __UpperCamelCase=True , ) -> Tuple:
'''simple docstring'''
snake_case__ : Dict = size if size is not None else {'shortest_edge': 20}
snake_case__ : str = crop_size if crop_size is not None else {'height': 18, 'width': 18}
snake_case__ : List[Any] = parent
snake_case__ : Dict = batch_size
snake_case__ : List[str] = num_channels
snake_case__ : Any = image_size
snake_case__ : List[Any] = min_resolution
snake_case__ : List[Any] = max_resolution
snake_case__ : Optional[int] = do_resize
snake_case__ : List[Any] = size
snake_case__ : Any = do_center_crop
snake_case__ : Optional[Any] = crop_size
snake_case__ : List[str] = do_flip_channel_order
def __a ( self ) -> str:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class __snake_case ( _lowerCamelCase ,unittest.TestCase ):
__lowerCamelCase = MobileViTImageProcessor if is_vision_available() else None
def __a ( self ) -> str:
'''simple docstring'''
snake_case__ : Optional[int] = MobileViTImageProcessingTester(self )
@property
def __a ( self ) -> str:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def __a ( self ) -> Dict:
'''simple docstring'''
snake_case__ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__UpperCamelCase , 'do_resize' ) )
self.assertTrue(hasattr(__UpperCamelCase , 'size' ) )
self.assertTrue(hasattr(__UpperCamelCase , 'do_center_crop' ) )
self.assertTrue(hasattr(__UpperCamelCase , 'center_crop' ) )
self.assertTrue(hasattr(__UpperCamelCase , 'do_flip_channel_order' ) )
def __a ( self ) -> List[str]:
'''simple docstring'''
snake_case__ : Any = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 20} )
self.assertEqual(image_processor.crop_size , {'height': 18, 'width': 18} )
snake_case__ : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'shortest_edge': 42} )
self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} )
def __a ( self ) -> Optional[Any]:
'''simple docstring'''
pass
def __a ( self ) -> Dict:
'''simple docstring'''
snake_case__ : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case__ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCamelCase , Image.Image )
# Test not batched input
snake_case__ : Any = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
snake_case__ : Dict = image_processing(__UpperCamelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def __a ( self ) -> int:
'''simple docstring'''
snake_case__ : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case__ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase , numpify=__UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCamelCase , np.ndarray )
# Test not batched input
snake_case__ : List[Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
snake_case__ : Union[str, Any] = image_processing(__UpperCamelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def __a ( self ) -> List[str]:
'''simple docstring'''
snake_case__ : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case__ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase , torchify=__UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCamelCase , torch.Tensor )
# Test not batched input
snake_case__ : Any = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
snake_case__ : Tuple = image_processing(__UpperCamelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def _modexpt(base: int, exponent: int, modulo_value: int) -> int:
    """Recursive modular exponentiation by repeated squaring."""
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value


def solution(base: int = 1777, height: int = 1855, digits: int = 8) -> int:
    """Return the last `digits` digits of the power tower base^base^...^base of the given height."""
    # Build the tower one level at a time, keeping only the last digits.
    result = base
    for _ in range(1, height):
        result = _modexpt(base, result, 10**digits)
    return result


if __name__ == "__main__":
    print(f"{solution() = }")
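# Cross-check (illustrative, not from the original file): Python's built-in
# three-argument pow computes modular exponentiation directly, so it should
# agree with the recursive _modexpt defined above.
assert _modexpt(1777, 1855, 10**8) == pow(1777, 1855, 10**8)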
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
lowerCAmelCase__ : Optional[int] = 16
lowerCAmelCase__ : List[str] = 32
def UpperCamelCase__ ( A__ , A__ = 16 , A__ = "bert-base-cased" ) -> Tuple:
'''simple docstring'''
snake_case__ : Union[str, Any] = AutoTokenizer.from_pretrained(A__ )
snake_case__ : List[Any] = load_dataset('glue' , 'mrpc' )
def tokenize_function(A__ ):
# max_length=None => use the model max length (it's actually the default)
snake_case__ : Union[str, Any] = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=A__ , max_length=A__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
snake_case__ : Union[str, Any] = datasets.map(
A__ , batched=A__ , remove_columns=['idx', 'sentence1', 'sentence2'] , load_from_cache_file=A__ )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
snake_case__ : Any = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(A__ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(A__ , padding='max_length' , max_length=128 , return_tensors='pt' )
return tokenizer.pad(A__ , padding='longest' , return_tensors='pt' )
# Instantiate dataloaders.
snake_case__ : List[Any] = DataLoader(
tokenized_datasets['train'] , shuffle=A__ , collate_fn=A__ , batch_size=A__ )
snake_case__ : Optional[int] = DataLoader(
tokenized_datasets['validation'] , shuffle=A__ , collate_fn=A__ , batch_size=A__ )
return train_dataloader, eval_dataloader
def UpperCamelCase__ ( A__ , A__ ) -> int:
'''simple docstring'''
snake_case__ : Optional[int] = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
snake_case__ : Optional[int] = config['lr']
snake_case__ : Union[str, Any] = int(config['num_epochs'] )
snake_case__ : List[str] = int(config['seed'] )
snake_case__ : int = int(config['batch_size'] )
snake_case__ : Optional[Any] = args.model_name_or_path
set_seed(A__ )
snake_case__ : Any = get_dataloaders(A__ , A__ , A__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
snake_case__ : List[str] = AutoModelForSequenceClassification.from_pretrained(A__ , return_dict=A__ )
# Instantiate optimizer
snake_case__ : Optional[Any] = (
AdamW
if accelerator.state.deepspeed_plugin is None
or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
snake_case__ : Any = optimizer_cls(params=model.parameters() , lr=A__ )
if accelerator.state.deepspeed_plugin is not None:
snake_case__ : Tuple = accelerator.state.deepspeed_plugin.deepspeed_config[
'gradient_accumulation_steps'
]
else:
snake_case__ : Any = 1
snake_case__ : List[Any] = (len(A__ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
snake_case__ : List[Any] = get_linear_schedule_with_warmup(
optimizer=A__ , num_warmup_steps=0 , num_training_steps=A__ , )
else:
snake_case__ : Union[str, Any] = DummyScheduler(A__ , total_num_steps=A__ , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
snake_case__ : int = accelerator.prepare(
A__ , A__ , A__ , A__ , A__ )
# We need to keep track of how many total steps we have iterated over
snake_case__ : Optional[int] = 0
# We also need to keep track of the stating epoch so files are named properly
snake_case__ : List[str] = 0
# Now we train the model
snake_case__ : str = evaluate.load('glue' , 'mrpc' )
snake_case__ : int = 0
snake_case__ : Optional[Any] = {}
for epoch in range(A__ , A__ ):
model.train()
for step, batch in enumerate(A__ ):
snake_case__ : Union[str, Any] = model(**A__ )
snake_case__ : Dict = outputs.loss
snake_case__ : Tuple = loss / gradient_accumulation_steps
accelerator.backward(A__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
model.eval()
snake_case__ : Any = 0
for step, batch in enumerate(A__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
snake_case__ : Optional[int] = model(**A__ )
snake_case__ : Union[str, Any] = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
snake_case__ : int = accelerator.gather(
(predictions, batch['labels']) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(A__ ) - 1:
snake_case__ : Optional[int] = predictions[: len(eval_dataloader.dataset ) - samples_seen]
snake_case__ : Union[str, Any] = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=A__ , references=A__ , )
snake_case__ : Union[str, Any] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F"""epoch {epoch}:""" , A__ )
snake_case__ : List[Any] = eval_metric['accuracy']
if best_performance < eval_metric["accuracy"]:
snake_case__ : str = eval_metric['accuracy']
if args.performance_lower_bound is not None:
assert (
args.performance_lower_bound <= best_performance
), F"""Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}"""
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , 'all_results.json' ) , 'w' ) as f:
json.dump(A__ , A__ )
def UpperCamelCase__ ( ) -> str:
'''simple docstring'''
snake_case__ : str = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.' )
parser.add_argument(
'--model_name_or_path' , type=A__ , default='bert-base-cased' , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=A__ , )
parser.add_argument(
'--output_dir' , type=A__ , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , )
parser.add_argument(
'--performance_lower_bound' , type=A__ , default=A__ , help='Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.' , )
parser.add_argument(
'--num_epochs' , type=A__ , default=3 , help='Number of train epochs.' , )
snake_case__ : Union[str, Any] = parser.parse_args()
snake_case__ : List[Any] = {'lr': 2e-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16}
training_function(A__ , A__ )
if __name__ == "__main__":
main()
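# Illustrative sketch (not from the original file) of the gradient-accumulation
# pattern used in the training loop above, reduced to its essentials in plain
# PyTorch (no Accelerate/DeepSpeed): scale each micro-batch loss by the number
# of accumulation steps so the summed gradient matches one large batch, and
# only step the optimizer every `accumulation_steps` micro-batches.
import torch

model = torch.nn.Linear(4, 1)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
accumulation_steps = 4

for step in range(8):
    batch = torch.randn(2, 4)
    loss = model(batch).pow(2).mean()
    (loss / accumulation_steps).backward()  # gradients add up across calls
    if (step + 1) % accumulation_steps == 0:
        optimizer.step()
        optimizer.zero_grad()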
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run

import sys
import warnings
from os.path import abspath, dirname, join


# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)


def pytest_addoption(parser):
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
class PrefixSum:
    def __init__(self, array: list[int]) -> None:
        len_array = len(array)
        self.prefix_sum = [0] * len_array

        if len_array > 0:
            self.prefix_sum[0] = array[0]

        for i in range(1, len_array):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum(self, start: int, end: int) -> int:
        """Sum of array[start..end] (inclusive) in O(1)."""
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum(self, target_sum: int) -> bool:
        """True if any contiguous subarray sums to target_sum."""
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item)
        return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
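# Example usage (illustrative, not from the original file): O(1) range sums
# after O(n) setup, plus the classic "does any contiguous subarray sum to t?"
# check, which works because subarray sums are differences of prefix sums.
demo = PrefixSum([1, 2, 3, 4])
assert demo.get_sum(0, 3) == 10   # whole array
assert demo.get_sum(1, 2) == 5    # 2 + 3
assert demo.contains_sum(7)       # 3 + 4
assert not demo.contains_sum(11)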
def hexagonal_numbers(length: int) -> list[int]:
    """Return the first `length` hexagonal numbers, h(n) = n * (2n - 1)."""
    if length <= 0 or not isinstance(length, int):
        raise ValueError("Length must be a positive integer.")
    return [n * (2 * n - 1) for n in range(length)]


if __name__ == "__main__":
    print(hexagonal_numbers(length=5))
    print(hexagonal_numbers(length=10))
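# Sanity check (illustrative, not from the original file): hexagonal numbers
# are every other triangular number, since n * (2n - 1) = T(2n - 1) where
# T(k) = k * (k + 1) / 2.
def _triangular(k: int) -> int:
    return k * (k + 1) // 2

assert all(n * (2 * n - 1) == _triangular(2 * n - 1) for n in range(1, 10))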
import torch
from transformers import AutoModel
class __snake_case ( torch.nn.Module ):
def __init__( self , __UpperCamelCase="sayef/fsner-bert-base-uncased" ) -> List[str]:
'''simple docstring'''
super(__UpperCamelCase , self ).__init__()
snake_case__ : Tuple = AutoModel.from_pretrained(__UpperCamelCase , return_dict=__UpperCamelCase )
snake_case__ : Optional[Any] = torch.nn.CosineSimilarity(3 , 1E-08 )
snake_case__ : Optional[int] = torch.nn.Softmax(dim=1 )
def __a ( self , **__UpperCamelCase ) -> Any:
'''simple docstring'''
return self.bert(**__UpperCamelCase ).last_hidden_state
def __a ( self , __UpperCamelCase ) -> Optional[int]:
'''simple docstring'''
return token_embeddings.sum(2 , keepdim=__UpperCamelCase )
def __a ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=1 ) -> int:
'''simple docstring'''
return self.softmax(T * self.cos(__UpperCamelCase , __UpperCamelCase ) )
def __a ( self , __UpperCamelCase , __UpperCamelCase ) -> Dict:
'''simple docstring'''
snake_case__ : Any = W_supports['sizes'].tolist()
snake_case__ : Union[str, Any] = W_supports['start_token_id'].item()
snake_case__ : str = W_supports['end_token_id'].item()
del W_supports["sizes"]
del W_supports["start_token_id"]
del W_supports["end_token_id"]
snake_case__ : str = self.BERT(**__UpperCamelCase )
snake_case__ : Tuple = self.BERT(**__UpperCamelCase )
snake_case__ : List[Any] = None
snake_case__ : Any = None
snake_case__ : str = W_supports['input_ids'] == start_token_id
snake_case__ : Dict = W_supports['input_ids'] == end_token_id
for i, size in enumerate(__UpperCamelCase ):
if i == 0:
snake_case__ : Dict = 0
else:
snake_case__ : Union[str, Any] = support_sizes[i - 1]
snake_case__ : int = S[s : s + size][start_token_masks[s : s + size]]
snake_case__ : Optional[int] = S[s : s + size][end_token_masks[s : s + size]]
snake_case__ : List[str] = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 )
snake_case__ : Dict = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 )
if p_starts is not None:
snake_case__ : str = torch.vstack((p_starts, p_start) )
snake_case__ : Optional[Any] = torch.vstack((p_ends, p_end) )
else:
snake_case__ : Union[str, Any] = p_start
snake_case__ : List[Any] = p_end
return p_starts, p_ends
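# Illustrative sketch (not from the original file) of the span scoring used in
# the forward pass above: query token embeddings are matched against the
# support set's start (or end) token embeddings with a dot product, then
# reduced and normalised into a distribution over query positions. All shapes
# here are made up for the demo.
import torch

q = torch.randn(10, 768)        # query sequence: 10 token embeddings
s_start = torch.randn(3, 768)   # 3 start-token embeddings from the supports
p_start = torch.matmul(q, s_start.T).sum(1).softmax(0)  # (10,) distribution
assert torch.isclose(p_start.sum(), torch.tensor(1.0))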
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ : Dict = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
lowerCAmelCase__ : Optional[Any] = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', F'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', F'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''',
F'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''',
F'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias'''))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', F'''decoder.layers.{i}.sa_qcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', F'''decoder.layers.{i}.sa_kcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', F'''decoder.layers.{i}.sa_qpos_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', F'''decoder.layers.{i}.sa_kpos_proj.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.weight''', F'''decoder.layers.{i}.sa_v_proj.weight'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', F'''decoder.layers.{i}.ca_qcontent_proj.weight''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', F'''decoder.layers.{i}.ca_kcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', F'''decoder.layers.{i}.ca_kpos_proj.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.weight''', F'''decoder.layers.{i}.ca_v_proj.weight'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', F'''decoder.layers.{i}.ca_qpos_sine_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', F'''decoder.layers.{i}.sa_qcontent_proj.bias''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', F'''decoder.layers.{i}.sa_kcontent_proj.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', F'''decoder.layers.{i}.sa_qpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', F'''decoder.layers.{i}.sa_kpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.bias''', F'''decoder.layers.{i}.sa_v_proj.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', F'''decoder.layers.{i}.ca_qcontent_proj.bias''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', F'''decoder.layers.{i}.ca_kcontent_proj.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', F'''decoder.layers.{i}.ca_kpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.bias''', F'''decoder.layers.{i}.ca_v_proj.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', F'''decoder.layers.{i}.ca_qpos_sine_proj.bias''')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
('''transformer.decoder.ref_point_head.layers.0.weight''', '''decoder.ref_point_head.layers.0.weight'''),
('''transformer.decoder.ref_point_head.layers.0.bias''', '''decoder.ref_point_head.layers.0.bias'''),
('''transformer.decoder.ref_point_head.layers.1.weight''', '''decoder.ref_point_head.layers.1.weight'''),
('''transformer.decoder.ref_point_head.layers.1.bias''', '''decoder.ref_point_head.layers.1.bias'''),
('''transformer.decoder.query_scale.layers.0.weight''', '''decoder.query_scale.layers.0.weight'''),
('''transformer.decoder.query_scale.layers.0.bias''', '''decoder.query_scale.layers.0.bias'''),
('''transformer.decoder.query_scale.layers.1.weight''', '''decoder.query_scale.layers.1.weight'''),
('''transformer.decoder.query_scale.layers.1.bias''', '''decoder.query_scale.layers.1.bias'''),
('''transformer.decoder.layers.0.ca_qpos_proj.weight''', '''decoder.layers.0.ca_qpos_proj.weight'''),
('''transformer.decoder.layers.0.ca_qpos_proj.bias''', '''decoder.layers.0.ca_qpos_proj.bias'''),
]
)
def UpperCamelCase__ ( A__ , A__ , A__ ) -> List[str]:
snake_case__ : int = state_dict.pop(A__ )
snake_case__ : Union[str, Any] = val
def UpperCamelCase__ ( A__ ) -> int:
snake_case__ : List[Any] = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
snake_case__ : Any = key.replace('backbone.0.body' , 'backbone.conv_encoder.model' )
snake_case__ : Optional[int] = value
else:
snake_case__ : Optional[int] = value
return new_state_dict
def UpperCamelCase__ ( A__ , A__=False ) -> Optional[int]:
snake_case__ : Optional[int] = ''
if is_panoptic:
snake_case__ : Tuple = 'conditional_detr.'
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
snake_case__ : int = state_dict.pop(F"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""" )
snake_case__ : str = state_dict.pop(F"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
snake_case__ : Union[str, Any] = in_proj_weight[:256, :]
snake_case__ : Union[str, Any] = in_proj_bias[:256]
snake_case__ : Union[str, Any] = in_proj_weight[256:512, :]
snake_case__ : Optional[Any] = in_proj_bias[256:512]
snake_case__ : List[str] = in_proj_weight[-256:, :]
snake_case__ : Tuple = in_proj_bias[-256:]
def UpperCamelCase__ ( ) -> Tuple:
snake_case__ : int = 'http://images.cocodataset.org/val2017/000000039769.jpg'
snake_case__ : str = Image.open(requests.get(A__ , stream=A__ ).raw )
return im
@torch.no_grad()
def UpperCamelCase__ ( A__ , A__ ) -> str:
snake_case__ : List[Any] = ConditionalDetrConfig()
# set backbone and dilation attributes
if "resnet101" in model_name:
snake_case__ : Any = 'resnet101'
if "dc5" in model_name:
snake_case__ : Any = True
snake_case__ : int = 'panoptic' in model_name
if is_panoptic:
snake_case__ : str = 250
else:
snake_case__ : Union[str, Any] = 91
snake_case__ : Optional[int] = 'huggingface/label-files'
snake_case__ : Optional[Any] = 'coco-detection-id2label.json'
snake_case__ : str = json.load(open(hf_hub_download(A__ , A__ , repo_type='dataset' ) , 'r' ) )
snake_case__ : List[Any] = {int(A__ ): v for k, v in idalabel.items()}
snake_case__ : Any = idalabel
snake_case__ : int = {v: k for k, v in idalabel.items()}
# load image processor
snake_case__ : List[Any] = 'coco_panoptic' if is_panoptic else 'coco_detection'
snake_case__ : List[Any] = ConditionalDetrImageProcessor(format=A__ )
# prepare image
snake_case__ : List[str] = prepare_img()
snake_case__ : Any = image_processor(images=A__ , return_tensors='pt' )
snake_case__ : Dict = encoding['pixel_values']
logger.info(F"""Converting model {model_name}...""" )
# load original model from torch hub
snake_case__ : Any = torch.hub.load('DeppMeng/ConditionalDETR' , A__ , pretrained=A__ ).eval()
snake_case__ : Tuple = conditional_detr.state_dict()
# rename keys
for src, dest in rename_keys:
if is_panoptic:
snake_case__ : List[Any] = 'conditional_detr.' + src
rename_key(A__ , A__ , A__ )
snake_case__ : Dict = rename_backbone_keys(A__ )
# query, key and value matrices need special treatment
read_in_q_k_v(A__ , is_panoptic=A__ )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
snake_case__ : Optional[int] = 'conditional_detr.model.' if is_panoptic else 'model.'
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith('conditional_detr' )
and not key.startswith('class_labels_classifier' )
and not key.startswith('bbox_predictor' )
):
snake_case__ : List[Any] = state_dict.pop(A__ )
snake_case__ : Optional[int] = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
snake_case__ : str = state_dict.pop(A__ )
snake_case__ : List[Any] = val
elif key.startswith('bbox_attention' ) or key.startswith('mask_head' ):
continue
else:
snake_case__ : Union[str, Any] = state_dict.pop(A__ )
snake_case__ : Dict = val
else:
if not key.startswith('class_labels_classifier' ) and not key.startswith('bbox_predictor' ):
snake_case__ : List[Any] = state_dict.pop(A__ )
snake_case__ : Optional[int] = val
# finally, create HuggingFace model and load state dict
snake_case__ : Union[str, Any] = ConditionalDetrForSegmentation(A__ ) if is_panoptic else ConditionalDetrForObjectDetection(A__ )
model.load_state_dict(A__ )
model.eval()
model.push_to_hub(repo_id=A__ , organization='DepuMeng' , commit_message='Add model' )
# verify our conversion
snake_case__ : Tuple = conditional_detr(A__ )
snake_case__ : str = model(A__ )
assert torch.allclose(outputs.logits , original_outputs['pred_logits'] , atol=1e-4 )
assert torch.allclose(outputs.pred_boxes , original_outputs['pred_boxes'] , atol=1e-4 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs['pred_masks'] , atol=1e-4 )
# Save model and image processor
logger.info(F"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
Path(A__ ).mkdir(exist_ok=A__ )
model.save_pretrained(A__ )
image_processor.save_pretrained(A__ )
if __name__ == "__main__":
lowerCAmelCase__ : Any = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''conditional_detr_resnet50''',
type=str,
help='''Name of the CONDITIONAL_DETR model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
lowerCAmelCase__ : int = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
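# Illustrative sketch (not from the original file) of what read_in_q_k_v does
# above: PyTorch's nn.MultiheadAttention stores the query/key/value
# projections as one stacked in_proj matrix of shape (3 * d_model, d_model),
# while the HuggingFace model keeps three separate projections, so conversion
# slices the stacked weight into thirds (d_model = 256 for this model).
import torch

d_model = 256
in_proj_weight = torch.randn(3 * d_model, d_model)
q_w = in_proj_weight[:d_model, :]
k_w = in_proj_weight[d_model : 2 * d_model, :]
v_w = in_proj_weight[-d_model:, :]
assert torch.equal(torch.cat([q_w, k_w, v_w]), in_proj_weight)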
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase__ : List[Any] = '''▁'''
lowerCAmelCase__ : int = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
class __snake_case ( _lowerCamelCase ,unittest.TestCase ):
__lowerCamelCase = BertGenerationTokenizer
__lowerCamelCase = False
__lowerCamelCase = True
def __a ( self ) -> Optional[int]:
'''simple docstring'''
super().setUp()
snake_case__ : str = BertGenerationTokenizer(__UpperCamelCase , keep_accents=__UpperCamelCase )
tokenizer.save_pretrained(self.tmpdirname )
def __a ( self ) -> Optional[int]:
'''simple docstring'''
snake_case__ : List[str] = '<s>'
snake_case__ : Dict = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__UpperCamelCase ) , __UpperCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__UpperCamelCase ) , __UpperCamelCase )
def __a ( self ) -> List[str]:
'''simple docstring'''
snake_case__ : Tuple = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<unk>' )
self.assertEqual(vocab_keys[1] , '<s>' )
self.assertEqual(vocab_keys[-1] , '<pad>' )
self.assertEqual(len(__UpperCamelCase ) , 1002 )
def __a ( self ) -> int:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def __a ( self ) -> Tuple:
'''simple docstring'''
snake_case__ : Optional[Any] = BertGenerationTokenizer(__UpperCamelCase , keep_accents=__UpperCamelCase )
snake_case__ : int = tokenizer.tokenize('This is a test' )
self.assertListEqual(__UpperCamelCase , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__UpperCamelCase ) , [285, 46, 10, 170, 382] , )
snake_case__ : Any = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
__UpperCamelCase , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
snake_case__ : Optional[Any] = tokenizer.convert_tokens_to_ids(__UpperCamelCase )
self.assertListEqual(
__UpperCamelCase , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
snake_case__ : int = tokenizer.convert_ids_to_tokens(__UpperCamelCase )
self.assertListEqual(
__UpperCamelCase , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
@cached_property
def __a ( self ) -> Dict:
'''simple docstring'''
return BertGenerationTokenizer.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' )
@slow
def __a ( self ) -> Any:
'''simple docstring'''
snake_case__ : int = 'Hello World!'
snake_case__ : Union[str, Any] = [18536, 2260, 101]
self.assertListEqual(__UpperCamelCase , self.big_tokenizer.encode(__UpperCamelCase ) )
@slow
def __a ( self ) -> Optional[int]:
'''simple docstring'''
snake_case__ : str = (
'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'
)
snake_case__ : List[Any] = [
871,
419,
358,
946,
991,
2521,
452,
358,
1357,
387,
7751,
3536,
112,
985,
456,
126,
865,
938,
5400,
5734,
458,
1368,
467,
786,
2462,
5246,
1159,
633,
865,
4519,
457,
582,
852,
2557,
427,
916,
508,
405,
34324,
497,
391,
408,
11342,
1244,
385,
100,
938,
985,
456,
574,
362,
12597,
3200,
3129,
1172,
]
self.assertListEqual(__UpperCamelCase , self.big_tokenizer.encode(__UpperCamelCase ) )
@require_torch
@slow
def __a ( self ) -> List[str]:
'''simple docstring'''
import torch
from transformers import BertGenerationConfig, BertGenerationEncoder
# Build sequence
snake_case__ : Optional[int] = list(self.big_tokenizer.get_vocab().keys() )[:10]
snake_case__ : Optional[int] = ' '.join(__UpperCamelCase )
snake_case__ : int = self.big_tokenizer.encode_plus(__UpperCamelCase , return_tensors='pt' , return_token_type_ids=__UpperCamelCase )
snake_case__ : Tuple = self.big_tokenizer.batch_encode_plus(
[sequence + ' ' + sequence] , return_tensors='pt' , return_token_type_ids=__UpperCamelCase )
snake_case__ : Dict = BertGenerationConfig()
snake_case__ : List[str] = BertGenerationEncoder(__UpperCamelCase )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**__UpperCamelCase )
model(**__UpperCamelCase )
@slow
def __a ( self ) -> Dict:
'''simple docstring'''
snake_case__ : Optional[int] = {'input_ids': [[39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114], [448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__UpperCamelCase , model_name='google/bert_for_seq_generation_L-24_bbc_encoder' , revision='c817d1fd1be2ffa69431227a1fe320544943d4db' , )
from collections import namedtuple
lowerCAmelCase__ : Union[str, Any] = namedtuple('''from_to''', '''from_ to''')
METRIC_CONVERSION = {
    '''cubicmeter''': from_to(1, 1),
    '''litre''': from_to(0.001, 1000),
    '''kilolitre''': from_to(1, 1),
    '''gallon''': from_to(0.00454, 264.172),
    '''cubicyard''': from_to(0.76455, 1.30795),
    '''cubicfoot''': from_to(0.028, 35.3147),
    '''cup''': from_to(0.000236588, 4226.75),
}
def volume_conversion(value: float , from_type: str , to_type: str ) -> float:
    if from_type not in METRIC_CONVERSION:
        raise ValueError(
            F"""Invalid 'from_type' value: {from_type!r}. Supported values are:\n"""
            + ', '.join(METRIC_CONVERSION ) )
    if to_type not in METRIC_CONVERSION:
        raise ValueError(
            F"""Invalid 'to_type' value: {to_type!r}. Supported values are:\n"""
            + ', '.join(METRIC_CONVERSION ) )
    return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
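# A quick usage sketch (illustrative): every conversion goes through cubic meters,
# so the result is value * from_ (into m^3) * to (back out of m^3). For example,
# volume_conversion(4, 'cubicmeter', 'litre') == 4000, and
# volume_conversion(1, 'gallon', 'litre') is roughly 4.54.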
if __name__ == "__main__":
import doctest
doctest.testmod()
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __snake_case ( _lowerCamelCase ):
__lowerCamelCase = ["""image_processor""", """tokenizer"""]
__lowerCamelCase = """CLIPImageProcessor"""
__lowerCamelCase = ("""XLMRobertaTokenizer""", """XLMRobertaTokenizerFast""")
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ) -> None:
        '''simple docstring'''
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.' , FutureWarning , )
            feature_extractor = kwargs.pop('feature_extractor' )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.' )
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.' )
        super().__init__(image_processor , tokenizer )
    def __call__( self , text=None , images=None , return_tensors=None , **kwargs ) -> BatchEncoding:
        '''simple docstring'''
        if text is None and images is None:
            raise ValueError('You have to specify either text or images. Both cannot be none.' )
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None and images is not None:
            encoding['pixel_values'] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )
    def batch_decode( self , *args , **kwargs ):
        '''simple docstring'''
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        '''simple docstring'''
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def model_input_names( self ):
        '''simple docstring'''
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
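# A minimal usage sketch (names are illustrative; `image_processor` and `tokenizer` are
# assumed to be instances matching the class attributes above):
#
#     processor = __snake_case(image_processor=image_processor, tokenizer=tokenizer)
#     batch = processor(text=['a photo of a cat'], images=[pil_image], return_tensors='pt')
#     # -> input_ids / attention_mask from the tokenizer plus pixel_values from the image processor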
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = '''▁'''

VOCAB_FILES_NAMES = {'''vocab_file''': '''sentencepiece.bpe.model'''}

PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''facebook/xglm-564M''': '''https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model''',
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''facebook/xglm-564M''': 2048,
}
class __snake_case ( _lowerCamelCase ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]
def __init__( self , __UpperCamelCase , __UpperCamelCase="<s>" , __UpperCamelCase="</s>" , __UpperCamelCase="</s>" , __UpperCamelCase="<s>" , __UpperCamelCase="<unk>" , __UpperCamelCase="<pad>" , __UpperCamelCase = None , **__UpperCamelCase , ) -> None:
'''simple docstring'''
snake_case__ : Any = {} if sp_model_kwargs is None else sp_model_kwargs
# Compatibility with the original tokenizer
snake_case__ : Tuple = 7
snake_case__ : Dict = [F"""<madeupword{i}>""" for i in range(self.num_madeup_words )]
snake_case__ : Union[str, Any] = kwargs.get('additional_special_tokens' , [] )
kwargs["additional_special_tokens"] += [
word for word in madeup_words if word not in kwargs["additional_special_tokens"]
]
super().__init__(
bos_token=__UpperCamelCase , eos_token=__UpperCamelCase , unk_token=__UpperCamelCase , sep_token=__UpperCamelCase , cls_token=__UpperCamelCase , pad_token=__UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCamelCase , )
snake_case__ : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__UpperCamelCase ) )
snake_case__ : Optional[Any] = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
        sp_size = len(self.sp_model )
        madeup_words_ids = {F"""<madeupword{i}>""": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
        self.fairseq_tokens_to_ids.update(madeup_words_ids )
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__( self ):
        '''simple docstring'''
        state = self.__dict__.copy()
        state['sp_model'] = None
        state['sp_model_proto'] = self.sp_model.serialized_model_proto()
        return state
    def __setstate__( self , d ):
        '''simple docstring'''
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , 'sp_model_kwargs' ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        '''simple docstring'''
        if token_ids_1 is None:
            return [self.sep_token_id] + token_ids_0
        sep = [self.sep_token_id]
        return sep + token_ids_0 + sep + sep + token_ids_1
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ) -> List[int]:
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 ))
        return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 ))
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return len(sep + token_ids_0 ) * [0]
        return len(sep + token_ids_0 + sep + sep + token_ids_1 ) * [0]
@property
    def vocab_size( self ) -> int:
'''simple docstring'''
return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
    def get_vocab( self ) -> Dict:
        '''simple docstring'''
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def _tokenize( self , text ) -> List[str]:
        '''simple docstring'''
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id( self , token ):
        '''simple docstring'''
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token )
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
    def _convert_id_to_token( self , index ):
        '''simple docstring'''
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset )
    def convert_tokens_to_string( self , tokens ) -> str:
        '''simple docstring'''
        out_string = ''.join(tokens ).replace(SPIECE_UNDERLINE , ' ' ).strip()
        return out_string
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        '''simple docstring'''
        if not os.path.isdir(save_directory ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , 'wb' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
from __future__ import annotations
import math
def ucal(u: float , p: int ) -> float:
    temp = u
    for i in range(1 , p ):
        temp = temp * (u - i)
    return temp


def main() -> None:
    n = int(input('enter the numbers of values: ' ) )
    y: list[list[float]] = []
    for _ in range(n ):
        y.append([] )
    for i in range(n ):
        for j in range(n ):
            y[i].append(j )
            y[i][j] = 0
    print('enter the values of parameters in a list: ' )
    x = list(map(float , input().split() ) )
    print('enter the values of corresponding parameters: ' )
    for i in range(n ):
        y[i][0] = float(input() )
    value = int(input('enter the value to interpolate: ' ) )
    u = (value - x[0]) / (x[1] - x[0])
    # for calculating forward difference table
    for i in range(1 , n ):
        for j in range(n - i ):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]
    summ = y[0][0]
    for i in range(1 , n ):
        summ += (ucal(u , i ) * y[0][i]) / math.factorial(i )
    print(F"""the value at {value} is {summ}""" )


if __name__ == "__main__":
    main()
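# Worked example (assumed inputs): with n = 3, x = [0, 1, 2] and first y-column [1, 2, 4],
# the forward differences are [1, 2] and [1]. Interpolating at value = 1 gives
# u = (1 - 0) / (1 - 0) = 1 and summ = 1 + 1*1/1! + 1*(1 - 1)*1/2! = 2, matching the tabulated y at x = 1.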
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}

CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'''
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''facebook/dpr-ctx_encoder-single-nq-base''': 512,
    '''facebook/dpr-ctx_encoder-multiset-base''': 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''facebook/dpr-question_encoder-single-nq-base''': 512,
    '''facebook/dpr-question_encoder-multiset-base''': 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''facebook/dpr-reader-single-nq-base''': 512,
    '''facebook/dpr-reader-multiset-base''': 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    '''facebook/dpr-ctx_encoder-single-nq-base''': {'''do_lower_case''': True},
    '''facebook/dpr-ctx_encoder-multiset-base''': {'''do_lower_case''': True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    '''facebook/dpr-question_encoder-single-nq-base''': {'''do_lower_case''': True},
    '''facebook/dpr-question_encoder-multiset-base''': {'''do_lower_case''': True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
    '''facebook/dpr-reader-single-nq-base''': {'''do_lower_case''': True},
    '''facebook/dpr-reader-multiset-base''': {'''do_lower_case''': True},
}
class __snake_case ( _lowerCamelCase ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRContextEncoderTokenizer
class __snake_case ( _lowerCamelCase ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRQuestionEncoderTokenizer
DPRSpanPrediction = collections.namedtuple(
    '''DPRSpanPrediction''', ['''span_score''', '''relevance_score''', '''doc_id''', '''start_index''', '''end_index''', '''text''']
)
DPRReaderOutput = collections.namedtuple('''DPRReaderOutput''', ['''start_logits''', '''end_logits''', '''relevance_logits'''])
CUSTOM_DPR_READER_DOCSTRING = r'''
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
            - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
if provided).
            - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
            - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
            - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
            - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
            - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
            - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
            - `'tf'`: Return TensorFlow `tf.constant` objects.
            - `'pt'`: Return PyTorch `torch.Tensor` objects.
            - `'np'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer\'s default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Return:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
'''
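# A minimal usage sketch (illustrative; any reader checkpoint listed above works the same way):
#
#     from transformers import DPRReaderTokenizer
#     tokenizer = DPRReaderTokenizer.from_pretrained('facebook/dpr-reader-single-nq-base')
#     encoded_inputs = tokenizer(
#         questions='What is love?',
#         titles='Haddaway',
#         texts='What Is Love is a song recorded by the artist Haddaway',
#         return_tensors='pt',
#     )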
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING )
class __snake_case :
    def __call__( self , questions , titles = None , texts = None , padding = False , truncation = False , max_length = None , return_tensors = None , return_attention_mask = None , **kwargs , ) -> BatchEncoding:
        '''simple docstring'''
        if titles is None and texts is None:
            return super().__call__(
                questions , padding=padding , truncation=truncation , max_length=max_length , return_tensors=return_tensors , return_attention_mask=return_attention_mask , **kwargs , )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions , text_pair , padding=padding , truncation=truncation , max_length=max_length , return_tensors=return_tensors , return_attention_mask=return_attention_mask , **kwargs , )
        titles = titles if not isinstance(titles , str ) else [titles]
        texts = texts if not isinstance(texts , str ) else [texts]
        n_passages = len(titles )
        questions = questions if not isinstance(questions , str ) else [questions] * n_passages
        assert len(titles ) == len(
            texts ), F"""There should be as many titles as texts but got {len(titles )} titles and {len(texts )} texts."""
        encoded_question_and_titles = super().__call__(questions , titles , padding=False , truncation=False )['input_ids']
        encoded_texts = super().__call__(texts , add_special_tokens=False , padding=False , truncation=False )['input_ids']
        encoded_inputs = {
            'input_ids': [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles , encoded_texts )
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
            encoded_inputs['attention_mask'] = attention_mask
        return self.pad(encoded_inputs , padding=padding , max_length=max_length , return_tensors=return_tensors )
    def decode_best_spans( self , reader_input , reader_output , num_spans = 16 , max_answer_length = 64 , num_spans_per_passage = 4 , ) -> List[DPRSpanPrediction]:
        '''simple docstring'''
        input_ids = reader_input['input_ids']
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits )
        sorted_docs = sorted(range(n_passages ) , reverse=True , key=relevance_logits.__getitem__ )
        nbest_spans_predictions: List[DPRReaderOutput] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id] )
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id , 2 ) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id )
            else:
                sequence_len = len(sequence_ids )
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=max_answer_length , top_spans=num_spans_per_passage , )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=doc_id , start_index=start_index , end_index=end_index , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
            if len(nbest_spans_predictions ) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans( self , start_logits , end_logits , max_answer_length , top_spans , ) -> List[DPRSpanPrediction]:
        '''simple docstring'''
        scores = []
        for start_index, start_score in enumerate(start_logits ):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
                scores.append(((start_index, start_index + answer_length), start_score + end_score) )
        scores = sorted(scores , key=lambda x : x[1] , reverse=True )
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            assert start_index <= end_index, F"""Wrong span indices: [{start_index}:{end_index}]"""
            length = end_index - start_index + 1
            assert length <= max_answer_length, F"""Span is too long: {length} > {max_answer_length}"""
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals ):
                continue
            chosen_span_intervals.append((start_index, end_index) )
            if len(chosen_span_intervals ) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING )
class __snake_case ( _lowerCamelCase ,_lowerCamelCase ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["""input_ids""", """attention_mask"""]
    slow_tokenizer_class = DPRReaderTokenizer
'''simple docstring'''
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
lowerCAmelCase__ : Any = logging.getLogger(__name__)
@dataclass
class __snake_case ( _lowerCamelCase ):
    label_smoothing: Optional[float] = field(
        default=0.0 , metadata={"""help""": """The label smoothing epsilon to apply (if not zero)."""} )
    sortish_sampler: bool = field(default=False , metadata={"""help""": """Whether to SortishSamler or not."""} )
    predict_with_generate: bool = field(
        default=False , metadata={"""help""": """Whether to use generate to calculate generative metrics (ROUGE, BLEU)."""} )
    adafactor: bool = field(default=False , metadata={"""help""": """whether to use adafactor"""} )
    encoder_layerdrop: Optional[float] = field(
        default=None , metadata={"""help""": """Encoder layer dropout probability. Goes into model.config."""} )
    decoder_layerdrop: Optional[float] = field(
        default=None , metadata={"""help""": """Decoder layer dropout probability. Goes into model.config."""} )
    dropout: Optional[float] = field(default=None , metadata={"""help""": """Dropout probability. Goes into model.config."""} )
    attention_dropout: Optional[float] = field(
        default=None , metadata={"""help""": """Attention dropout probability. Goes into model.config."""} )
    lr_scheduler: Optional[str] = field(
        default="""linear""" , metadata={"""help""": F"""Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}"""} , )
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __snake_case ( _lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,unittest.TestCase ):
    pipeline_class = StableDiffusionInstructPixaPixPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width""", """cross_attention_kwargs"""}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components( self ):
        '''simple docstring'''
        torch.manual_seed(0 )
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=8 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
        scheduler = PNDMScheduler(skip_prk_steps=True )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        components = {
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'safety_checker': None,
            'feature_extractor': None,
        }
        return components
    def get_dummy_inputs( self , device , seed=0 ):
        '''simple docstring'''
        image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        image = Image.fromarray(np.uint8(image ) ).convert('RGB' )
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'image': image,
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 6.0,
            'image_guidance_scale': 1,
            'output_type': 'numpy',
        }
        return inputs
    def test_stable_diffusion_pix2pix_default_case( self ):
        '''simple docstring'''
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPixaPixPipeline(**components )
        sd_pipe = sd_pipe.to(device )
        sd_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        image = sd_pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __a ( self ) -> Any:
'''simple docstring'''
snake_case__ : Dict = 'cpu' # ensure determinism for the device-dependent torch.Generator
snake_case__ : Union[str, Any] = self.get_dummy_components()
snake_case__ : Optional[Any] = StableDiffusionInstructPixaPixPipeline(**__UpperCamelCase )
snake_case__ : List[Any] = sd_pipe.to(__UpperCamelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCamelCase )
snake_case__ : Union[str, Any] = self.get_dummy_inputs(__UpperCamelCase )
snake_case__ : List[str] = 'french fries'
snake_case__ : Optional[Any] = sd_pipe(**__UpperCamelCase , negative_prompt=__UpperCamelCase )
snake_case__ : Union[str, Any] = output.images
snake_case__ : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
snake_case__ : Any = np.array([0.7_5_1_1, 0.3_6_4_2, 0.4_5_5_3, 0.6_2_3_6, 0.5_7_9_7, 0.5_0_1_3, 0.4_3_4_3, 0.5_6_1_1, 0.4_8_3_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __a ( self ) -> int:
'''simple docstring'''
snake_case__ : Optional[int] = 'cpu' # ensure determinism for the device-dependent torch.Generator
snake_case__ : List[str] = self.get_dummy_components()
snake_case__ : str = StableDiffusionInstructPixaPixPipeline(**__UpperCamelCase )
snake_case__ : str = sd_pipe.to(__UpperCamelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCamelCase )
snake_case__ : Dict = self.get_dummy_inputs(__UpperCamelCase )
snake_case__ : Any = [inputs['prompt']] * 2
snake_case__ : Optional[int] = np.array(inputs['image'] ).astype(np.floataa ) / 2_5_5.0
snake_case__ : Optional[int] = torch.from_numpy(__UpperCamelCase ).unsqueeze(0 ).to(__UpperCamelCase )
snake_case__ : Any = image / 2 + 0.5
snake_case__ : Optional[Any] = image.permute(0 , 3 , 1 , 2 )
snake_case__ : List[Any] = image.repeat(2 , 1 , 1 , 1 )
snake_case__ : Optional[int] = sd_pipe(**__UpperCamelCase ).images
snake_case__ : Union[str, Any] = image[-1, -3:, -3:, -1]
assert image.shape == (2, 32, 32, 3)
snake_case__ : List[Any] = np.array([0.5_8_1_2, 0.5_7_4_8, 0.5_2_2_2, 0.5_9_0_8, 0.5_6_9_5, 0.7_1_7_4, 0.6_8_0_4, 0.5_5_2_3, 0.5_5_7_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __a ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : Optional[int] = 'cpu' # ensure determinism for the device-dependent torch.Generator
snake_case__ : Optional[int] = self.get_dummy_components()
snake_case__ : Tuple = EulerAncestralDiscreteScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='scaled_linear' )
snake_case__ : int = StableDiffusionInstructPixaPixPipeline(**__UpperCamelCase )
snake_case__ : List[str] = sd_pipe.to(__UpperCamelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCamelCase )
snake_case__ : str = self.get_dummy_inputs(__UpperCamelCase )
snake_case__ : Any = sd_pipe(**__UpperCamelCase ).images
snake_case__ : int = image[0, -3:, -3:, -1]
snake_case__ : Tuple = [round(__UpperCamelCase , 4 ) for x in image_slice.flatten().tolist()]
print(','.join([str(__UpperCamelCase ) for x in slice] ) )
assert image.shape == (1, 32, 32, 3)
snake_case__ : List[Any] = np.array([0.7_4_1_7, 0.3_8_4_2, 0.4_7_3_2, 0.5_7_7_6, 0.5_8_9_1, 0.5_1_3_9, 0.4_0_5_2, 0.5_6_7_3, 0.4_9_8_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __a ( self ) -> int:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def __a ( self ) -> Any:
'''simple docstring'''
snake_case__ : Optional[int] = self.get_dummy_components()
snake_case__ : int = StableDiffusionInstructPixaPixPipeline(**__UpperCamelCase )
snake_case__ : Union[str, Any] = VaeImageProcessor(do_resize=__UpperCamelCase , do_normalize=__UpperCamelCase )
snake_case__ : Optional[int] = pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
snake_case__ : Optional[Any] = pipe(**self.get_dummy_inputs_by_type(__UpperCamelCase , input_image_type='pt' ) )[0]
snake_case__ : Union[str, Any] = components['vae']
snake_case__ : str = self.get_dummy_inputs_by_type(__UpperCamelCase , input_image_type='pt' )
for image_param in self.image_latents_params:
if image_param in inputs.keys():
snake_case__ : List[str] = vae.encode(inputs[image_param] ).latent_dist.mode()
snake_case__ : Dict = pipe(**__UpperCamelCase )[0]
snake_case__ : str = np.abs(out - out_latents_inputs ).max()
self.assertLess(__UpperCamelCase , 1E-4 , 'passing latents as image input generate different result from passing image' )
@slow
@require_torch_gpu
class __snake_case ( unittest.TestCase ):
    def tearDown( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def get_inputs( self , seed=0 ):
        '''simple docstring'''
        generator = torch.manual_seed(seed )
        image = load_image(
            'https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg' )
        inputs = {
            'prompt': 'turn him into a cyborg',
            'image': image,
            'generator': generator,
            'num_inference_steps': 3,
            'guidance_scale': 7.5,
            'image_guidance_scale': 1.0,
            'output_type': 'numpy',
        }
        return inputs
def __a ( self ) -> Dict:
'''simple docstring'''
snake_case__ : Any = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=__UpperCamelCase )
pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
pipe.enable_attention_slicing()
snake_case__ : Tuple = self.get_inputs()
snake_case__ : List[Any] = pipe(**__UpperCamelCase ).images
snake_case__ : Union[str, Any] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
snake_case__ : Dict = np.array([0.5_9_0_2, 0.6_0_1_5, 0.6_0_2_7, 0.5_9_8_3, 0.6_0_9_2, 0.6_0_6_1, 0.5_7_6_5, 0.5_7_8_5, 0.5_5_5_5] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def __a ( self ) -> str:
'''simple docstring'''
snake_case__ : Any = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=__UpperCamelCase )
snake_case__ : Tuple = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
pipe.enable_attention_slicing()
snake_case__ : Dict = self.get_inputs()
snake_case__ : Dict = pipe(**__UpperCamelCase ).images
snake_case__ : Union[str, Any] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
snake_case__ : List[Any] = np.array([0.6_5_7_8, 0.6_8_1_7, 0.6_9_7_2, 0.6_7_6_1, 0.6_8_5_6, 0.6_9_1_6, 0.6_4_2_8, 0.6_5_1_6, 0.6_3_0_1] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def __a ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case__ : Tuple = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=__UpperCamelCase )
snake_case__ : Tuple = DDIMScheduler.from_config(pipe.scheduler.config )
pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
pipe.enable_attention_slicing()
snake_case__ : Optional[int] = self.get_inputs()
snake_case__ : Optional[int] = pipe(**__UpperCamelCase ).images
snake_case__ : Tuple = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
snake_case__ : int = np.array([0.3_8_2_8, 0.3_8_3_4, 0.3_8_1_8, 0.3_7_9_2, 0.3_8_6_5, 0.3_7_5_2, 0.3_7_9_2, 0.3_8_4_7, 0.3_7_5_3] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def __a ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : int = 0
def callback_fn(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> None:
snake_case__ : List[Any] = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
snake_case__ : Any = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
snake_case__ : int = latents[0, -3:, -3:, -1]
snake_case__ : List[str] = np.array([-0.2_4_6_3, -0.4_6_4_4, -0.9_7_5_6, 1.5_1_7_6, 1.4_4_1_4, 0.7_8_6_6, 0.9_8_9_7, 0.8_5_2_1, 0.7_9_8_3] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
elif step == 2:
snake_case__ : Dict = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
snake_case__ : Dict = latents[0, -3:, -3:, -1]
snake_case__ : Optional[Any] = np.array([-0.2_6_4_4, -0.4_6_2_6, -0.9_6_5_3, 1.5_1_7_6, 1.4_5_5_1, 0.7_6_8_6, 0.9_8_0_5, 0.8_4_5_2, 0.8_1_1_5] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
snake_case__ : str = False
snake_case__ : List[Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=__UpperCamelCase , torch_dtype=torch.floataa )
snake_case__ : int = pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
pipe.enable_attention_slicing()
snake_case__ : int = self.get_inputs()
pipe(**__UpperCamelCase , callback=__UpperCamelCase , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def __a ( self ) -> Any:
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
snake_case__ : str = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=__UpperCamelCase , torch_dtype=torch.floataa )
snake_case__ : Dict = pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
snake_case__ : str = self.get_inputs()
snake_case__ : Tuple = pipe(**__UpperCamelCase )
snake_case__ : List[Any] = torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 10**9
def __a ( self ) -> int:
'''simple docstring'''
snake_case__ : int = self.get_inputs()
# resize to resolution that is divisible by 8 but not 16 or 32
snake_case__ : Tuple = inputs['image'].resize((504, 504) )
snake_case__ : str = 'timbrooks/instruct-pix2pix'
snake_case__ : Tuple = StableDiffusionInstructPixaPixPipeline.from_pretrained(
__UpperCamelCase , safety_checker=__UpperCamelCase , )
pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
pipe.enable_attention_slicing()
snake_case__ : str = pipe(**__UpperCamelCase )
snake_case__ : List[Any] = output.images[0]
snake_case__ : List[Any] = image[255:258, 383:386, -1]
assert image.shape == (504, 504, 3)
snake_case__ : List[str] = np.array([0.2_7_2_6, 0.2_5_2_9, 0.2_6_6_4, 0.2_6_5_5, 0.2_6_4_1, 0.2_6_4_2, 0.2_5_9_1, 0.2_6_4_9, 0.2_5_9_0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class __snake_case ( datasets.BuilderConfig ):
    features: Optional[datasets.Features] = None
    encoding: str = """utf-8"""
    encoding_errors: Optional[str] = None
    field: Optional[str] = None
    use_threads: bool = True  # deprecated
    block_size: Optional[int] = None  # deprecated
    chunksize: int = 10 << 20  # 10MB
    newlines_in_values: Optional[bool] = None
class __snake_case ( datasets.ArrowBasedBuilder ):
    BUILDER_CONFIG_CLASS = JsonConfig
    def _info( self ):
        '''simple docstring'''
        if self.config.block_size is not None:
            logger.warning('The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead' )
            self.config.chunksize = self.config.block_size
        if self.config.use_threads is not True:
            logger.warning(
                'The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.' )
        if self.config.newlines_in_values is not None:
            raise ValueError('The JSON loader parameter `newlines_in_values` is no longer supported' )
        return datasets.DatasetInfo(features=self.config.features )
    def _split_generators( self , dl_manager ):
        '''simple docstring'''
        if not self.config.data_files:
            raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
        data_files = dl_manager.download_and_extract(self.config.data_files )
        if isinstance(data_files , (str, list, tuple) ):
            files = data_files
            if isinstance(files , str ):
                files = [files]
            files = [dl_manager.iter_files(file ) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'files': files} )]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files , str ):
                files = [files]
            files = [dl_manager.iter_files(file ) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name , gen_kwargs={'files': files} ) )
        return splits
    def _cast_table( self , pa_table ) -> pa.Table:
        '''simple docstring'''
        if self.config.features is not None:
            # adding missing columns
            for column_name in set(self.config.features ) - set(pa_table.column_names ):
                type = self.config.features.arrow_schema.field(column_name ).type
                pa_table = pa_table.append_column(column_name , pa.array([None] * len(pa_table ) , type=type ) )
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table , self.config.features.arrow_schema )
        return pa_table
    def _generate_tables( self , files ):
        '''simple docstring'''
        for file_idx, file in enumerate(itertools.chain.from_iterable(files ) ):
            # If the file is one json object and if we need to look at the list of items in one specific field
            if self.config.field is not None:
                with open(file , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
                    dataset = json.load(f )
                # We keep only the field we are interested in
                dataset = dataset[self.config.field]
                # We accept two format: a list of dicts or a dict of lists
                if isinstance(dataset , (list, tuple) ):
                    keys = set().union(*[row.keys() for row in dataset] )
                    mapping = {col: [row.get(col ) for row in dataset] for col in keys}
                else:
                    mapping = dataset
                pa_table = pa.Table.from_pydict(mapping )
                yield file_idx, self._cast_table(pa_table )
            # If the file has one json object per line
            else:
                with open(file , 'rb' ) as f:
                    batch_idx = 0
                    # Use block_size equal to the chunk size divided by 32 to leverage multithreading
                    # Set a default minimum value of 16kB if the chunk size is really small
                    block_size = max(self.config.chunksize // 32 , 16 << 10 )
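                    # With the default 10 MB chunksize this is 10485760 // 32 = 327680 bytes; the
                    # 16 << 10 (16 kB) floor only applies when chunksize is below 512 kB.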
                    encoding_errors = (
                        self.config.encoding_errors if self.config.encoding_errors is not None else 'strict'
                    )
                    while True:
                        batch = f.read(self.config.chunksize )
                        if not batch:
                            break
                        # Finish current line
                        try:
                            batch += f.readline()
                        except (AttributeError, io.UnsupportedOperation):
                            batch += readline(f )
                        # PyArrow only accepts utf-8 encoded bytes
                        if self.config.encoding != "utf-8":
                            batch = batch.decode(self.config.encoding , errors=encoding_errors ).encode('utf-8' )
                        try:
                            while True:
                                try:
                                    pa_table = paj.read_json(
                                        io.BytesIO(batch ) , read_options=paj.ReadOptions(block_size=block_size ) )
                                    break
                                except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
                                    if (
                                        isinstance(e , pa.ArrowInvalid )
                                        and "straddling" not in str(e )
                                        or block_size > len(batch )
                                    ):
                                        raise
                                    else:
                                        # Increase the block size in case it was too small.
                                        # The block size will be reset for the next file.
                                        logger.debug(
                                            F"""Batch of {len(batch )} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.""" )
                                        block_size *= 2
                        except pa.ArrowInvalid as e:
                            try:
                                with open(
                                    file , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
                                    dataset = json.load(f )
                            except json.JSONDecodeError:
                                logger.error(F"""Failed to read file '{file}' with error {type(e )}: {e}""" )
                                raise e
                            # If possible, parse the file as a list of json objects and exit the loop
                            if isinstance(dataset , list ):  # list is the only sequence type supported in JSON
                                try:
                                    keys = set().union(*[row.keys() for row in dataset] )
                                    mapping = {col: [row.get(col ) for row in dataset] for col in keys}
                                    pa_table = pa.Table.from_pydict(mapping )
                                except (pa.ArrowInvalid, AttributeError) as e:
                                    logger.error(F"""Failed to read file '{file}' with error {type(e )}: {e}""" )
                                    raise ValueError(F"""Not able to read records in the JSON file at {file}.""" ) from None
                                yield file_idx, self._cast_table(pa_table )
                                break
                            else:
                                logger.error(F"""Failed to read file '{file}' with error {type(e )}: {e}""" )
                                raise ValueError(
                                    F"""Not able to read records in the JSON file at {file}. """
                                    F"""You should probably indicate the field of the JSON file containing your records. """
                                    F"""This JSON file contains the following fields: {str(list(dataset.keys() ) )}. """
                                    F"""Select the correct one and provide it as `field='XXX'` to the dataset loading method. """ ) from None
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield (file_idx, batch_idx), self._cast_table(pa_table )
                        batch_idx += 1
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForSeqaSeq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
    SquadV1Processor,
    SquadV2Processor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', F'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', F'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''',
F'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''',
F'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias'''))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', F'''decoder.layers.{i}.sa_qcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', F'''decoder.layers.{i}.sa_kcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', F'''decoder.layers.{i}.sa_qpos_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', F'''decoder.layers.{i}.sa_kpos_proj.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.weight''', F'''decoder.layers.{i}.sa_v_proj.weight'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', F'''decoder.layers.{i}.ca_qcontent_proj.weight''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', F'''decoder.layers.{i}.ca_kcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', F'''decoder.layers.{i}.ca_kpos_proj.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.weight''', F'''decoder.layers.{i}.ca_v_proj.weight'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', F'''decoder.layers.{i}.ca_qpos_sine_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', F'''decoder.layers.{i}.sa_qcontent_proj.bias''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', F'''decoder.layers.{i}.sa_kcontent_proj.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', F'''decoder.layers.{i}.sa_qpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', F'''decoder.layers.{i}.sa_kpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.bias''', F'''decoder.layers.{i}.sa_v_proj.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', F'''decoder.layers.{i}.ca_qcontent_proj.bias''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', F'''decoder.layers.{i}.ca_kcontent_proj.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', F'''decoder.layers.{i}.ca_kpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.bias''', F'''decoder.layers.{i}.ca_v_proj.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', F'''decoder.layers.{i}.ca_qpos_sine_proj.bias''')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
('''transformer.decoder.ref_point_head.layers.0.weight''', '''decoder.ref_point_head.layers.0.weight'''),
('''transformer.decoder.ref_point_head.layers.0.bias''', '''decoder.ref_point_head.layers.0.bias'''),
('''transformer.decoder.ref_point_head.layers.1.weight''', '''decoder.ref_point_head.layers.1.weight'''),
('''transformer.decoder.ref_point_head.layers.1.bias''', '''decoder.ref_point_head.layers.1.bias'''),
('''transformer.decoder.query_scale.layers.0.weight''', '''decoder.query_scale.layers.0.weight'''),
('''transformer.decoder.query_scale.layers.0.bias''', '''decoder.query_scale.layers.0.bias'''),
('''transformer.decoder.query_scale.layers.1.weight''', '''decoder.query_scale.layers.1.weight'''),
('''transformer.decoder.query_scale.layers.1.bias''', '''decoder.query_scale.layers.1.bias'''),
('''transformer.decoder.layers.0.ca_qpos_proj.weight''', '''decoder.layers.0.ca_qpos_proj.weight'''),
('''transformer.decoder.layers.0.ca_qpos_proj.bias''', '''decoder.layers.0.ca_qpos_proj.bias'''),
]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val
def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
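# Sanity note on the split above: PyTorch's fused in_proj matrix stacks the three
# projections row-wise, so for Conditional DETR's d_model = 256 it has shape
# (3 * 256, 256) = (768, 256); rows [0:256) hold the query projection, [256:512)
# the key projection, and the last 256 rows the value projection.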
# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_conditional_detr_checkpoint(model_name, pytorch_dump_folder_path):
    """
    Copy/paste/tweak the original model's weights to our Conditional DETR structure.
    """
    # load default config
    config = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        config.backbone = "resnet101"
    if "dc5" in model_name:
        config.dilation = True
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    # load image processor
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    image_processor = ConditionalDetrImageProcessor(format=format)
    # prepare image
    img = prepare_img()
    encoding = image_processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    logger.info(f"Converting model {model_name}...")
    # load original model from torch hub
    conditional_detr = torch.hub.load("DeppMeng/ConditionalDETR", model_name, pretrained=True).eval()
    state_dict = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            src = "conditional_detr." + src
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "conditional_detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("conditional_detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                state_dict["conditional_detr.model" + key[len("conditional_detr") :]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["conditional_detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = ConditionalDetrForSegmentation(config) if is_panoptic else ConditionalDetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    model.push_to_hub(repo_id=model_name, organization="DepuMeng", commit_message="Add model")
    # verify our conversion
    original_outputs = conditional_detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-4)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-4)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)
    # Save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
lowerCAmelCase__ : Any = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''conditional_detr_resnet50''',
type=str,
help='''Name of the CONDITIONAL_DETR model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
lowerCAmelCase__ : int = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
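# Example invocation (hypothetical dump path; the script file name may differ):
#   python convert_conditional_detr_original_pytorch_checkpoint_to_pytorch.py \
#       --model_name conditional_detr_resnet50 \
#       --pytorch_dump_folder_path ./conditional_detr_resnet50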
from dataclasses import dataclass, field
from typing import Optional
from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser
@dataclass
class ModelArguments:
    output_dir: str = field(
        metadata={"help": "The output directory where the model will be written."},
    )
    encoder_model_name_or_path: str = field(
        metadata={
            "help": (
                "The encoder model checkpoint for weights initialization. "
                "Don't set if you want to train an encoder model from scratch."
            )
        },
    )
    decoder_model_name_or_path: str = field(
        metadata={
            "help": (
                "The decoder model checkpoint for weights initialization. "
                "Don't set if you want to train a decoder model from scratch."
            )
        },
    )
    encoder_config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained encoder config name or path if not the same as encoder_model_name"}
    )
    decoder_config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained decoder config name or path if not the same as decoder_model_name"}
    )
def main():
    parser = HfArgumentParser((ModelArguments,))
    (model_args,) = parser.parse_args_into_dataclasses()

    # Load pretrained model and tokenizer

    # Use explicit specified encoder config
    if model_args.encoder_config_name:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_config_name)
    # Use pretrained encoder model's config
    else:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path)

    # Use explicit specified decoder config
    if model_args.decoder_config_name:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_config_name)
    # Use pretrained decoder model's config
    else:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path)

    # necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed
    decoder_config.is_decoder = True
    decoder_config.add_cross_attention = True

    model = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained(
        encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path,
        decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path,
        encoder_config=encoder_config,
        decoder_config=decoder_config,
    )

    # GPT2 only has bos/eos tokens but not decoder_start/pad tokens
    decoder_start_token_id = decoder_config.decoder_start_token_id
    pad_token_id = decoder_config.pad_token_id
    if decoder_start_token_id is None:
        decoder_start_token_id = decoder_config.bos_token_id
    if pad_token_id is None:
        pad_token_id = decoder_config.eos_token_id

    # This is necessary to make Flax's generate() work
    model.config.eos_token_id = decoder_config.eos_token_id
    model.config.decoder_start_token_id = decoder_start_token_id
    model.config.pad_token_id = pad_token_id

    image_processor = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path)

    tokenizer = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path)
    tokenizer.pad_token = tokenizer.convert_ids_to_tokens(model.config.pad_token_id)

    model.save_pretrained(model_args.output_dir)
    image_processor.save_pretrained(model_args.output_dir)
    tokenizer.save_pretrained(model_args.output_dir)
if __name__ == "__main__":
main()
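# Example invocation (hypothetical script/checkpoint pairing), building a ViT encoder
# + GPT-2 decoder skeleton for image captioning:
#   python create_model_from_encoder_decoder_models.py \
#       --output_dir ./vit-gpt2 \
#       --encoder_model_name_or_path google/vit-base-patch16-224-in21k \
#       --decoder_model_name_or_path gpt2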
from math import sqrt
def is_prime(number: int) -> bool:
    assert isinstance(number, int) and (number >= 0), "'number' must be an int and positive"
    status = True
    # 0 and 1 are not primes.
    if number <= 1:
        status = False
    for divisor in range(2, int(round(sqrt(number))) + 1):
        # if 'number' is divisible by 'divisor' then set 'status'
        # to false and break up the loop.
        if number % divisor == 0:
            status = False
            break

    # precondition
    assert isinstance(status, bool), "'status' must be from type bool"
    return status
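# e.g. is_prime(97) -> True, is_prime(100) -> False; 0 and 1 are not prime by definition.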
def sieve_er(n):
    assert isinstance(n, int) and (n > 2), "'n' must be an int and > 2"
    # begin_list: contains all natural numbers from 2 up to n
    begin_list = list(range(2, n + 1))
    ans = []  # this list will be returned.

    # actual sieve of Eratosthenes
    for i in range(len(begin_list)):
        for j in range(i + 1, len(begin_list)):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0

    # filter out the actual prime numbers.
    ans = [x for x in begin_list if x != 0]

    # precondition
    assert isinstance(ans, list), "'ans' must be from type list"
    return ans


def get_prime_numbers(n):
    assert isinstance(n, int) and (n > 2), "'n' must be an int and > 2"
    ans = []
    # iterates over all numbers between 2 up to n+1;
    # if a number is prime then append it to list 'ans'
    for number in range(2, n + 1):
        if is_prime(number):
            ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must be from type list"
    return ans
def prime_factorization(number):
    assert isinstance(number, int) and number >= 0, "'number' must be an int and >= 0"
    ans = []  # this list will be returned by the function.
    # potential prime number factors.
    factor = 2
    quotient = number

    if number == 0 or number == 1:
        ans.append(number)
    # if 'number' is not prime then build the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                quotient /= factor
            else:
                factor += 1
    else:
        ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must be from type list"
    return ans


def greatest_prime_factor(number):
    assert isinstance(number, int) and (number >= 0), "'number' must be an int and >= 0"
    ans = 0
    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = max(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must be from type int"
    return ans


def smallest_prime_factor(number):
    assert isinstance(number, int) and (number >= 0), "'number' must be an int and >= 0"
    ans = 0
    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = min(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must be from type int"
    return ans
def is_even(number):
    assert isinstance(number, int), "'number' must be an int"
    assert isinstance(number % 2 == 0, bool), "compare must be from type bool"
    return number % 2 == 0


def is_odd(number):
    assert isinstance(number, int), "'number' must be an int"
    assert isinstance(number % 2 != 0, bool), "compare must be from type bool"
    return number % 2 != 0
def goldbach(number):
    assert isinstance(number, int) and (number > 2) and is_even(number), "'number' must be an int, even and > 2"
    ans = []  # this list will be returned

    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number)
    len_pn = len(prime_numbers)

    # run variables for the while-loops.
    i = 0
    j = None

    # exit variable, for breaking up the loops
    loop = True

    while i < len_pn and loop:
        j = i + 1
        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])
            j += 1
        i += 1

    # precondition
    assert (
        isinstance(ans, list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contain two primes, and the sum of its elements must equal 'number'"
    return ans
def gcd(number1, number2):
    assert (
        isinstance(number1, int) and isinstance(number2, int) and (number1 >= 0) and (number2 >= 0)
    ), "'number1' and 'number2' must be positive integers."
    rest = 0
    while number2 != 0:
        rest = number1 % number2
        number1 = number2
        number2 = rest

    # precondition
    assert isinstance(number1, int) and (number1 >= 0), "'number1' must be from type int and positive"
    return number1
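# Euclid's algorithm traced on gcd(48, 18): 48 % 18 = 12, 18 % 12 = 6, 12 % 6 = 0 -> 6.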
def kg_v(number1, number2):
    assert (
        isinstance(number1, int) and isinstance(number2, int) and (number1 >= 1) and (number2 >= 1)
    ), "'number1' and 'number2' must be positive integers."
    ans = 1  # actual answer that will be returned.

    # for kgV(x, 1)
    if number1 > 1 and number2 > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_1 = prime_factorization(number1)
        prime_fac_2 = prime_factorization(number2)
    elif number1 == 1 or number2 == 1:
        prime_fac_1 = []
        prime_fac_2 = []
        ans = max(number1, number2)

    count1 = 0
    count2 = 0
    done = []  # captures numbers in both 'prime_fac_1' and 'prime_fac_2'

    # iterates through prime_fac_1
    for n in prime_fac_1:
        if n not in done:
            if n in prime_fac_2:
                count1 = prime_fac_1.count(n)
                count2 = prime_fac_2.count(n)
                for _ in range(max(count1, count2)):
                    ans *= n
            else:
                count1 = prime_fac_1.count(n)
                for _ in range(count1):
                    ans *= n
            done.append(n)

    # iterates through prime_fac_2
    for n in prime_fac_2:
        if n not in done:
            count2 = prime_fac_2.count(n)
            for _ in range(count2):
                ans *= n
            done.append(n)

    # precondition
    assert isinstance(ans, int) and (ans >= 0), "'ans' must be from type int and positive"
    return ans
def get_prime(n):
    assert isinstance(n, int) and (n >= 0), "'n' must be a positive int"
    index = 0
    ans = 2  # this variable holds the answer
    while index < n:
        index += 1
        ans += 1  # counts to the next number
        # if ans is not prime then
        # run to the next prime number.
        while not is_prime(ans):
            ans += 1

    # precondition
    assert isinstance(ans, int) and is_prime(ans), "'ans' must be a prime number and from type int"
    return ans


def get_primes_between(p_number_1, p_number_2):
    assert (
        is_prime(p_number_1) and is_prime(p_number_2) and (p_number_1 < p_number_2)
    ), "The arguments must be prime numbers and 'p_number_1' < 'p_number_2'"
    number = p_number_1 + 1  # jump to the next number
    ans = []  # this list will be returned.

    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number):
        number += 1

    while number < p_number_2:
        ans.append(number)
        number += 1
        # fetch the next prime number.
        while not is_prime(number):
            number += 1

    # precondition
    assert (
        isinstance(ans, list) and ans[0] != p_number_1 and ans[len(ans) - 1] != p_number_2
    ), "'ans' must be a list without the arguments"
    # 'ans' contains neither 'p_number_1' nor 'p_number_2'!
    return ans
def get_divisors(n):
    assert isinstance(n, int) and (n >= 1), "'n' must be an int and >= 1"
    ans = []  # will be returned.
    for divisor in range(1, n + 1):
        if n % divisor == 0:
            ans.append(divisor)

    # precondition
    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function get_divisors(...)"
    return ans


def is_perfect_number(number):
    assert isinstance(number, int) and (number > 1), "'number' must be an int and > 1"
    divisors = get_divisors(number)

    # precondition
    assert (
        isinstance(divisors, list) and (divisors[0] == 1) and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function get_divisors(...)"
    # sums all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number
def simplify_fraction(numerator, denominator):
    assert (
        isinstance(numerator, int) and isinstance(denominator, int) and (denominator != 0)
    ), "The arguments must be from type int and 'denominator' != 0"
    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))

    # precondition
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"
    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)


def factorial(n):
    assert isinstance(n, int) and (n >= 0), "'n' must be an int and >= 0"
    ans = 1  # this will be returned.
    for factor in range(1, n + 1):
        ans *= factor
    return ans


def fib(n):
    assert isinstance(n, int) and (n >= 0), "'n' must be an int and >= 0"
    tmp = 0
    fib_1 = 1
    ans = 1  # this will be returned
    for _ in range(n - 1):
        tmp = ans
        ans += fib_1
        fib_1 = tmp
    return ans
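if __name__ == "__main__":
    # A few sanity checks for the helpers above (illustrative, not an exhaustive test suite):
    assert sieve_er(20) == [2, 3, 5, 7, 11, 13, 17, 19]
    assert prime_factorization(912) == [2, 2, 2, 2, 3, 19]  # 912 = 2^4 * 3 * 19
    assert goldbach(28) == [5, 23]
    assert simplify_fraction(10, 20) == (1, 2)
    assert factorial(5) == 120 and fib(10) == 89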
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds(
    train_file: str,
    eval_file: str,
    test_file: str,
    tokenizer: PreTrainedTokenizer,
    label_column_id: int,
    max_seq_length: Optional[int] = None,
):
    files = {}
    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]

    ds = datasets.load_dataset("csv", data_files=files)
    features_name = list(ds[list(files.keys())[0]].features.keys())
    label_name = features_name.pop(label_column_id)
    label_list = list(set(ds[list(files.keys())[0]][label_name]))
    label2id = {label: i for i, label in enumerate(label_list)}
    input_names = tokenizer.model_input_names
    transformed_ds = {}

    if len(features_name) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]], truncation=True, max_length=max_seq_length, padding="max_length"
                ),
                batched=True,
            )
    elif len(features_name) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]),
                    truncation=True,
                    max_length=max_seq_length,
                    padding="max_length",
                ),
                batched=True,
            )

    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )

    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))

    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )

    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))

    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TEST in transformed_ds
        else None
    )

    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))

    return train_ds, val_ds, test_ds, label2id
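# Each returned tf.data.Dataset yields (features, label) pairs: `features` maps every
# tokenizer input name (e.g. "input_ids", "attention_mask") to an int32 tensor of
# shape [None] (padded to max_seq_length by the tokenizer), and `label` is a scalar
# int64 class id taken from `label2id`.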
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    label_column_id: int = field(metadata={"help": "Which column contains the label"})
    train_file: str = field(default=None, metadata={"help": "The path of the training file"})
    dev_file: Optional[str] = field(default=None, metadata={"help": "The path of the development file"})
    test_file: Optional[str] = field(default=None, metadata={"help": "The path of the test file"})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(
        f"n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, "
        f"16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file,
        eval_file=data_args.dev_file,
        test_file=data_args.test_file,
        tokenizer=tokenizer,
        label_column_id=data_args.label_column_id,
        max_seq_length=data_args.max_seq_length,
    )

    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=len(label2id),
        label2id=label2id,
        id2label={id: label for label, id in label2id.items()},
        finetuning_task="text-classification",
        cache_dir=model_args.cache_dir,
    )

    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path,
            from_pt=bool(".bin" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": (preds == p.label_ids).mean()}

    # Initialize our Trainer
    trainer = TFTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        trainer.train()
        trainer.save_model()
        tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")

        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key, value in result.items():
                logger.info(f"  {key} = {value}")
                writer.write(f"{key} = {value}\n")

            results.update(result)

    return results
if __name__ == "__main__":
main()
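# Example invocation (hypothetical CSV files whose second column holds the label):
#   python run_tf_text_classification.py \
#       --model_name_or_path bert-base-cased \
#       --train_file train.csv --dev_file dev.csv \
#       --label_column_id 1 \
#       --output_dir ./clf --do_train --do_eval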
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    "configuration_swiftformer": [
        "SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "SwiftFormerConfig",
        "SwiftFormerOnnxConfig",
    ]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swiftformer"] = [
        "SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SwiftFormerForImageClassification",
        "SwiftFormerModel",
        "SwiftFormerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
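# Usage note (sketch): with the `_LazyModule` indirection above, importing the package
# itself is cheap; heavy submodules are only imported on first attribute access, e.g.
#   from transformers.models import swiftformer
#   config = swiftformer.SwiftFormerConfig()  # triggers the import of configuration_swiftformer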
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
logger = datasets.utils.logging.get_logger(__name__)


class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
    """Builder Config for AudioFolder."""

    drop_labels: bool = None
    drop_metadata: bool = None


class AudioFolder(folder_based_builder.FolderBasedBuilder):
    BASE_FEATURE = datasets.Audio()
    BASE_COLUMN_NAME = "audio"
    BUILDER_CONFIG_CLASS = AudioFolderConfig
    EXTENSIONS: List[str]  # definition at the bottom of the script
    CLASSIFICATION_TASK = AudioClassification(audio_column="audio", label_column="label")
AUDIO_EXTENSIONS = [
'''.aiff''',
'''.au''',
'''.avr''',
'''.caf''',
'''.flac''',
'''.htk''',
'''.svx''',
'''.mat4''',
'''.mat5''',
'''.mpc2k''',
'''.ogg''',
'''.paf''',
'''.pvf''',
'''.raw''',
'''.rf64''',
'''.sd2''',
'''.sds''',
'''.ircam''',
'''.voc''',
'''.w64''',
'''.wav''',
'''.nist''',
'''.wavex''',
'''.wve''',
'''.xi''',
'''.mp3''',
'''.opus''',
]
AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS
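# Minimal usage sketch (assumes a local layout like data/<label>/<clip>.wav, one
# subdirectory per class):
#   from datasets import load_dataset
#   ds = load_dataset("audiofolder", data_dir="data")
#   ds["train"][0]  # -> {"audio": {...}, "label": 0}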
| 699 | 0 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ : Optional[Any] = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((F"""blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('cls_token', 'vit.embeddings.cls_token'),
('patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight'),
('patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias'),
('pos_embed', 'vit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
('pre_logits.fc.weight', 'pooler.dense.weight'),
('pre_logits.fc.bias', 'pooler.dense.bias'),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
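# Sanity note on the slicing above: timm stores a fused qkv matrix of shape
# (3 * hidden_size, hidden_size); for ViT-Base (hidden_size = 768) rows [0:768)
# become the query, [768:1536) the key, and the last 768 rows the value projection.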
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path):
    """
    Copy/paste/tweak the timm model's weights to our ViT structure.
    """
    # define default ViT configuration
    config = ViTConfig()
    base_model = False
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    if vit_name[-5:] == "in21k":
        base_model = True
        config.patch_size = int(vit_name[-12:-10])
        config.image_size = int(vit_name[-9:-6])
    else:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        config.patch_size = int(vit_name[-6:-4])
        config.image_size = int(vit_name[-3:])
    # size of the architecture
    if "deit" in vit_name:
        if vit_name[9:].startswith("tiny"):
            config.hidden_size = 192
            config.intermediate_size = 768
            config.num_hidden_layers = 12
            config.num_attention_heads = 3
        elif vit_name[9:].startswith("small"):
            config.hidden_size = 384
            config.intermediate_size = 1536
            config.num_hidden_layers = 12
            config.num_attention_heads = 6
        else:
            pass
    else:
        if vit_name[4:].startswith("small"):
            config.hidden_size = 768
            config.intermediate_size = 2304
            config.num_hidden_layers = 8
            config.num_attention_heads = 8
        elif vit_name[4:].startswith("base"):
            pass
        elif vit_name[4:].startswith("large"):
            config.hidden_size = 1024
            config.intermediate_size = 4096
            config.num_hidden_layers = 24
            config.num_attention_heads = 16
        elif vit_name[4:].startswith("huge"):
            config.hidden_size = 1280
            config.intermediate_size = 5120
            config.num_hidden_layers = 32
            config.num_attention_heads = 16

    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTModel(config).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
    if "deit" in vit_name:
        image_processor = DeiTImageProcessor(size=config.image_size)
    else:
        image_processor = ViTImageProcessor(size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {vit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
lowerCAmelCase__ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--vit_name''',
default='''vit_base_patch16_224''',
type=str,
help='''Name of the ViT timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
lowerCAmelCase__ : List[str] = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
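# Example invocation (hypothetical dump path):
#   python convert_vit_timm_to_pytorch.py \
#       --vit_name vit_base_patch16_224 \
#       --pytorch_dump_folder_path ./vit-base-patch16-224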
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # float16 save/load is slightly lossy, hence the looser tolerance
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
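# These fast tests are meant to be collected by pytest, e.g. (hypothetical path):
#   pytest tests/pipelines/deepfloyd_if/test_if_inpainting.py -q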
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'''
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/dpr-ctx_encoder-single-nq-base''': 5_12,
'''facebook/dpr-ctx_encoder-multiset-base''': 5_12,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/dpr-question_encoder-single-nq-base''': 5_12,
'''facebook/dpr-question_encoder-multiset-base''': 5_12,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/dpr-reader-single-nq-base''': 5_12,
'''facebook/dpr-reader-multiset-base''': 5_12,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'''facebook/dpr-ctx_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-ctx_encoder-multiset-base''': {'''do_lower_case''': True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'''facebook/dpr-question_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-question_encoder-multiset-base''': {'''do_lower_case''': True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
'''facebook/dpr-reader-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-reader-multiset-base''': {'''do_lower_case''': True},
}
class DPRContextEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRContextEncoderTokenizer


class DPRQuestionEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRQuestionEncoderTokenizer
DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)

DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])


CUSTOM_DPR_READER_DOCSTRING = r"""
    Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
    It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
    using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
    with the format:

    [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>

    Args:
        questions (`str` or `List[str]`):
            The questions to be encoded. You can specify one question for many passages. In this case, the question
            will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
            `titles` or `texts`.
        titles (`str` or `List[str]`):
            The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
        texts (`str` or `List[str]`):
            The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
        padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
            Activates and controls padding. Accepts the following values:

            - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
              if provided).
            - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided.
            - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
              lengths).
        truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
            Activates and controls truncation. Accepts the following values:

            - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
              the maximum acceptable input length for the model if that argument is not provided. This will truncate
              token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a
              batch of pairs) is provided.
            - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided. This will only truncate the
              first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
            - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided. This will only truncate the
              second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
            - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
              greater than the model maximum admissible input size).
        max_length (`int`, *optional*):
            Controls the maximum length to use by one of the truncation/padding parameters.

            If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
            is required by one of the truncation/padding parameters. If the model has no specific maximum input
            length (like XLNet) truncation/padding to a maximum length will be deactivated.
        return_tensors (`str` or [`~utils.TensorType`], *optional*):
            If set, will return tensors instead of list of python integers. Acceptable values are:

            - `'tf'`: Return TensorFlow `tf.constant` objects.
            - `'pt'`: Return PyTorch `torch.Tensor` objects.
            - `'np'`: Return Numpy `np.ndarray` objects.
        return_attention_mask (`bool`, *optional*):
            Whether or not to return the attention mask. If not set, will return the attention mask according to the
            specific tokenizer's default, defined by the `return_outputs` attribute.

            [What are attention masks?](../glossary#attention-mask)

    Return:
        `Dict[str, List[List[int]]]`: A dictionary with the following keys:

        - `input_ids`: List of token ids to be fed to a model.
        - `attention_mask`: List of indices specifying which tokens should be attended to by the model.
    """
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        assert len(titles) == len(
            texts
        ), f"There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts."
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)
    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: DPRReaderOutput,
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> List[DPRSpanPrediction]:
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions: List[DPRSpanPrediction] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans(
        self,
        start_logits: List[int],
        end_logits: List[int],
        max_answer_length: int,
        top_spans: int,
    ) -> List[DPRSpanPrediction]:
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            assert start_index <= end_index, f"Wrong span indices: [{start_index}:{end_index}]"
            length = end_index - start_index + 1
            assert length <= max_answer_length, f"Span is too long: {length} > {max_answer_length}"
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizerFast(CustomDPRReaderTokenizerMixin, BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DPRReaderTokenizer
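# Usage sketch for the reader tokenizer (checkpoint ids are the public Hub names;
# the output of `decode_best_spans` follows the DPRSpanPrediction fields above):
#   tokenizer = DPRReaderTokenizerFast.from_pretrained("facebook/dpr-reader-single-nq-base")
#   encoded = tokenizer(
#       questions=["What is love?"],
#       titles=["Haddaway"],
#       texts=["'What Is Love' is a song recorded by Haddaway"],
#       return_tensors="pt",
#   )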
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SPIECE_UNDERLINE = "▁"

SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
class BertGenerationTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertGenerationTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "<pad>")
        self.assertEqual(len(vocab_keys), 1002)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)
def __a ( self ) -> Tuple:
'''simple docstring'''
snake_case__ : Optional[Any] = BertGenerationTokenizer(__UpperCamelCase , keep_accents=__UpperCamelCase )
snake_case__ : int = tokenizer.tokenize('This is a test' )
self.assertListEqual(__UpperCamelCase , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__UpperCamelCase ) , [285, 46, 10, 170, 382] , )
snake_case__ : Any = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
__UpperCamelCase , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
snake_case__ : Optional[Any] = tokenizer.convert_tokens_to_ids(__UpperCamelCase )
self.assertListEqual(
__UpperCamelCase , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
snake_case__ : int = tokenizer.convert_ids_to_tokens(__UpperCamelCase )
self.assertListEqual(
__UpperCamelCase , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
@cached_property
def __a ( self ) -> Dict:
'''simple docstring'''
return BertGenerationTokenizer.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' )
@slow
def __a ( self ) -> Any:
'''simple docstring'''
snake_case__ : int = 'Hello World!'
snake_case__ : Union[str, Any] = [18536, 2260, 101]
self.assertListEqual(__UpperCamelCase , self.big_tokenizer.encode(__UpperCamelCase ) )
@slow
def __a ( self ) -> Optional[int]:
'''simple docstring'''
snake_case__ : str = (
'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'
)
snake_case__ : List[Any] = [
871,
419,
358,
946,
991,
2521,
452,
358,
1357,
387,
7751,
3536,
112,
985,
456,
126,
865,
938,
5400,
5734,
458,
1368,
467,
786,
2462,
5246,
1159,
633,
865,
4519,
457,
582,
852,
2557,
427,
916,
508,
405,
34324,
497,
391,
408,
11342,
1244,
385,
100,
938,
985,
456,
574,
362,
12597,
3200,
3129,
1172,
]
self.assertListEqual(__UpperCamelCase , self.big_tokenizer.encode(__UpperCamelCase ) )
@require_torch
@slow
def __a ( self ) -> List[str]:
'''simple docstring'''
import torch
from transformers import BertGenerationConfig, BertGenerationEncoder
# Build sequence
snake_case__ : Optional[int] = list(self.big_tokenizer.get_vocab().keys() )[:10]
snake_case__ : Optional[int] = ' '.join(__UpperCamelCase )
snake_case__ : int = self.big_tokenizer.encode_plus(__UpperCamelCase , return_tensors='pt' , return_token_type_ids=__UpperCamelCase )
snake_case__ : Tuple = self.big_tokenizer.batch_encode_plus(
[sequence + ' ' + sequence] , return_tensors='pt' , return_token_type_ids=__UpperCamelCase )
snake_case__ : Dict = BertGenerationConfig()
snake_case__ : List[str] = BertGenerationEncoder(__UpperCamelCase )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**__UpperCamelCase )
model(**__UpperCamelCase )
@slow
def __a ( self ) -> Dict:
'''simple docstring'''
snake_case__ : Optional[int] = {'input_ids': [[39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114], [448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__UpperCamelCase , model_name='google/bert_for_seq_generation_L-24_bbc_encoder' , revision='c817d1fd1be2ffa69431227a1fe320544943d4db' , )
| 699 | 0 |
import datasets
lowerCAmelCase__ : str = '''\
@InProceedings{conneau2018xnli,
author = "Conneau, Alexis
and Rinott, Ruty
and Lample, Guillaume
and Williams, Adina
and Bowman, Samuel R.
and Schwenk, Holger
and Stoyanov, Veselin",
title = "XNLI: Evaluating Cross-lingual Sentence Representations",
booktitle = "Proceedings of the 2018 Conference on Empirical Methods
in Natural Language Processing",
year = "2018",
publisher = "Association for Computational Linguistics",
location = "Brussels, Belgium",
}
'''
lowerCAmelCase__ : Dict = '''\
XNLI is a subset of a few thousand examples from MNLI which has been translated
into 14 different languages (some of them low-resource). As with MNLI, the goal
is to predict textual entailment (does sentence A imply/contradict/neither
sentence B), and it is a classification task (given two sentences, predict one
of three labels).
'''
lowerCAmelCase__ : int = '''
Computes XNLI score which is just simple accuracy.
Args:
predictions: Predicted labels.
references: Ground truth labels.
Returns:
\'accuracy\': accuracy
Examples:
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> xnli_metric = datasets.load_metric("xnli")
>>> results = xnli_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
'''
def UpperCamelCase__ ( preds , labels ) -> Union[str, Any]:
    return (preds == labels).mean()
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class __snake_case ( datasets.Metric ):
def __a ( self ) -> Dict:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('int64' if self.config_name != 'sts-b' else 'float32' ),
'references': datasets.Value('int64' if self.config_name != 'sts-b' else 'float32' ),
} ) , codebase_urls=[] , reference_urls=[] , format='numpy' , )
def __a ( self , __UpperCamelCase , __UpperCamelCase ) -> List[Any]:
'''simple docstring'''
return {"accuracy": simple_accuracy(__UpperCamelCase , __UpperCamelCase )}
| 717 | import random
import torch
from huggingface_hub import HfApi
from diffusers import UNetaDModel
lowerCAmelCase__ : List[str] = HfApi()
lowerCAmelCase__ : str = {}
# fmt: off
lowerCAmelCase__ : int = torch.tensor([
-0.75_15, -1.68_83, 0.24_20, 0.03_00, 0.63_47, 1.34_33, -1.17_43, -3.74_67,
1.23_42, -2.24_85, 0.46_36, 0.80_76, -0.79_91, 0.39_69, 0.84_98, 0.91_89,
-1.88_87, -3.35_22, 0.76_39, 0.20_40, 0.62_71, -2.71_48, -1.63_16, 3.08_39,
0.31_86, 0.27_21, -0.97_59, -1.24_61, 2.62_57, 1.35_57
])
lowerCAmelCase__ : Dict = torch.tensor([
-2.36_39, -2.53_44, 0.00_54, -0.66_74, 1.59_90, 1.01_58, 0.31_24, -2.14_36,
1.87_95, -2.54_29, -0.15_66, -0.39_73, 1.24_90, 2.64_47, 1.22_83, -0.52_08,
-2.81_54, -3.51_19, 2.38_38, 1.20_33, 1.72_01, -2.12_56, -1.45_76, 2.79_48,
2.42_04, -0.97_52, -1.25_46, 0.80_27, 3.27_58, 3.13_65
])
lowerCAmelCase__ : Dict = torch.tensor([
-0.65_31, -0.68_91, -0.31_72, -0.53_75, -0.91_40, -0.53_67, -0.11_75, -0.78_69,
-0.38_08, -0.45_13, -0.20_98, -0.00_83, 0.31_83, 0.51_40, 0.22_47, -0.13_04,
-0.13_02, -0.28_02, -0.20_84, -0.20_25, -0.49_67, -0.48_73, -0.08_61, 0.69_25,
0.02_50, 0.12_90, -0.15_43, 0.63_16, 1.04_60, 1.49_43
])
lowerCAmelCase__ : List[str] = torch.tensor([
0.09_11, 0.11_07, 0.01_82, 0.04_35, -0.08_05, -0.06_08, 0.03_81, 0.21_72,
-0.02_80, 0.13_27, -0.02_99, -0.02_55, -0.00_50, -0.11_70, -0.10_46, 0.03_09,
0.13_67, 0.17_28, -0.05_33, -0.07_48, -0.05_34, 0.16_24, 0.03_84, -0.18_05,
-0.07_07, 0.06_42, 0.02_20, -0.01_34, -0.13_33, -0.15_05
])
lowerCAmelCase__ : Union[str, Any] = torch.tensor([
0.13_21, 0.13_37, 0.04_40, 0.06_22, -0.05_91, -0.03_70, 0.05_03, 0.21_33,
-0.01_77, 0.14_15, -0.01_16, -0.01_12, 0.00_44, -0.09_80, -0.07_89, 0.03_95,
0.15_02, 0.17_85, -0.04_88, -0.05_14, -0.04_04, 0.15_39, 0.04_54, -0.15_59,
-0.06_65, 0.06_59, 0.03_83, -0.00_05, -0.12_66, -0.13_86
])
lowerCAmelCase__ : List[Any] = torch.tensor([
0.11_54, 0.12_18, 0.03_07, 0.05_26, -0.07_11, -0.05_41, 0.03_66, 0.20_78,
-0.02_67, 0.13_17, -0.02_26, -0.01_93, -0.00_14, -0.10_55, -0.09_02, 0.03_30,
0.13_91, 0.17_09, -0.05_62, -0.06_93, -0.05_60, 0.14_82, 0.03_81, -0.16_83,
-0.06_81, 0.06_61, 0.03_31, -0.00_46, -0.12_68, -0.14_31
])
lowerCAmelCase__ : Optional[Any] = torch.tensor([
0.11_92, 0.12_40, 0.04_14, 0.06_06, -0.05_57, -0.04_12, 0.04_30, 0.20_42,
-0.02_00, 0.13_85, -0.01_15, -0.01_32, 0.00_17, -0.09_65, -0.08_02, 0.03_98,
0.14_33, 0.17_47, -0.04_58, -0.05_33, -0.04_07, 0.15_45, 0.04_19, -0.15_74,
-0.06_45, 0.06_26, 0.03_41, -0.00_10, -0.11_99, -0.13_90
])
lowerCAmelCase__ : List[str] = torch.tensor([
0.10_75, 0.10_74, 0.02_05, 0.04_31, -0.07_74, -0.06_07, 0.02_98, 0.20_42,
-0.03_20, 0.12_67, -0.02_81, -0.02_50, -0.00_64, -0.10_91, -0.09_46, 0.02_90,
0.13_28, 0.16_50, -0.05_80, -0.07_38, -0.05_86, 0.14_40, 0.03_37, -0.17_46,
-0.07_12, 0.06_05, 0.02_50, -0.00_99, -0.13_16, -0.14_73
])
lowerCAmelCase__ : List[str] = torch.tensor([
-1.45_72, -2.04_81, -0.04_14, -0.60_05, 1.41_36, 0.58_48, 0.40_28, -2.73_30,
1.22_12, -2.12_28, 0.21_55, 0.40_39, 0.76_62, 2.05_35, 0.74_77, -0.32_43,
-2.17_58, -2.76_48, 1.69_47, 0.70_26, 1.23_38, -1.60_78, -0.86_82, 2.28_10,
1.85_74, -0.57_18, -0.55_86, -0.01_86, 2.34_15, 2.12_51])
lowerCAmelCase__ : List[Any] = torch.tensor([
-1.36_90, -1.97_20, -0.40_90, -0.69_66, 1.46_60, 0.99_38, -0.13_85, -2.73_24,
0.77_36, -1.89_17, 0.29_23, 0.42_93, 0.16_93, 1.41_12, 1.18_87, -0.31_81,
-2.21_60, -2.63_81, 1.31_70, 0.81_63, 0.92_40, -1.65_44, -0.60_99, 2.52_59,
1.64_30, -0.90_90, -0.93_92, -0.01_26, 2.42_68, 2.32_66
])
lowerCAmelCase__ : Tuple = torch.tensor([
-1.35_25, -1.96_28, -0.39_56, -0.68_60, 1.46_64, 1.00_14, -0.12_59, -2.72_12,
0.77_72, -1.88_11, 0.29_96, 0.43_88, 0.17_04, 1.40_29, 1.17_01, -0.30_27,
-2.20_53, -2.62_87, 1.33_50, 0.81_31, 0.92_74, -1.62_92, -0.60_98, 2.51_31,
1.65_05, -0.89_58, -0.92_98, -0.01_51, 2.42_57, 2.33_55
])
lowerCAmelCase__ : List[str] = torch.tensor([
-2.05_85, -2.78_97, -0.28_50, -0.89_40, 1.90_52, 0.57_02, 0.63_45, -3.89_59,
1.59_32, -3.23_19, 0.19_74, 0.02_87, 1.75_66, 2.65_43, 0.83_87, -0.53_51,
-3.27_36, -4.33_75, 2.90_29, 1.63_90, 1.46_40, -2.17_01, -1.90_13, 2.93_41,
3.49_81, -0.62_55, -1.16_44, -0.15_91, 3.70_97, 3.20_66
])
lowerCAmelCase__ : Dict = torch.tensor([
-2.31_39, -2.55_94, -0.01_97, -0.67_85, 1.70_01, 1.16_06, 0.30_75, -2.17_40,
1.80_71, -2.56_30, -0.09_26, -0.38_11, 1.21_16, 2.62_46, 1.27_31, -0.53_98,
-2.81_53, -3.61_40, 2.38_93, 1.32_62, 1.62_58, -2.18_56, -1.32_67, 2.83_95,
2.37_79, -1.06_23, -1.24_68, 0.89_59, 3.33_67, 3.22_43
])
lowerCAmelCase__ : Dict = torch.tensor([
-2.06_28, -2.76_67, -0.20_89, -0.82_63, 2.05_39, 0.59_92, 0.64_95, -3.83_36,
1.60_25, -3.28_17, 0.17_21, -0.06_33, 1.75_16, 2.70_39, 0.81_00, -0.59_08,
-3.21_13, -4.43_43, 2.92_57, 1.36_32, 1.55_62, -2.14_89, -1.98_94, 3.05_60,
3.33_96, -0.73_28, -1.04_17, 0.03_83, 3.70_93, 3.23_43
])
lowerCAmelCase__ : Any = torch.tensor([
-1.45_74, -2.05_69, -0.04_73, -0.61_17, 1.40_18, 0.57_69, 0.41_29, -2.73_44,
1.22_41, -2.13_97, 0.20_00, 0.39_37, 0.76_16, 2.04_53, 0.73_24, -0.33_91,
-2.17_46, -2.77_44, 1.69_63, 0.69_21, 1.21_87, -1.61_72, -0.88_77, 2.24_39,
1.84_71, -0.58_39, -0.56_05, -0.04_64, 2.32_50, 2.12_19
])
# fmt: on
lowerCAmelCase__ : Any = api.list_models(filter='''diffusers''')
for mod in models:
if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
lowerCAmelCase__ : List[str] = '''/home/patrick/google_checkpoints/''' + mod.modelId.split('''/''')[-1]
print(F'''Started running {mod.modelId}!!!''')
if mod.modelId.startswith('''CompVis'''):
lowerCAmelCase__ : int = UNetaDModel.from_pretrained(local_checkpoint, subfolder='''unet''')
else:
lowerCAmelCase__ : str = UNetaDModel.from_pretrained(local_checkpoint)
torch.manual_seed(0)
random.seed(0)
lowerCAmelCase__ : Any = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
lowerCAmelCase__ : List[str] = torch.tensor([10] * noise.shape[0])
with torch.no_grad():
lowerCAmelCase__ : int = model(noise, time_step).sample
assert torch.allclose(
logits[0, 0, 0, :30], results['''_'''.join('''_'''.join(mod.modelId.split('''/''')).split('''-'''))], atol=1E-3
)
print(F'''{mod.modelId} has passed successfully!!!''')
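# The `results` dict above is keyed by the model id with "/" and "-" both
# replaced by "_". A small sketch of that key transformation (the model id
# below is illustrative):
_model_id = "google/ddpm-cifar10-32"
_key = "_".join("_".join(_model_id.split("/")).split("-"))
assert _key == "google_ddpm_cifar10_32"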
| 699 | 0 |
from __future__ import annotations
import unittest
from transformers import DebertaVaConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
TFDebertaVaModel,
)
class __snake_case :
def __init__( self , __UpperCamelCase , __UpperCamelCase=13 , __UpperCamelCase=7 , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=99 , __UpperCamelCase=32 , __UpperCamelCase=2 , __UpperCamelCase=4 , __UpperCamelCase=37 , __UpperCamelCase="gelu" , __UpperCamelCase=0.1 , __UpperCamelCase=0.1 , __UpperCamelCase=512 , __UpperCamelCase=16 , __UpperCamelCase=2 , __UpperCamelCase=0.0_2 , __UpperCamelCase=False , __UpperCamelCase=True , __UpperCamelCase="None" , __UpperCamelCase=3 , __UpperCamelCase=4 , __UpperCamelCase=None , ) -> List[Any]:
'''simple docstring'''
snake_case__ : Dict = parent
snake_case__ : str = batch_size
snake_case__ : List[str] = seq_length
snake_case__ : Optional[Any] = is_training
snake_case__ : List[str] = use_input_mask
snake_case__ : Any = use_token_type_ids
snake_case__ : Optional[Any] = use_labels
snake_case__ : str = vocab_size
snake_case__ : Dict = hidden_size
snake_case__ : List[Any] = num_hidden_layers
snake_case__ : Dict = num_attention_heads
snake_case__ : List[str] = intermediate_size
snake_case__ : int = hidden_act
snake_case__ : Tuple = hidden_dropout_prob
snake_case__ : List[Any] = attention_probs_dropout_prob
snake_case__ : str = max_position_embeddings
snake_case__ : Optional[int] = type_vocab_size
snake_case__ : Optional[Any] = type_sequence_label_size
snake_case__ : int = initializer_range
snake_case__ : Dict = num_labels
snake_case__ : Tuple = num_choices
snake_case__ : str = relative_attention
snake_case__ : Optional[int] = position_biased_input
snake_case__ : Dict = pos_att_type
snake_case__ : int = scope
def __a ( self ) -> Tuple:
'''simple docstring'''
snake_case__ : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case__ : Dict = None
if self.use_input_mask:
snake_case__ : Any = random_attention_mask([self.batch_size, self.seq_length] )
snake_case__ : Dict = None
if self.use_token_type_ids:
snake_case__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case__ : str = None
snake_case__ : Dict = None
snake_case__ : str = None
if self.use_labels:
snake_case__ : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case__ : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case__ : Union[str, Any] = DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , initializer_range=self.initializer_range , return_dict=__UpperCamelCase , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __a ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Dict:
'''simple docstring'''
snake_case__ : List[str] = TFDebertaVaModel(config=__UpperCamelCase )
snake_case__ : List[str] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
snake_case__ : Dict = [input_ids, input_mask]
snake_case__ : Tuple = model(__UpperCamelCase )
snake_case__ : Tuple = model(__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __a ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Optional[int]:
'''simple docstring'''
snake_case__ : List[Any] = TFDebertaVaForMaskedLM(config=__UpperCamelCase )
snake_case__ : Optional[int] = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
snake_case__ : Optional[Any] = model(__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __a ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> str:
'''simple docstring'''
snake_case__ : List[str] = self.num_labels
snake_case__ : int = TFDebertaVaForSequenceClassification(config=__UpperCamelCase )
snake_case__ : Union[str, Any] = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
snake_case__ : Dict = model(__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __a ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Optional[Any]:
'''simple docstring'''
snake_case__ : Dict = self.num_labels
snake_case__ : Union[str, Any] = TFDebertaVaForTokenClassification(config=__UpperCamelCase )
snake_case__ : List[str] = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
snake_case__ : Dict = model(__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __a ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Any:
'''simple docstring'''
snake_case__ : List[str] = TFDebertaVaForQuestionAnswering(config=__UpperCamelCase )
snake_case__ : str = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
snake_case__ : Optional[Any] = model(__UpperCamelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __a ( self ) -> str:
'''simple docstring'''
snake_case__ : List[Any] = self.prepare_config_and_inputs()
(
snake_case__
) : str = config_and_inputs
snake_case__ : int = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class __snake_case ( _lowerCamelCase ,_lowerCamelCase ,unittest.TestCase ):
__lowerCamelCase = (
(
TFDebertaVaModel,
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
)
if is_tf_available()
else ()
)
__lowerCamelCase = (
{
"""feature-extraction""": TFDebertaVaModel,
"""fill-mask""": TFDebertaVaForMaskedLM,
"""question-answering""": TFDebertaVaForQuestionAnswering,
"""text-classification""": TFDebertaVaForSequenceClassification,
"""token-classification""": TFDebertaVaForTokenClassification,
"""zero-shot""": TFDebertaVaForSequenceClassification,
}
if is_tf_available()
else {}
)
__lowerCamelCase = False
__lowerCamelCase = False
def __a ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case__ : Dict = TFDebertaVaModelTester(self )
snake_case__ : Union[str, Any] = ConfigTester(self , config_class=__UpperCamelCase , hidden_size=37 )
def __a ( self ) -> Tuple:
'''simple docstring'''
self.config_tester.run_common_tests()
def __a ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def __a ( self ) -> Dict:
'''simple docstring'''
snake_case__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__UpperCamelCase )
def __a ( self ) -> Any:
'''simple docstring'''
snake_case__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__UpperCamelCase )
def __a ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__UpperCamelCase )
def __a ( self ) -> int:
'''simple docstring'''
snake_case__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__UpperCamelCase )
@slow
def __a ( self ) -> Optional[int]:
'''simple docstring'''
snake_case__ : Any = TFDebertaVaModel.from_pretrained('kamalkraj/deberta-v2-xlarge' )
self.assertIsNotNone(__UpperCamelCase )
@require_tf
class __snake_case ( unittest.TestCase ):
@unittest.skip(reason='Model not available yet' )
def __a ( self ) -> Tuple:
'''simple docstring'''
pass
@slow
def __a ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case__ : List[str] = TFDebertaVaModel.from_pretrained('kamalkraj/deberta-v2-xlarge' )
snake_case__ : Tuple = tf.constant([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] )
snake_case__ : Any = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
snake_case__ : str = model(__UpperCamelCase , attention_mask=__UpperCamelCase )[0]
snake_case__ : Optional[int] = tf.constant(
[[[0.2_3_5_6, 0.1_9_4_8, 0.0_3_6_9], [-0.1_0_6_3, 0.3_5_8_6, -0.5_1_5_2], [-0.6_3_9_9, -0.0_2_5_9, -0.2_5_2_5]]] )
tf.debugging.assert_near(output[:, 1:4, 1:4] , __UpperCamelCase , atol=1E-4 )
| 718 | import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
lowerCAmelCase__ : Dict = logging.get_logger(__name__)
class __snake_case ( _lowerCamelCase ):
def __init__( self , *__UpperCamelCase , **__UpperCamelCase ) -> None:
'''simple docstring'''
warnings.warn(
'The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use PerceiverImageProcessor instead.' , __UpperCamelCase , )
super().__init__(*__UpperCamelCase , **__UpperCamelCase )
| 699 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
lowerCAmelCase__ : Dict = logging.get_logger(__name__)
lowerCAmelCase__ : List[str] = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
lowerCAmelCase__ : Optional[Any] = {
'''vocab_file''': {'''mobilebert-uncased''': '''https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt'''},
'''tokenizer_file''': {
'''mobilebert-uncased''': '''https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json'''
},
}
lowerCAmelCase__ : int = {'''mobilebert-uncased''': 5_12}
lowerCAmelCase__ : Optional[int] = {}
class __snake_case ( _lowerCamelCase ):
__lowerCamelCase = VOCAB_FILES_NAMES
__lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
__lowerCamelCase = PRETRAINED_INIT_CONFIGURATION
__lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCamelCase = MobileBertTokenizer
def __init__( self , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=True , __UpperCamelCase="[UNK]" , __UpperCamelCase="[SEP]" , __UpperCamelCase="[PAD]" , __UpperCamelCase="[CLS]" , __UpperCamelCase="[MASK]" , __UpperCamelCase=True , __UpperCamelCase=None , **__UpperCamelCase , ) -> List[Any]:
'''simple docstring'''
super().__init__(
__UpperCamelCase , tokenizer_file=__UpperCamelCase , do_lower_case=__UpperCamelCase , unk_token=__UpperCamelCase , sep_token=__UpperCamelCase , pad_token=__UpperCamelCase , cls_token=__UpperCamelCase , mask_token=__UpperCamelCase , tokenize_chinese_chars=__UpperCamelCase , strip_accents=__UpperCamelCase , **__UpperCamelCase , )
snake_case__ : List[str] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , __UpperCamelCase ) != do_lower_case
or normalizer_state.get('strip_accents' , __UpperCamelCase ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , __UpperCamelCase ) != tokenize_chinese_chars
):
snake_case__ : Any = getattr(__UpperCamelCase , normalizer_state.pop('type' ) )
snake_case__ : List[str] = do_lower_case
snake_case__ : str = strip_accents
snake_case__ : List[Any] = tokenize_chinese_chars
snake_case__ : Union[str, Any] = normalizer_class(**__UpperCamelCase )
snake_case__ : Any = do_lower_case
def __a ( self , __UpperCamelCase , __UpperCamelCase=None ) -> List[str]:
'''simple docstring'''
snake_case__ : Dict = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __a ( self , __UpperCamelCase , __UpperCamelCase = None ) -> List[int]:
'''simple docstring'''
snake_case__ : Union[str, Any] = [self.sep_token_id]
snake_case__ : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __a ( self , __UpperCamelCase , __UpperCamelCase = None ) -> Tuple[str]:
'''simple docstring'''
snake_case__ : Tuple = self._tokenizer.model.save(__UpperCamelCase , name=__UpperCamelCase )
return tuple(__UpperCamelCase )
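# A hedged usage sketch for the fast tokenizer defined above (kept as comments
# because the checkpoint download is an assumption of this example; the model
# name comes from the PRETRAINED_VOCAB_FILES_MAP at the top of the file, and the
# class is referred to by the name it carries in this file):
#
#   tok = __snake_case.from_pretrained("mobilebert-uncased")
#   enc = tok("hello world")
#   # enc["input_ids"] starts with the [CLS] id and ends with the [SEP] id,
#   # matching build_inputs_with_special_tokens above.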
| 719 | import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
lowerCAmelCase__ : List[Any] = datasets.utils.logging.get_logger(__name__)
@dataclass
class __snake_case ( datasets.BuilderConfig ):
__lowerCamelCase = None
__lowerCamelCase = "utf-8"
__lowerCamelCase = None
__lowerCamelCase = None
__lowerCamelCase = True # deprecated
__lowerCamelCase = None # deprecated
__lowerCamelCase = 10 << 20 # 10MB
__lowerCamelCase = None
class __snake_case ( datasets.ArrowBasedBuilder ):
__lowerCamelCase = JsonConfig
def __a ( self ) -> Optional[Any]:
'''simple docstring'''
if self.config.block_size is not None:
logger.warning('The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead' )
snake_case__ : str = self.config.block_size
if self.config.use_threads is not True:
logger.warning(
'The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.' )
if self.config.newlines_in_values is not None:
raise ValueError('The JSON loader parameter `newlines_in_values` is no longer supported' )
return datasets.DatasetInfo(features=self.config.features )
def __a ( self , __UpperCamelCase ) -> Dict:
'''simple docstring'''
if not self.config.data_files:
raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
snake_case__ : int = dl_manager.download_and_extract(self.config.data_files )
if isinstance(__UpperCamelCase , (str, list, tuple) ):
snake_case__ : Any = data_files
if isinstance(__UpperCamelCase , __UpperCamelCase ):
snake_case__ : Optional[Any] = [files]
snake_case__ : List[str] = [dl_manager.iter_files(__UpperCamelCase ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'files': files} )]
snake_case__ : List[Any] = []
for split_name, files in data_files.items():
if isinstance(__UpperCamelCase , __UpperCamelCase ):
snake_case__ : List[Any] = [files]
snake_case__ : Any = [dl_manager.iter_files(__UpperCamelCase ) for file in files]
splits.append(datasets.SplitGenerator(name=__UpperCamelCase , gen_kwargs={'files': files} ) )
return splits
def __a ( self , __UpperCamelCase ) -> pa.Table:
'''simple docstring'''
if self.config.features is not None:
# adding missing columns
for column_name in set(self.config.features ) - set(pa_table.column_names ):
snake_case__ : List[Any] = self.config.features.arrow_schema.field(__UpperCamelCase ).type
snake_case__ : List[str] = pa_table.append_column(__UpperCamelCase , pa.array([None] * len(__UpperCamelCase ) , type=__UpperCamelCase ) )
# more expensive cast to support nested structures with keys in a different order
# allows str <-> int/float or str to Audio for example
snake_case__ : List[str] = table_cast(__UpperCamelCase , self.config.features.arrow_schema )
return pa_table
def __a ( self , __UpperCamelCase ) -> int:
'''simple docstring'''
for file_idx, file in enumerate(itertools.chain.from_iterable(__UpperCamelCase ) ):
# If the file is one json object and if we need to look at the list of items in one specific field
if self.config.field is not None:
with open(__UpperCamelCase , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
snake_case__ : Union[str, Any] = json.load(__UpperCamelCase )
# We keep only the field we are interested in
snake_case__ : Tuple = dataset[self.config.field]
# We accept two format: a list of dicts or a dict of lists
if isinstance(__UpperCamelCase , (list, tuple) ):
snake_case__ : List[Any] = set().union(*[row.keys() for row in dataset] )
snake_case__ : List[Any] = {col: [row.get(__UpperCamelCase ) for row in dataset] for col in keys}
else:
snake_case__ : List[Any] = dataset
snake_case__ : Dict = pa.Table.from_pydict(__UpperCamelCase )
yield file_idx, self._cast_table(__UpperCamelCase )
# If the file has one json object per line
else:
with open(__UpperCamelCase , 'rb' ) as f:
snake_case__ : Optional[int] = 0
# Use block_size equal to the chunk size divided by 32 to leverage multithreading
# Set a default minimum value of 16kB if the chunk size is really small
snake_case__ : Tuple = max(self.config.chunksize // 32 , 16 << 10 )
snake_case__ : Optional[Any] = (
self.config.encoding_errors if self.config.encoding_errors is not None else 'strict'
)
while True:
snake_case__ : Optional[int] = f.read(self.config.chunksize )
if not batch:
break
# Finish current line
try:
batch += f.readline()
except (AttributeError, io.UnsupportedOperation):
batch += readline(__UpperCamelCase )
# PyArrow only accepts utf-8 encoded bytes
if self.config.encoding != "utf-8":
snake_case__ : int = batch.decode(self.config.encoding , errors=__UpperCamelCase ).encode('utf-8' )
try:
while True:
try:
snake_case__ : List[str] = paj.read_json(
io.BytesIO(__UpperCamelCase ) , read_options=paj.ReadOptions(block_size=__UpperCamelCase ) )
break
except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
if (
isinstance(__UpperCamelCase , pa.ArrowInvalid )
and "straddling" not in str(__UpperCamelCase )
or block_size > len(__UpperCamelCase )
):
raise
else:
# Increase the block size in case it was too small.
# The block size will be reset for the next file.
logger.debug(
F"""Batch of {len(__UpperCamelCase )} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.""" )
block_size *= 2
except pa.ArrowInvalid as e:
try:
with open(
__UpperCamelCase , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
snake_case__ : Tuple = json.load(__UpperCamelCase )
except json.JSONDecodeError:
logger.error(F"""Failed to read file '{file}' with error {type(__UpperCamelCase )}: {e}""" )
raise e
# If possible, parse the file as a list of json objects and exit the loop
if isinstance(__UpperCamelCase , __UpperCamelCase ): # list is the only sequence type supported in JSON
try:
snake_case__ : str = set().union(*[row.keys() for row in dataset] )
snake_case__ : Union[str, Any] = {col: [row.get(__UpperCamelCase ) for row in dataset] for col in keys}
snake_case__ : List[str] = pa.Table.from_pydict(__UpperCamelCase )
except (pa.ArrowInvalid, AttributeError) as e:
logger.error(F"""Failed to read file '{file}' with error {type(__UpperCamelCase )}: {e}""" )
raise ValueError(F"""Not able to read records in the JSON file at {file}.""" ) from None
yield file_idx, self._cast_table(__UpperCamelCase )
break
else:
logger.error(F"""Failed to read file '{file}' with error {type(__UpperCamelCase )}: {e}""" )
raise ValueError(
F"""Not able to read records in the JSON file at {file}. """
F"""You should probably indicate the field of the JSON file containing your records. """
F"""This JSON file contain the following fields: {str(list(dataset.keys() ) )}. """
F"""Select the correct one and provide it as `field='XXX'` to the dataset loading method. """ ) from None
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(__UpperCamelCase )
batch_idx += 1
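# A hedged usage sketch of the builder above through the public `datasets` API
# (the file path and the "data" field name are placeholders). Passing `field`
# triggers the single-document branch of _generate_tables; without it, files
# are parsed as JSON Lines in blocks of `chunksize` bytes.
#
#   from datasets import load_dataset
#   ds = load_dataset("json", data_files="records.json", field="data")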
| 699 | 0 |
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
lowerCAmelCase__ : int = [
'''openmmlab/upernet-convnext-tiny''',
# See all UperNet models at https://huggingface.co/models?filter=upernet
]
# General docstring
lowerCAmelCase__ : str = '''UperNetConfig'''
class __snake_case ( nn.Module ):
def __init__( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = 0 , __UpperCamelCase = False , __UpperCamelCase = 1 , ) -> None:
'''simple docstring'''
super().__init__()
snake_case__ : Union[str, Any] = nn.Convad(
in_channels=__UpperCamelCase , out_channels=__UpperCamelCase , kernel_size=__UpperCamelCase , padding=__UpperCamelCase , bias=__UpperCamelCase , dilation=__UpperCamelCase , )
snake_case__ : Tuple = nn.BatchNormad(__UpperCamelCase )
snake_case__ : Tuple = nn.ReLU()
def __a ( self , __UpperCamelCase ) -> torch.Tensor:
'''simple docstring'''
snake_case__ : Tuple = self.conv(__UpperCamelCase )
snake_case__ : str = self.batch_norm(__UpperCamelCase )
snake_case__ : Dict = self.activation(__UpperCamelCase )
return output
class __snake_case ( nn.Module ):
def __init__( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> None:
'''simple docstring'''
super().__init__()
snake_case__ : Dict = [
nn.AdaptiveAvgPoolad(__UpperCamelCase ),
UperNetConvModule(__UpperCamelCase , __UpperCamelCase , kernel_size=1 ),
]
for i, layer in enumerate(self.layers ):
self.add_module(str(__UpperCamelCase ) , __UpperCamelCase )
def __a ( self , __UpperCamelCase ) -> torch.Tensor:
'''simple docstring'''
snake_case__ : Optional[int] = input
for layer in self.layers:
snake_case__ : int = layer(__UpperCamelCase )
return hidden_state
class __snake_case ( nn.Module ):
def __init__( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> None:
'''simple docstring'''
super().__init__()
snake_case__ : str = pool_scales
snake_case__ : List[str] = align_corners
snake_case__ : List[str] = in_channels
snake_case__ : int = channels
snake_case__ : Union[str, Any] = []
for i, pool_scale in enumerate(__UpperCamelCase ):
snake_case__ : Optional[Any] = UperNetPyramidPoolingBlock(pool_scale=__UpperCamelCase , in_channels=__UpperCamelCase , channels=__UpperCamelCase )
self.blocks.append(__UpperCamelCase )
self.add_module(str(__UpperCamelCase ) , __UpperCamelCase )
def __a ( self , __UpperCamelCase ) -> List[torch.Tensor]:
'''simple docstring'''
snake_case__ : Optional[int] = []
for ppm in self.blocks:
snake_case__ : List[Any] = ppm(__UpperCamelCase )
snake_case__ : Optional[int] = nn.functional.interpolate(
__UpperCamelCase , size=x.size()[2:] , mode='bilinear' , align_corners=self.align_corners )
ppm_outs.append(__UpperCamelCase )
return ppm_outs
class __snake_case ( nn.Module ):
def __init__( self , __UpperCamelCase , __UpperCamelCase ) -> int:
'''simple docstring'''
super().__init__()
snake_case__ : List[Any] = config
snake_case__ : Any = config.pool_scales # e.g. (1, 2, 3, 6)
snake_case__ : Tuple = in_channels
snake_case__ : int = config.hidden_size
snake_case__ : List[Any] = False
snake_case__ : Tuple = nn.Convad(self.channels , config.num_labels , kernel_size=1 )
# PSP Module
snake_case__ : List[Any] = UperNetPyramidPoolingModule(
self.pool_scales , self.in_channels[-1] , self.channels , align_corners=self.align_corners , )
snake_case__ : Dict = UperNetConvModule(
self.in_channels[-1] + len(self.pool_scales ) * self.channels , self.channels , kernel_size=3 , padding=1 , )
# FPN Module
snake_case__ : Optional[Any] = nn.ModuleList()
snake_case__ : Optional[int] = nn.ModuleList()
for in_channels in self.in_channels[:-1]: # skip the top layer
snake_case__ : str = UperNetConvModule(__UpperCamelCase , self.channels , kernel_size=1 )
snake_case__ : Tuple = UperNetConvModule(self.channels , self.channels , kernel_size=3 , padding=1 )
self.lateral_convs.append(__UpperCamelCase )
self.fpn_convs.append(__UpperCamelCase )
snake_case__ : str = UperNetConvModule(
len(self.in_channels ) * self.channels , self.channels , kernel_size=3 , padding=1 , )
def __a ( self ) -> List[str]:
'''simple docstring'''
self.apply(self._init_weights )
def __a ( self , __UpperCamelCase ) -> int:
'''simple docstring'''
if isinstance(__UpperCamelCase , nn.Convad ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
def __a ( self , __UpperCamelCase ) -> Tuple:
'''simple docstring'''
snake_case__ : Union[str, Any] = inputs[-1]
snake_case__ : str = [x]
psp_outs.extend(self.psp_modules(__UpperCamelCase ) )
snake_case__ : Any = torch.cat(__UpperCamelCase , dim=1 )
snake_case__ : Tuple = self.bottleneck(__UpperCamelCase )
return output
def __a ( self , __UpperCamelCase ) -> torch.Tensor:
'''simple docstring'''
snake_case__ : int = [lateral_conv(encoder_hidden_states[i] ) for i, lateral_conv in enumerate(self.lateral_convs )]
laterals.append(self.psp_forward(__UpperCamelCase ) )
# build top-down path
snake_case__ : str = len(__UpperCamelCase )
for i in range(used_backbone_levels - 1 , 0 , -1 ):
snake_case__ : Optional[int] = laterals[i - 1].shape[2:]
snake_case__ : List[str] = laterals[i - 1] + nn.functional.interpolate(
laterals[i] , size=__UpperCamelCase , mode='bilinear' , align_corners=self.align_corners )
# build outputs
snake_case__ : Union[str, Any] = [self.fpn_convs[i](laterals[i] ) for i in range(used_backbone_levels - 1 )]
# append psp feature
fpn_outs.append(laterals[-1] )
for i in range(used_backbone_levels - 1 , 0 , -1 ):
snake_case__ : str = nn.functional.interpolate(
fpn_outs[i] , size=fpn_outs[0].shape[2:] , mode='bilinear' , align_corners=self.align_corners )
snake_case__ : List[str] = torch.cat(__UpperCamelCase , dim=1 )
snake_case__ : str = self.fpn_bottleneck(__UpperCamelCase )
snake_case__ : str = self.classifier(__UpperCamelCase )
return output
class __snake_case ( nn.Module ):
def __init__( self , __UpperCamelCase , __UpperCamelCase = 2 , __UpperCamelCase = 3 , __UpperCamelCase = 1 ) -> None:
'''simple docstring'''
super().__init__()
snake_case__ : Any = config
snake_case__ : List[str] = config.auxiliary_in_channels
snake_case__ : Any = config.auxiliary_channels
snake_case__ : Tuple = config.auxiliary_num_convs
snake_case__ : List[Any] = config.auxiliary_concat_input
snake_case__ : Union[str, Any] = in_index
snake_case__ : int = (kernel_size // 2) * dilation
snake_case__ : str = []
convs.append(
UperNetConvModule(
self.in_channels , self.channels , kernel_size=__UpperCamelCase , padding=__UpperCamelCase , dilation=__UpperCamelCase ) )
for i in range(self.num_convs - 1 ):
convs.append(
UperNetConvModule(
self.channels , self.channels , kernel_size=__UpperCamelCase , padding=__UpperCamelCase , dilation=__UpperCamelCase ) )
if self.num_convs == 0:
snake_case__ : Tuple = nn.Identity()
else:
snake_case__ : Dict = nn.Sequential(*__UpperCamelCase )
if self.concat_input:
snake_case__ : List[Any] = UperNetConvModule(
self.in_channels + self.channels , self.channels , kernel_size=__UpperCamelCase , padding=kernel_size // 2 )
snake_case__ : Any = nn.Convad(self.channels , config.num_labels , kernel_size=1 )
def __a ( self ) -> Union[str, Any]:
'''simple docstring'''
self.apply(self._init_weights )
def __a ( self , __UpperCamelCase ) -> Dict:
'''simple docstring'''
if isinstance(__UpperCamelCase , nn.Convad ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
def __a ( self , __UpperCamelCase ) -> torch.Tensor:
'''simple docstring'''
snake_case__ : Tuple = encoder_hidden_states[self.in_index]
snake_case__ : Any = self.convs(__UpperCamelCase )
if self.concat_input:
snake_case__ : Optional[Any] = self.conv_cat(torch.cat([hidden_states, output] , dim=1 ) )
snake_case__ : Union[str, Any] = self.classifier(__UpperCamelCase )
return output
class __snake_case ( _lowerCamelCase ):
__lowerCamelCase = UperNetConfig
__lowerCamelCase = """pixel_values"""
__lowerCamelCase = True
def __a ( self , __UpperCamelCase ) -> Optional[int]:
'''simple docstring'''
if isinstance(__UpperCamelCase , __UpperCamelCase ):
module.backbone.init_weights()
module.decode_head.init_weights()
module.auxiliary_head.init_weights()
def __a ( self ) -> Any:
'''simple docstring'''
self.backbone.init_weights()
self.decode_head.init_weights()
self.auxiliary_head.init_weights()
def __a ( self , __UpperCamelCase , __UpperCamelCase=False ) -> List[Any]:
'''simple docstring'''
if isinstance(__UpperCamelCase , __UpperCamelCase ):
snake_case__ : Optional[int] = value
lowerCAmelCase__ : int = r'''
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
    it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and
    behavior.

    Parameters:
        config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
lowerCAmelCase__ : Tuple = r'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
[`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See
`attentions` under returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under
returned tensors for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
"""UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes.""" ,_lowerCamelCase ,)
class __snake_case ( _lowerCamelCase ):
def __init__( self , __UpperCamelCase ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(__UpperCamelCase )
snake_case__ : List[str] = AutoBackbone.from_config(config.backbone_config )
# Semantic segmentation head(s)
snake_case__ : Any = UperNetHead(__UpperCamelCase , in_channels=self.backbone.channels )
snake_case__ : Union[str, Any] = UperNetFCNHead(__UpperCamelCase ) if config.use_auxiliary_head else None
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format('batch_size, sequence_length' ) )
@replace_return_docstrings(output_type=__UpperCamelCase , config_class=_CONFIG_FOR_DOC )
def __a ( self , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , ) -> Union[tuple, SemanticSegmenterOutput]:
'''simple docstring'''
snake_case__ : Any = return_dict if return_dict is not None else self.config.use_return_dict
snake_case__ : Dict = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
snake_case__ : Tuple = output_attentions if output_attentions is not None else self.config.output_attentions
snake_case__ : List[str] = self.backbone.forward_with_filtered_kwargs(
__UpperCamelCase , output_hidden_states=__UpperCamelCase , output_attentions=__UpperCamelCase )
snake_case__ : Any = outputs.feature_maps
snake_case__ : Tuple = self.decode_head(__UpperCamelCase )
snake_case__ : Optional[Any] = nn.functional.interpolate(__UpperCamelCase , size=pixel_values.shape[2:] , mode='bilinear' , align_corners=__UpperCamelCase )
snake_case__ : str = None
if self.auxiliary_head is not None:
snake_case__ : int = self.auxiliary_head(__UpperCamelCase )
snake_case__ : Any = nn.functional.interpolate(
__UpperCamelCase , size=pixel_values.shape[2:] , mode='bilinear' , align_corners=__UpperCamelCase )
snake_case__ : Tuple = None
if labels is not None:
if self.config.num_labels == 1:
raise ValueError('The number of labels should be greater than one' )
else:
# compute weighted loss
snake_case__ : List[str] = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index )
snake_case__ : Optional[int] = loss_fct(__UpperCamelCase , __UpperCamelCase )
snake_case__ : str = loss_fct(__UpperCamelCase , __UpperCamelCase )
snake_case__ : Dict = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss
if not return_dict:
if output_hidden_states:
snake_case__ : int = (logits,) + outputs[1:]
else:
snake_case__ : Any = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return SemanticSegmenterOutput(
loss=__UpperCamelCase , logits=__UpperCamelCase , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
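# The forward pass above upsamples the decode head's logits back to the input
# resolution with bilinear interpolation. A minimal torch-only sketch of that
# step (batch size, label count, and spatial sizes are illustrative):
import torch
from torch import nn

_logits = torch.randn(1, 150, 128, 128)  # (batch, num_labels, height/4, width/4)
_upsampled = nn.functional.interpolate(
    _logits, size=(512, 512), mode='bilinear', align_corners=False
)
assert _upsampled.shape == (1, 150, 512, 512)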
| 720 | from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase__ : Any = {'''configuration_xglm''': ['''XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XGLMConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ : str = ['''XGLMTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ : Dict = ['''XGLMTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ : Optional[int] = [
'''XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XGLMForCausalLM''',
'''XGLMModel''',
'''XGLMPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ : Dict = [
'''FlaxXGLMForCausalLM''',
'''FlaxXGLMModel''',
'''FlaxXGLMPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ : Dict = [
'''TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXGLMForCausalLM''',
'''TFXGLMModel''',
'''TFXGLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ : Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
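# A minimal sketch of the lazy-import pattern realized by _LazyModule above
# (the class below is an illustrative stand-in, not the real implementation):
# submodules are only imported when one of their exported symbols is first accessed.
import importlib
import types

class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported symbol to the submodule that defines it
        self._symbol_to_module = {
            symbol: module for module, symbols in import_structure.items() for symbol in symbols
        }

    def __getattr__(self, attribute):
        if attribute not in self._symbol_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attribute!r}")
        submodule = importlib.import_module(
            "." + self._symbol_to_module[attribute], self.__name__
        )
        value = getattr(submodule, attribute)
        setattr(self, attribute, value)  # cache so later lookups skip __getattr__
        return value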
| 699 | 0 |
import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def UpperCamelCase__ ( A__ , A__ , A__ ) -> Any:
# Initialise PyTorch model
snake_case__ : Dict = TaConfig.from_json_file(A__ )
print(F"""Building PyTorch model from configuration: {config}""" )
snake_case__ : Union[str, Any] = TaForConditionalGeneration(A__ )
# Load weights from tf checkpoint
load_tf_weights_in_ta(A__ , A__ , A__ )
# Save pytorch-model
print(F"""Save PyTorch model to {pytorch_dump_path}""" )
model.save_pretrained(A__ )
if __name__ == "__main__":
lowerCAmelCase__ : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
lowerCAmelCase__ : Dict = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
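# A hypothetical invocation of the conversion script above (the script file
# name and all paths are placeholders):
#
#   python convert_t5_checkpoint.py \
#       --tf_checkpoint_path ./t5/model.ckpt \
#       --config_file ./t5/config.json \
#       --pytorch_dump_path ./t5-pytorch/pytorch_model.bin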
| 721 | from __future__ import annotations
import random
# Maximum size of the population. A bigger population can converge faster but uses more memory.
lowerCAmelCase__ : Dict = 2_00
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
lowerCAmelCase__ : List[str] = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
lowerCAmelCase__ : List[str] = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 10_00))
def UpperCamelCase__ ( A__ , A__ ) -> tuple[str, float]:
snake_case__ : Tuple = len([g for position, g in enumerate(A__ ) if g == main_target[position]] )
return (item, float(A__ ))
def UpperCamelCase__ ( A__ , A__ ) -> tuple[str, str]:
snake_case__ : str = random.randint(0 , len(A__ ) - 1 )
snake_case__ : int = parent_a[:random_slice] + parent_a[random_slice:]
snake_case__ : Any = parent_a[:random_slice] + parent_a[random_slice:]
return (child_a, child_a)
def UpperCamelCase__ ( A__ , A__ ) -> str:
snake_case__ : List[Any] = list(A__ )
if random.uniform(0 , 1 ) < MUTATION_PROBABILITY:
snake_case__ : Optional[Any] = random.choice(A__ )
return "".join(A__ )
def UpperCamelCase__ ( A__ , A__ , A__ , ) -> list[str]:
snake_case__ : Tuple = []
# Generate more children proportionally to the fitness score.
snake_case__ : Optional[Any] = int(parent_a[1] * 100 ) + 1
snake_case__ : str = 10 if child_n >= 10 else child_n
for _ in range(A__ ):
snake_case__ : Any = population_score[random.randint(0 , A__ )][0]
snake_case__ , snake_case__ : int = crossover(parent_a[0] , A__ )
# Append new string to the population list.
pop.append(mutate(A__ , A__ ) )
pop.append(mutate(A__ , A__ ) )
return pop
def UpperCamelCase__ ( A__ , A__ , A__ = True ) -> tuple[int, int, str]:
# Verify if N_POPULATION is bigger than N_SELECTED
if N_POPULATION < N_SELECTED:
snake_case__ : Union[str, Any] = F"""{N_POPULATION} must be bigger than {N_SELECTED}"""
raise ValueError(A__ )
# Verify that the target contains no genes besides the ones inside genes variable.
snake_case__ : Tuple = sorted({c for c in target if c not in genes} )
if not_in_genes_list:
snake_case__ : int = F"""{not_in_genes_list} is not in genes list, evolution cannot converge"""
raise ValueError(A__ )
# Generate random starting population.
snake_case__ : Union[str, Any] = []
for _ in range(A__ ):
population.append(''.join([random.choice(A__ ) for i in range(len(A__ ) )] ) )
    # Just some logs to know what the algorithm is doing.
snake_case__ , snake_case__ : str = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
total_population += len(A__ )
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
snake_case__ : List[Any] = [evaluate(A__ , A__ ) for item in population]
# Check if there is a matching evolution.
        snake_case__ : int = sorted(A__ , key=lambda x : x[1] , reverse=A__ )
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
        # Print the best result every 10 generations.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
F"""\nGeneration: {generation}"""
F"""\nTotal Population:{total_population}"""
F"""\nBest score: {population_score[0][1]}"""
F"""\nBest string: {population_score[0][0]}""" )
# Flush the old population, keeping some of the best evolutions.
        # Keeping them avoids regression of the evolution.
snake_case__ : Optional[int] = population[: int(N_POPULATION / 3 )]
population.clear()
population.extend(A__ )
# Normalize population score to be between 0 and 1.
snake_case__ : str = [
(item, score / len(A__ )) for item, score in population_score
]
# This is selection
for i in range(A__ ):
population.extend(select(population_score[int(A__ )] , A__ , A__ ) )
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
# forever to compute large strings, but will also calculate small strings in
# a far fewer generations.
if len(A__ ) > N_POPULATION:
break
if __name__ == "__main__":
lowerCAmelCase__ : str = (
'''This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!'''
)
lowerCAmelCase__ : Optional[Any] = list(
''' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm'''
'''nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\'''
)
lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__ : List[str] = basic(target_str, genes_list)
print(
F'''\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'''
)
| 699 | 0 |
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speechta import SpeechTaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase__ : Dict = get_tests_dir('''fixtures/test_sentencepiece_bpe_char.model''')
@require_sentencepiece
@require_tokenizers
class __snake_case ( _lowerCamelCase ,unittest.TestCase ):
__lowerCamelCase = SpeechTaTokenizer
__lowerCamelCase = False
__lowerCamelCase = True
def __a ( self ) -> str:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
        snake_case__ : Dict = SpeechTaTokenizer(SAMPLE_VOCAB )
snake_case__ : List[str] = AddedToken('<mask>' , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase )
snake_case__ : int = mask_token
tokenizer.add_special_tokens({'mask_token': mask_token} )
tokenizer.add_tokens(['<ctc_blank>'] )
tokenizer.save_pretrained(self.tmpdirname )
def __a ( self , __UpperCamelCase ) -> Any:
'''simple docstring'''
snake_case__ : int = 'this is a test'
snake_case__ : Optional[Any] = 'this is a test'
return input_text, output_text
def __a ( self , __UpperCamelCase , __UpperCamelCase=False , __UpperCamelCase=20 , __UpperCamelCase=5 ) -> int:
'''simple docstring'''
snake_case__ : Any = self.get_input_output_texts(__UpperCamelCase )
snake_case__ : Optional[int] = tokenizer.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase )
snake_case__ : List[Any] = tokenizer.decode(__UpperCamelCase , clean_up_tokenization_spaces=__UpperCamelCase )
return text, ids
def __a ( self ) -> List[Any]:
'''simple docstring'''
snake_case__ : Union[str, Any] = '<pad>'
snake_case__ : Any = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__UpperCamelCase ) , __UpperCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__UpperCamelCase ) , __UpperCamelCase )
def __a ( self ) -> int:
'''simple docstring'''
snake_case__ : Optional[int] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<s>' )
self.assertEqual(vocab_keys[1] , '<pad>' )
self.assertEqual(vocab_keys[-4] , 'œ' )
self.assertEqual(vocab_keys[-2] , '<mask>' )
self.assertEqual(vocab_keys[-1] , '<ctc_blank>' )
self.assertEqual(len(__UpperCamelCase ) , 81 )
def __a ( self ) -> Tuple:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 79 )
def __a ( self ) -> Tuple:
'''simple docstring'''
snake_case__ : Union[str, Any] = self.get_tokenizers(do_lower_case=__UpperCamelCase )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
snake_case__ : List[Any] = tokenizer.vocab_size
snake_case__ : Optional[Any] = len(__UpperCamelCase )
self.assertNotEqual(__UpperCamelCase , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
snake_case__ : Tuple = ['aaaaa bbbbbb', 'cccccccccdddddddd']
snake_case__ : int = tokenizer.add_tokens(__UpperCamelCase )
snake_case__ : Optional[Any] = tokenizer.vocab_size
snake_case__ : Optional[int] = len(__UpperCamelCase )
self.assertNotEqual(__UpperCamelCase , 0 )
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
self.assertEqual(__UpperCamelCase , len(__UpperCamelCase ) )
self.assertEqual(__UpperCamelCase , all_size + len(__UpperCamelCase ) )
snake_case__ : Optional[Any] = tokenizer.encode('aaaaa bbbbbb low cccccccccdddddddd l' , add_special_tokens=__UpperCamelCase )
self.assertGreaterEqual(len(__UpperCamelCase ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
snake_case__ : Union[str, Any] = {'eos_token': '>>>>|||<||<<|<<', 'pad_token': '<<<<<|||>|>>>>|>'}
snake_case__ : Optional[int] = tokenizer.add_special_tokens(__UpperCamelCase )
snake_case__ : List[str] = tokenizer.vocab_size
snake_case__ : Union[str, Any] = len(__UpperCamelCase )
self.assertNotEqual(__UpperCamelCase , 0 )
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
self.assertEqual(__UpperCamelCase , len(__UpperCamelCase ) )
self.assertEqual(__UpperCamelCase , all_size_a + len(__UpperCamelCase ) )
snake_case__ : List[str] = tokenizer.encode(
'>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l' , add_special_tokens=__UpperCamelCase )
self.assertGreaterEqual(len(__UpperCamelCase ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
def __a ( self ) -> Optional[int]:
'''simple docstring'''
pass
def __a ( self ) -> List[Any]:
'''simple docstring'''
pass
def __a ( self ) -> Tuple:
'''simple docstring'''
snake_case__ : Any = self.get_tokenizer()
snake_case__ : List[str] = tokenizer.tokenize('This is a test' )
# fmt: off
self.assertListEqual(__UpperCamelCase , [SPIECE_UNDERLINE, 'T', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'a', SPIECE_UNDERLINE, 't', 'e', 's', 't'] )
# fmt: on
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__UpperCamelCase ) , [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] , )
snake_case__ : Dict = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
__UpperCamelCase , [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '92000', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'] )
snake_case__ : Dict = tokenizer.convert_tokens_to_ids(__UpperCamelCase )
# fmt: off
self.assertListEqual(__UpperCamelCase , [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] )
# fmt: on
snake_case__ : Union[str, Any] = tokenizer.convert_ids_to_tokens(__UpperCamelCase )
self.assertListEqual(
__UpperCamelCase , [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '<unk>', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'] )
@slow
def __a ( self ) -> Dict:
'''simple docstring'''
snake_case__ : Optional[Any] = [
'Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides '
'general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural '
'Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained '
'models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.',
'BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly '
'conditioning on both left and right context in all layers.',
'The quick brown fox jumps over the lazy dog.',
]
# fmt: off
snake_case__ : Tuple = {
'input_ids': [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
'attention_mask': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__UpperCamelCase , model_name='microsoft/speecht5_asr' , revision='c5ef64c71905caeccde0e4462ef3f9077224c524' , sequences=__UpperCamelCase , )
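# Hedged usage sketch (assumption: network access to the real checkpoint; kept as a
# comment so the test module stays hermetic). Outside this harness the tokenizer is
# typically loaded and applied as:
#
#     from transformers import SpeechTaTokenizer
#     tok = SpeechTaTokenizer.from_pretrained("microsoft/speecht5_asr")
#     ids = tok("this is a test").input_ids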
| 700 |
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
lowerCAmelCase__ : Optional[int] = TypeVar('''T''')
class __snake_case ( Generic[T] ):
def __init__( self , __UpperCamelCase ) -> Any:
'''simple docstring'''
snake_case__ : Optional[int] = data
snake_case__ : Node[T] | None = None
def __str__( self ) -> str:
'''simple docstring'''
return F"""{self.data}"""
class __snake_case ( Generic[T] ):
def __init__( self ) -> None:
'''simple docstring'''
snake_case__ : Node[T] | None = None
def __iter__( self ) -> Iterator[T]:
'''simple docstring'''
snake_case__ : str = self.top
while node:
yield node.data
snake_case__ : Dict = node.next
def __str__( self ) -> str:
'''simple docstring'''
return "->".join([str(__UpperCamelCase ) for item in self] )
def __len__( self ) -> int:
'''simple docstring'''
return len(tuple(iter(self ) ) )
def __a ( self ) -> bool:
'''simple docstring'''
return self.top is None
def __a ( self , __UpperCamelCase ) -> None:
'''simple docstring'''
snake_case__ : str = Node(__UpperCamelCase )
if not self.is_empty():
snake_case__ : List[str] = self.top
snake_case__ : Tuple = node
def __a ( self ) -> T:
'''simple docstring'''
if self.is_empty():
raise IndexError('pop from empty stack' )
assert isinstance(self.top , __UpperCamelCase )
snake_case__ : List[str] = self.top
snake_case__ : Union[str, Any] = self.top.next
return pop_node.data
def __a ( self ) -> T:
'''simple docstring'''
if self.is_empty():
raise IndexError('peek from empty stack' )
assert self.top is not None
return self.top.data
def __a ( self ) -> None:
'''simple docstring'''
snake_case__ : Any = None
if __name__ == "__main__":
from doctest import testmod
testmod()
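# Usage sketch, kept as a comment because the class and method names above are
# mangled in this dump (in the original they are Node/Stack with push/pop/peek/clear):
#
#     stack = Stack[int]()
#     stack.push(1); stack.push(2)
#     assert str(stack) == "2->1"
#     assert stack.pop() == 2 and stack.peek() == 1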
| 699 | 0 |
import random
from typing import Any
def UpperCamelCase__ ( A__ ) -> list[Any]:
for _ in range(len(A__ ) ):
snake_case__ : List[Any] = random.randint(0 , len(A__ ) - 1 )
snake_case__ : Union[str, Any] = random.randint(0 , len(A__ ) - 1 )
        data[a], data[b] = data[b], data[a]  # swap the two sampled positions in place
return data
if __name__ == "__main__":
lowerCAmelCase__ : str = [0, 1, 2, 3, 4, 5, 6, 7]
lowerCAmelCase__ : Tuple = ['''python''', '''says''', '''hello''', '''!''']
print('''Fisher-Yates Shuffle:''')
print('''List''', integers, strings)
print('''FY Shuffle''', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
| 701 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase__ : Dict = logging.get_logger(__name__)
lowerCAmelCase__ : int = {
'''sail/poolformer_s12''': '''https://huggingface.co/sail/poolformer_s12/resolve/main/config.json''',
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}
class __snake_case ( _lowerCamelCase ):
__lowerCamelCase = """poolformer"""
def __init__( self , __UpperCamelCase=3 , __UpperCamelCase=16 , __UpperCamelCase=16 , __UpperCamelCase=3 , __UpperCamelCase=4.0 , __UpperCamelCase=[2, 2, 6, 2] , __UpperCamelCase=[64, 128, 320, 512] , __UpperCamelCase=[7, 3, 3, 3] , __UpperCamelCase=[4, 2, 2, 2] , __UpperCamelCase=[2, 1, 1, 1] , __UpperCamelCase=4 , __UpperCamelCase=0.0 , __UpperCamelCase="gelu" , __UpperCamelCase=True , __UpperCamelCase=1E-5 , __UpperCamelCase=0.0_2 , **__UpperCamelCase , ) -> Any:
'''simple docstring'''
snake_case__ : List[str] = num_channels
snake_case__ : Dict = patch_size
snake_case__ : Optional[int] = stride
snake_case__ : str = padding
snake_case__ : List[str] = pool_size
snake_case__ : List[Any] = hidden_sizes
snake_case__ : List[Any] = mlp_ratio
snake_case__ : Union[str, Any] = depths
snake_case__ : Dict = patch_sizes
snake_case__ : Dict = strides
snake_case__ : Dict = num_encoder_blocks
snake_case__ : Union[str, Any] = drop_path_rate
snake_case__ : List[str] = hidden_act
snake_case__ : Optional[Any] = use_layer_scale
snake_case__ : int = layer_scale_init_value
snake_case__ : Dict = initializer_range
super().__init__(**__UpperCamelCase )
class __snake_case ( _lowerCamelCase ):
__lowerCamelCase = version.parse("""1.11""" )
@property
def __a ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def __a ( self ) -> float:
'''simple docstring'''
return 2E-3
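# Hedged usage sketch (assumption: in the library the classes above are
# PoolFormerConfig and PoolFormerOnnxConfig; names here are mangled):
#
#     cfg = PoolFormerConfig(hidden_sizes=[64, 128, 320, 512], depths=[2, 2, 6, 2])
#     # The 2e-3 property above supplies the absolute tolerance used when
#     # validating an ONNX export of the model against the PyTorch outputs.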
| 699 | 0 |
from math import isqrt
def UpperCamelCase__ ( A__ ) -> list[int]:
snake_case__ : Optional[Any] = [True] * max_number
for i in range(2 , isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
for j in range(i**2 , A__ , A__ ):
snake_case__ : Union[str, Any] = False
return [i for i in range(2 , A__ ) if is_prime[i]]
def UpperCamelCase__ ( A__ = 10**8 ) -> int:
snake_case__ : Dict = calculate_prime_numbers(max_number // 2 )
snake_case__ : Tuple = 0
snake_case__ : Any = 0
snake_case__ : int = len(A__ ) - 1
while left <= right:
while prime_numbers[left] * prime_numbers[right] >= max_number:
right -= 1
semiprimes_count += right - left + 1
left += 1
return semiprimes_count
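# Worked micro-example of the two-pointer sweep above: for max_number = 30 the
# primes below 15 are [2, 3, 5, 7, 11, 13]; the sweep counts 6 + 3 + 1 = 10
# products p * q < 30 with p <= q, matching the ten semiprimes below 30
# (4, 6, 9, 10, 14, 15, 21, 22, 25, 26).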
if __name__ == "__main__":
print(F'''{solution() = }''')
| 702 |
import numpy as np
import qiskit
def UpperCamelCase__ ( A__ = 8 , A__ = None ) -> str:
snake_case__ : Optional[int] = np.random.default_rng(seed=A__ )
# Roughly 25% of the qubits will contribute to the key.
# So we take more than we need.
snake_case__ : Tuple = 6 * key_len
# Measurement basis for Alice's qubits.
snake_case__ : Tuple = rng.integers(2 , size=A__ )
# The set of states Alice will prepare.
snake_case__ : List[str] = rng.integers(2 , size=A__ )
# Measurement basis for Bob's qubits.
snake_case__ : List[Any] = rng.integers(2 , size=A__ )
# Quantum Circuit to simulate BB84
snake_case__ : Any = qiskit.QuantumCircuit(A__ , name='BB84' )
# Alice prepares her qubits according to rules above.
for index, _ in enumerate(A__ ):
if alice_state[index] == 1:
bbaa_circ.x(A__ )
if alice_basis[index] == 1:
bbaa_circ.h(A__ )
bbaa_circ.barrier()
# Bob measures the received qubits according to rules above.
for index, _ in enumerate(A__ ):
if bob_basis[index] == 1:
bbaa_circ.h(A__ )
bbaa_circ.barrier()
bbaa_circ.measure_all()
# Simulate the quantum circuit.
snake_case__ : List[str] = qiskit.Aer.get_backend('aer_simulator' )
# We only need to run one shot because the key is unique.
# Multiple shots will produce the same key.
snake_case__ : Optional[Any] = qiskit.execute(A__ , A__ , shots=1 , seed_simulator=A__ )
# Returns the result of measurement.
snake_case__ : Union[str, Any] = job.result().get_counts(A__ ).most_frequent()
# Extracting the generated key from the simulation results.
# Only keep measurement results where Alice and Bob chose the same basis.
snake_case__ : Optional[Any] = ''.join(
[
result_bit
for alice_basis_bit, bob_basis_bit, result_bit in zip(
A__ , A__ , A__ )
if alice_basis_bit == bob_basis_bit
] )
# Get final key. Pad with 0 if too short, otherwise truncate.
snake_case__ : Tuple = gen_key[:key_len] if len(A__ ) >= key_len else gen_key.ljust(A__ , '0' )
return key
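# Worked micro-example of the basis sift above: with alice_basis = [0, 1, 1, 0] and
# bob_basis = [0, 0, 1, 1], the bases agree only at positions 0 and 2, so only those
# two measured bits are kept for the raw key; the oversized register (6 * key_len
# qubits) leaves headroom so the sifted key is rarely shorter than key_len.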
if __name__ == "__main__":
print(F'''The generated key is : {bbaa(8, seed=0)}''')
from doctest import testmod
testmod()
| 699 | 0 |
'''simple docstring'''
def UpperCamelCase__ ( A__ , A__ ) -> str:
return "\n".join(
F"""{number} * {i} = {number * i}""" for i in range(1 , number_of_terms + 1 ) )
if __name__ == "__main__":
print(multiplication_table(number=5, number_of_terms=10))
| 703 |
def UpperCamelCase__ ( A__ , A__ , A__ ) -> int:
if exponent == 1:
return base
if exponent % 2 == 0:
snake_case__ : Dict = _modexpt(A__ , exponent // 2 , A__ ) % modulo_value
return (x * x) % modulo_value
else:
return (base * _modexpt(A__ , exponent - 1 , A__ )) % modulo_value
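# Worked micro-example of the square-and-multiply helper above: _modexpt(3, 4, 100)
# recurses on exponent 4 -> 2 -> 1, squaring at each even step, and returns
# (3**4) % 100 = 81. The solver below then feeds each result back in as the next
# exponent, i.e. it evaluates a truncated power tower modulo 10**digits.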
def UpperCamelCase__ ( A__ = 1777 , A__ = 1855 , A__ = 8 ) -> int:
snake_case__ : Tuple = base
for _ in range(1 , A__ ):
snake_case__ : Any = _modexpt(A__ , A__ , 10**digits )
return result
if __name__ == "__main__":
print(F'''{solution() = }''')
| 699 | 0 |
from timeit import timeit
def UpperCamelCase__ ( A__ ) -> int:
'''simple docstring'''
if number < 0:
raise ValueError('the value of input must not be negative' )
snake_case__ : Optional[Any] = 0
while number:
number &= number - 1
result += 1
return result
def UpperCamelCase__ ( A__ ) -> int:
'''simple docstring'''
if number < 0:
raise ValueError('the value of input must not be negative' )
snake_case__ : List[str] = 0
while number:
if number % 2 == 1:
result += 1
number >>= 1
return result
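# Worked micro-example: 25 is 0b11001, so both counters above return 3.
# Kernighan's loop (number &= number - 1) runs once per set bit, 3 iterations,
# while the modulo version shifts once per bit position, 5 iterations for 25.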
def UpperCamelCase__ ( ) -> None:
'''simple docstring'''
def do_benchmark(A__ ) -> None:
snake_case__ : Optional[int] = 'import __main__ as z'
print(F"""Benchmark when {number = }:""" )
print(F"""{get_set_bits_count_using_modulo_operator(A__ ) = }""" )
        snake_case__ : Optional[int] = timeit(F"""z.get_set_bits_count_using_modulo_operator({number})""" , setup=A__ )
print(F"""timeit() runs in {timing} seconds""" )
print(F"""{get_set_bits_count_using_brian_kernighans_algorithm(A__ ) = }""" )
        snake_case__ : List[Any] = timeit(
            F"""z.get_set_bits_count_using_brian_kernighans_algorithm({number})""" , setup=A__ , )
print(F"""timeit() runs in {timing} seconds""" )
for number in (25, 37, 58, 0):
do_benchmark(A__ )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 704 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
lowerCAmelCase__ : Tuple = abspath(join(dirname(dirname(__file__)), '''src'''))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def UpperCamelCase__ ( A__ ) -> Optional[Any]:
from diffusers.utils.testing_utils import pytest_addoption_shared
pytest_addoption_shared(A__ )
def UpperCamelCase__ ( A__ ) -> Optional[Any]:
from diffusers.utils.testing_utils import pytest_terminal_summary_main
snake_case__ : Union[str, Any] = terminalreporter.config.getoption('--make-reports' )
if make_reports:
pytest_terminal_summary_main(A__ , id=A__ )
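# Note on the hooks above: pytest picks up `pytest_addoption` and
# `pytest_terminal_summary` by name from any conftest.py, with no explicit
# registration. Assuming `pytest_addoption_shared` registers the --make-reports
# flag, invoking `pytest --make-reports=<id>` routes the run through the shared
# summary writer.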
| 699 | 0 |
def UpperCamelCase__ ( A__ = 1000 ) -> int:
return sum(e for e in range(3 , A__ ) if e % 3 == 0 or e % 5 == 0 )
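# Worked micro-example: for n = 10 the qualifying values are 3, 5, 6, 9, so the
# function returns 23 (the classic Project Euler problem 1 check).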
if __name__ == "__main__":
print(F'''{solution() = }''')
| 705 |
def UpperCamelCase__ ( A__ ) -> list[int]:
if length <= 0 or not isinstance(A__ , A__ ):
raise ValueError('Length must be a positive integer.' )
return [n * (2 * n - 1) for n in range(A__ )]
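# Worked micro-example: with length = 5 the comprehension evaluates n * (2n - 1)
# for n = 0..4, giving [0, 1, 6, 15, 28] (note the leading 0 from n = 0).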
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
| 699 | 0 |
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
lowerCAmelCase__ : List[Any] = datasets.utils.logging.get_logger(__name__)
class __snake_case ( folder_based_builder.FolderBasedBuilderConfig ):
__lowerCamelCase = None
__lowerCamelCase = None
class __snake_case ( folder_based_builder.FolderBasedBuilder ):
__lowerCamelCase = datasets.Audio()
__lowerCamelCase = """audio"""
__lowerCamelCase = AudioFolderConfig
__lowerCamelCase = 42 # definition at the bottom of the script
__lowerCamelCase = AudioClassification(audio_column="""audio""" ,label_column="""label""" )
lowerCAmelCase__ : Tuple = [
'''.aiff''',
'''.au''',
'''.avr''',
'''.caf''',
'''.flac''',
'''.htk''',
'''.svx''',
'''.mat4''',
'''.mat5''',
'''.mpc2k''',
'''.ogg''',
'''.paf''',
'''.pvf''',
'''.raw''',
'''.rf64''',
'''.sd2''',
'''.sds''',
'''.ircam''',
'''.voc''',
'''.w64''',
'''.wav''',
'''.nist''',
'''.wavex''',
'''.wve''',
'''.xi''',
'''.mp3''',
'''.opus''',
]
lowerCAmelCase__ : List[Any] = AUDIO_EXTENSIONS
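# Hedged usage sketch (standard datasets behavior, not specific to this file):
#
#     from datasets import load_dataset
#     ds = load_dataset("audiofolder", data_dir="path/to/clips")
#
# where class-labelled layouts like data_dir/{label}/clip.wav populate the
# `label` column declared by the AudioClassification task above.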
| 706 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ : Dict = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
lowerCAmelCase__ : Optional[Any] = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', F'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', F'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''',
F'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''',
F'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias'''))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', F'''decoder.layers.{i}.sa_qcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', F'''decoder.layers.{i}.sa_kcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', F'''decoder.layers.{i}.sa_qpos_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', F'''decoder.layers.{i}.sa_kpos_proj.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.weight''', F'''decoder.layers.{i}.sa_v_proj.weight'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', F'''decoder.layers.{i}.ca_qcontent_proj.weight''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', F'''decoder.layers.{i}.ca_kcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', F'''decoder.layers.{i}.ca_kpos_proj.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.weight''', F'''decoder.layers.{i}.ca_v_proj.weight'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', F'''decoder.layers.{i}.ca_qpos_sine_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', F'''decoder.layers.{i}.sa_qcontent_proj.bias''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', F'''decoder.layers.{i}.sa_kcontent_proj.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', F'''decoder.layers.{i}.sa_qpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', F'''decoder.layers.{i}.sa_kpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.bias''', F'''decoder.layers.{i}.sa_v_proj.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', F'''decoder.layers.{i}.ca_qcontent_proj.bias''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', F'''decoder.layers.{i}.ca_kcontent_proj.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', F'''decoder.layers.{i}.ca_kpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.bias''', F'''decoder.layers.{i}.ca_v_proj.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', F'''decoder.layers.{i}.ca_qpos_sine_proj.bias''')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
('''transformer.decoder.ref_point_head.layers.0.weight''', '''decoder.ref_point_head.layers.0.weight'''),
('''transformer.decoder.ref_point_head.layers.0.bias''', '''decoder.ref_point_head.layers.0.bias'''),
('''transformer.decoder.ref_point_head.layers.1.weight''', '''decoder.ref_point_head.layers.1.weight'''),
('''transformer.decoder.ref_point_head.layers.1.bias''', '''decoder.ref_point_head.layers.1.bias'''),
('''transformer.decoder.query_scale.layers.0.weight''', '''decoder.query_scale.layers.0.weight'''),
('''transformer.decoder.query_scale.layers.0.bias''', '''decoder.query_scale.layers.0.bias'''),
('''transformer.decoder.query_scale.layers.1.weight''', '''decoder.query_scale.layers.1.weight'''),
('''transformer.decoder.query_scale.layers.1.bias''', '''decoder.query_scale.layers.1.bias'''),
('''transformer.decoder.layers.0.ca_qpos_proj.weight''', '''decoder.layers.0.ca_qpos_proj.weight'''),
('''transformer.decoder.layers.0.ca_qpos_proj.bias''', '''decoder.layers.0.ca_qpos_proj.bias'''),
]
)
def UpperCamelCase__ ( A__ , A__ , A__ ) -> List[str]:
snake_case__ : int = state_dict.pop(A__ )
snake_case__ : Union[str, Any] = val
def UpperCamelCase__ ( A__ ) -> int:
snake_case__ : List[Any] = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
snake_case__ : Any = key.replace('backbone.0.body' , 'backbone.conv_encoder.model' )
snake_case__ : Optional[int] = value
else:
snake_case__ : Optional[int] = value
return new_state_dict
def UpperCamelCase__ ( A__ , A__=False ) -> Optional[int]:
snake_case__ : Optional[int] = ''
if is_panoptic:
snake_case__ : Tuple = 'conditional_detr.'
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
snake_case__ : int = state_dict.pop(F"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""" )
snake_case__ : str = state_dict.pop(F"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
snake_case__ : Union[str, Any] = in_proj_weight[:256, :]
snake_case__ : Union[str, Any] = in_proj_bias[:256]
snake_case__ : Union[str, Any] = in_proj_weight[256:512, :]
snake_case__ : Optional[Any] = in_proj_bias[256:512]
snake_case__ : List[str] = in_proj_weight[-256:, :]
snake_case__ : Tuple = in_proj_bias[-256:]
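# Note on the split above: the original model's nn.MultiheadAttention stores the
# query/key/value projections stacked in one (3 * 256, 256) in_proj matrix (hidden
# size 256); slicing rows [:256], [256:512], [-256:] recovers the separate q/k/v
# weights and biases expected by the Hugging Face attention module.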
def UpperCamelCase__ ( ) -> Tuple:
snake_case__ : int = 'http://images.cocodataset.org/val2017/000000039769.jpg'
snake_case__ : str = Image.open(requests.get(A__ , stream=A__ ).raw )
return im
@torch.no_grad()
def UpperCamelCase__ ( A__ , A__ ) -> str:
snake_case__ : List[Any] = ConditionalDetrConfig()
# set backbone and dilation attributes
if "resnet101" in model_name:
snake_case__ : Any = 'resnet101'
if "dc5" in model_name:
snake_case__ : Any = True
snake_case__ : int = 'panoptic' in model_name
if is_panoptic:
snake_case__ : str = 250
else:
snake_case__ : Union[str, Any] = 91
snake_case__ : Optional[int] = 'huggingface/label-files'
snake_case__ : Optional[Any] = 'coco-detection-id2label.json'
snake_case__ : str = json.load(open(hf_hub_download(A__ , A__ , repo_type='dataset' ) , 'r' ) )
snake_case__ : List[Any] = {int(A__ ): v for k, v in idalabel.items()}
snake_case__ : Any = idalabel
snake_case__ : int = {v: k for k, v in idalabel.items()}
# load image processor
snake_case__ : List[Any] = 'coco_panoptic' if is_panoptic else 'coco_detection'
snake_case__ : List[Any] = ConditionalDetrImageProcessor(format=A__ )
# prepare image
snake_case__ : List[str] = prepare_img()
snake_case__ : Any = image_processor(images=A__ , return_tensors='pt' )
snake_case__ : Dict = encoding['pixel_values']
logger.info(F"""Converting model {model_name}...""" )
# load original model from torch hub
snake_case__ : Any = torch.hub.load('DeppMeng/ConditionalDETR' , A__ , pretrained=A__ ).eval()
snake_case__ : Tuple = conditional_detr.state_dict()
# rename keys
for src, dest in rename_keys:
if is_panoptic:
snake_case__ : List[Any] = 'conditional_detr.' + src
rename_key(A__ , A__ , A__ )
snake_case__ : Dict = rename_backbone_keys(A__ )
# query, key and value matrices need special treatment
read_in_q_k_v(A__ , is_panoptic=A__ )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
snake_case__ : Optional[int] = 'conditional_detr.model.' if is_panoptic else 'model.'
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith('conditional_detr' )
and not key.startswith('class_labels_classifier' )
and not key.startswith('bbox_predictor' )
):
snake_case__ : List[Any] = state_dict.pop(A__ )
snake_case__ : Optional[int] = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
snake_case__ : str = state_dict.pop(A__ )
snake_case__ : List[Any] = val
elif key.startswith('bbox_attention' ) or key.startswith('mask_head' ):
continue
else:
snake_case__ : Union[str, Any] = state_dict.pop(A__ )
snake_case__ : Dict = val
else:
if not key.startswith('class_labels_classifier' ) and not key.startswith('bbox_predictor' ):
snake_case__ : List[Any] = state_dict.pop(A__ )
snake_case__ : Optional[int] = val
# finally, create HuggingFace model and load state dict
snake_case__ : Union[str, Any] = ConditionalDetrForSegmentation(A__ ) if is_panoptic else ConditionalDetrForObjectDetection(A__ )
model.load_state_dict(A__ )
model.eval()
model.push_to_hub(repo_id=A__ , organization='DepuMeng' , commit_message='Add model' )
# verify our conversion
snake_case__ : Tuple = conditional_detr(A__ )
snake_case__ : str = model(A__ )
assert torch.allclose(outputs.logits , original_outputs['pred_logits'] , atol=1e-4 )
assert torch.allclose(outputs.pred_boxes , original_outputs['pred_boxes'] , atol=1e-4 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs['pred_masks'] , atol=1e-4 )
# Save model and image processor
logger.info(F"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
Path(A__ ).mkdir(exist_ok=A__ )
model.save_pretrained(A__ )
image_processor.save_pretrained(A__ )
if __name__ == "__main__":
lowerCAmelCase__ : Any = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''conditional_detr_resnet50''',
type=str,
help='''Name of the CONDITIONAL_DETR model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
lowerCAmelCase__ : int = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 699 | 0 |
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class __snake_case :
def __init__( self , __UpperCamelCase , __UpperCamelCase=13 , __UpperCamelCase=7 , __UpperCamelCase=6 , __UpperCamelCase=17 , __UpperCamelCase=23 , __UpperCamelCase=11 , __UpperCamelCase=True , ) -> List[str]:
'''simple docstring'''
snake_case__ : str = parent
snake_case__ : Dict = batch_size
snake_case__ : Optional[int] = seq_length
snake_case__ : Optional[Any] = act_dim
snake_case__ : List[Any] = state_dim
snake_case__ : Optional[int] = hidden_size
snake_case__ : Dict = max_length
snake_case__ : List[str] = is_training
def __a ( self ) -> List[Any]:
'''simple docstring'''
snake_case__ : str = floats_tensor((self.batch_size, self.seq_length, self.state_dim) )
snake_case__ : Optional[Any] = floats_tensor((self.batch_size, self.seq_length, self.act_dim) )
snake_case__ : Dict = floats_tensor((self.batch_size, self.seq_length, 1) )
snake_case__ : str = floats_tensor((self.batch_size, self.seq_length, 1) )
snake_case__ : str = ids_tensor((self.batch_size, self.seq_length) , vocab_size=1000 )
snake_case__ : Optional[int] = random_attention_mask((self.batch_size, self.seq_length) )
snake_case__ : Tuple = self.get_config()
return (
config,
states,
actions,
rewards,
returns_to_go,
timesteps,
attention_mask,
)
def __a ( self ) -> List[str]:
'''simple docstring'''
return DecisionTransformerConfig(
batch_size=self.batch_size , seq_length=self.seq_length , act_dim=self.act_dim , state_dim=self.state_dim , hidden_size=self.hidden_size , max_length=self.max_length , )
def __a ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , ) -> Any:
'''simple docstring'''
snake_case__ : Any = DecisionTransformerModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
snake_case__ : Union[str, Any] = model(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
self.parent.assertEqual(result.state_preds.shape , states.shape )
self.parent.assertEqual(result.action_preds.shape , actions.shape )
self.parent.assertEqual(result.return_preds.shape , returns_to_go.shape )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length * 3, self.hidden_size) ) # seq length *3 as there are 3 modelities: states, returns and actions
def __a ( self ) -> List[str]:
'''simple docstring'''
snake_case__ : Optional[Any] = self.prepare_config_and_inputs()
        (
            snake_case__ ,
            snake_case__ ,
            snake_case__ ,
            snake_case__ ,
            snake_case__ ,
            snake_case__ ,
            snake_case__ ,
        ) : List[str] = config_and_inputs
snake_case__ : List[Any] = {
'states': states,
'actions': actions,
'rewards': rewards,
'returns_to_go': returns_to_go,
'timesteps': timesteps,
'attention_mask': attention_mask,
}
return config, inputs_dict
@require_torch
class __snake_case ( _lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,unittest.TestCase ):
__lowerCamelCase = (DecisionTransformerModel,) if is_torch_available() else ()
__lowerCamelCase = ()
__lowerCamelCase = {"""feature-extraction""": DecisionTransformerModel} if is_torch_available() else {}
# Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids
__lowerCamelCase = False
# Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features
__lowerCamelCase = False
__lowerCamelCase = False
__lowerCamelCase = False
__lowerCamelCase = False
__lowerCamelCase = False
__lowerCamelCase = False
__lowerCamelCase = False
__lowerCamelCase = False
__lowerCamelCase = False
def __a ( self ) -> List[str]:
'''simple docstring'''
snake_case__ : Optional[Any] = DecisionTransformerModelTester(self )
snake_case__ : List[Any] = ConfigTester(self , config_class=__UpperCamelCase , hidden_size=37 )
def __a ( self ) -> Optional[int]:
'''simple docstring'''
self.config_tester.run_common_tests()
def __a ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
@slow
def __a ( self ) -> str:
'''simple docstring'''
for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case__ : Optional[int] = DecisionTransformerModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
def __a ( self ) -> List[Any]:
'''simple docstring'''
snake_case__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : List[Any] = model_class(__UpperCamelCase )
snake_case__ : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case__ : Dict = [*signature.parameters.keys()]
snake_case__ : Optional[int] = [
'states',
'actions',
'rewards',
'returns_to_go',
'timesteps',
'attention_mask',
]
self.assertListEqual(arg_names[: len(__UpperCamelCase )] , __UpperCamelCase )
@require_torch
class __snake_case ( unittest.TestCase ):
@slow
def __a ( self ) -> Tuple:
'''simple docstring'''
snake_case__ : List[str] = 2 # number of steps of autoregressive prediction we will perform
snake_case__ : int = 10 # defined by the RL environment, may be normalized
snake_case__ : List[Any] = DecisionTransformerModel.from_pretrained('edbeeching/decision-transformer-gym-hopper-expert' )
snake_case__ : Optional[Any] = model.to(__UpperCamelCase )
snake_case__ : Any = model.config
torch.manual_seed(0 )
snake_case__ : str = torch.randn(1 , 1 , config.state_dim ).to(device=__UpperCamelCase , dtype=torch.floataa ) # env.reset()
snake_case__ : str = torch.tensor(
[[0.2_4_2_7_9_3, -0.2_8_6_9_3_0_7_4, 0.8_7_4_2_6_1_3], [0.6_7_8_1_5_2_7_4, -0.0_8_1_0_1_0_8_5, -0.1_2_9_5_2_1_4_7]] , device=__UpperCamelCase )
snake_case__ : Optional[Any] = torch.tensor(__UpperCamelCase , device=__UpperCamelCase , dtype=torch.floataa ).reshape(1 , 1 , 1 )
snake_case__ : int = state
snake_case__ : Optional[int] = torch.zeros(1 , 0 , config.act_dim , device=__UpperCamelCase , dtype=torch.floataa )
snake_case__ : Dict = torch.zeros(1 , 0 , device=__UpperCamelCase , dtype=torch.floataa )
snake_case__ : Optional[int] = torch.tensor(0 , device=__UpperCamelCase , dtype=torch.long ).reshape(1 , 1 )
for step in range(__UpperCamelCase ):
snake_case__ : Any = torch.cat([actions, torch.zeros(1 , 1 , config.act_dim , device=__UpperCamelCase )] , dim=1 )
snake_case__ : Any = torch.cat([rewards, torch.zeros(1 , 1 , device=__UpperCamelCase )] , dim=1 )
snake_case__ : Any = torch.ones(1 , states.shape[1] ).to(dtype=torch.long , device=states.device )
with torch.no_grad():
snake_case__ : List[str] = model(
states=__UpperCamelCase , actions=__UpperCamelCase , rewards=__UpperCamelCase , returns_to_go=__UpperCamelCase , timesteps=__UpperCamelCase , attention_mask=__UpperCamelCase , return_dict=__UpperCamelCase , )
self.assertEqual(action_pred.shape , actions.shape )
self.assertTrue(torch.allclose(action_pred[0, -1] , expected_outputs[step] , atol=1E-4 ) )
            snake_case__ , snake_case__ , snake_case__ , snake_case__ : List[str] = ( # env.step(action)
torch.randn(1 , 1 , config.state_dim ).to(device=__UpperCamelCase , dtype=torch.floataa ),
1.0,
False,
{},
)
snake_case__ : Tuple = action_pred[0, -1]
snake_case__ : List[Any] = torch.cat([states, state] , dim=1 )
snake_case__ : int = returns_to_go[0, -1] - reward
snake_case__ : Tuple = torch.cat([returns_to_go, pred_return.reshape(1 , 1 , 1 )] , dim=1 )
snake_case__ : List[str] = torch.cat(
[timesteps, torch.ones((1, 1) , device=__UpperCamelCase , dtype=torch.long ) * (step + 1)] , dim=1 )
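# Note on the loop above: each step appends a zero-padded action/reward slot,
# queries the model for action_pred, then rolls the predicted action, the next
# state, and the decremented return-to-go back into the context - the standard
# return-conditioned autoregressive rollout for a Decision Transformer.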
| 707 |
from collections import namedtuple
lowerCAmelCase__ : Union[str, Any] = namedtuple('''from_to''', '''from_ to''')
lowerCAmelCase__ : Tuple = {
'''cubicmeter''': from_to(1, 1),
'''litre''': from_to(0.0_01, 10_00),
'''kilolitre''': from_to(1, 1),
'''gallon''': from_to(0.0_04_54, 2_64.1_72),
'''cubicyard''': from_to(0.7_64_55, 1.3_07_95),
'''cubicfoot''': from_to(0.0_28, 35.31_47),
'''cup''': from_to(0.0_00_23_65_88, 42_26.75),
}
def UpperCamelCase__ ( A__ , A__ , A__ ) -> float:
if from_type not in METRIC_CONVERSION:
raise ValueError(
F"""Invalid 'from_type' value: {from_type!r} Supported values are:\n"""
            + ', '.join(METRIC_CONVERSION ) )
if to_type not in METRIC_CONVERSION:
raise ValueError(
F"""Invalid 'to_type' value: {to_type!r}. Supported values are:\n"""
            + ', '.join(METRIC_CONVERSION ) )
return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
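# Worked micro-example: converting 4 cubic metres to litres multiplies by the
# "from" factor of cubicmeter (1) and the "to" factor of litre (1000):
# 4 * 1 * 1000 = 4000.0 litres.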
if __name__ == "__main__":
import doctest
doctest.testmod()
| 699 | 0 |
from __future__ import annotations
def UpperCamelCase__ ( A__ , A__ ) -> bool:
snake_case__ : Optional[Any] = get_failure_array(A__ )
# 2) Step through text searching for pattern
    snake_case__ , snake_case__ : Any = 0, 0 # index into text, pattern
while i < len(A__ ):
if pattern[j] == text[i]:
if j == (len(A__ ) - 1):
return True
j += 1
# if this is a prefix in our pattern
# just go back far enough to continue
elif j > 0:
snake_case__ : List[Any] = failure[j - 1]
continue
i += 1
return False
def UpperCamelCase__ ( A__ ) -> list[int]:
snake_case__ : Optional[int] = [0]
snake_case__ : Union[str, Any] = 0
snake_case__ : Tuple = 1
while j < len(A__ ):
if pattern[i] == pattern[j]:
i += 1
elif i > 0:
snake_case__ : Optional[Any] = failure[i - 1]
continue
j += 1
failure.append(A__ )
return failure
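# Worked micro-example: get_failure_array("aabaabaaa") builds the prefix function
# [0, 1, 0, 1, 2, 3, 4, 5, 2], which is exactly what Test 5 below asserts; each
# entry records how far the pattern can fall back after a mismatch.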
if __name__ == "__main__":
# Test 1)
lowerCAmelCase__ : Dict = '''abc1abc12'''
lowerCAmelCase__ : int = '''alskfjaldsabc1abc1abc12k23adsfabcabc'''
lowerCAmelCase__ : Optional[int] = '''alskfjaldsk23adsfabcabc'''
assert kmp(pattern, texta) and not kmp(pattern, texta)
# Test 2)
lowerCAmelCase__ : Union[str, Any] = '''ABABX'''
lowerCAmelCase__ : Union[str, Any] = '''ABABZABABYABABX'''
assert kmp(pattern, text)
# Test 3)
lowerCAmelCase__ : List[Any] = '''AAAB'''
lowerCAmelCase__ : str = '''ABAAAAAB'''
assert kmp(pattern, text)
# Test 4)
lowerCAmelCase__ : List[Any] = '''abcdabcy'''
lowerCAmelCase__ : int = '''abcxabcdabxabcdabcdabcy'''
assert kmp(pattern, text)
# Test 5)
lowerCAmelCase__ : Tuple = '''aabaabaaa'''
assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
| 708 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase__ : Tuple = logging.get_logger(__name__)
lowerCAmelCase__ : Union[str, Any] = '''▁'''
lowerCAmelCase__ : List[Any] = {'''vocab_file''': '''sentencepiece.bpe.model'''}
lowerCAmelCase__ : Optional[Any] = {
'''vocab_file''': {
'''facebook/xglm-564M''': '''https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model''',
}
}
lowerCAmelCase__ : str = {
'''facebook/xglm-564M''': 20_48,
}
class __snake_case ( _lowerCamelCase ):
__lowerCamelCase = VOCAB_FILES_NAMES
__lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
__lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCamelCase = ["""input_ids""", """attention_mask"""]
def __init__( self , __UpperCamelCase , __UpperCamelCase="<s>" , __UpperCamelCase="</s>" , __UpperCamelCase="</s>" , __UpperCamelCase="<s>" , __UpperCamelCase="<unk>" , __UpperCamelCase="<pad>" , __UpperCamelCase = None , **__UpperCamelCase , ) -> None:
'''simple docstring'''
snake_case__ : Any = {} if sp_model_kwargs is None else sp_model_kwargs
# Compatibility with the original tokenizer
snake_case__ : Tuple = 7
snake_case__ : Dict = [F"""<madeupword{i}>""" for i in range(self.num_madeup_words )]
snake_case__ : Union[str, Any] = kwargs.get('additional_special_tokens' , [] )
kwargs["additional_special_tokens"] += [
word for word in madeup_words if word not in kwargs["additional_special_tokens"]
]
super().__init__(
bos_token=__UpperCamelCase , eos_token=__UpperCamelCase , unk_token=__UpperCamelCase , sep_token=__UpperCamelCase , cls_token=__UpperCamelCase , pad_token=__UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCamelCase , )
snake_case__ : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__UpperCamelCase ) )
snake_case__ : Optional[Any] = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
snake_case__ : Tuple = 1
# Mimic fairseq token-to-id alignment for the first 4 token
snake_case__ : Tuple = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
snake_case__ : List[Any] = len(self.sp_model )
snake_case__ : Optional[Any] = {F"""<madeupword{i}>""": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
self.fairseq_tokens_to_ids.update(__UpperCamelCase )
snake_case__ : Union[str, Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
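    # Note on the alignment above: fairseq reserves ids 0-3 for the four special
    # tokens, while the SentencePiece model places its first real piece at id 3,
    # so every spm id is shifted by fairseq_offset (1) and the specials plus the
    # trailing <madeupword> slots are mapped explicitly.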
def __getstate__( self ) -> List[Any]:
'''simple docstring'''
snake_case__ : Union[str, Any] = self.__dict__.copy()
snake_case__ : Optional[Any] = None
snake_case__ : Tuple = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , __UpperCamelCase ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : Union[str, Any] = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
snake_case__ : Any = {}
snake_case__ : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def __a ( self , __UpperCamelCase , __UpperCamelCase = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.sep_token_id] + token_ids_a
snake_case__ : str = [self.sep_token_id]
return sep + token_ids_a + sep + sep + token_ids_a
def __a ( self , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__UpperCamelCase , token_ids_a=__UpperCamelCase , already_has_special_tokens=__UpperCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(__UpperCamelCase ))
return [1] + ([0] * len(__UpperCamelCase )) + [1, 1] + ([0] * len(__UpperCamelCase ))
def __a ( self , __UpperCamelCase , __UpperCamelCase = None ) -> List[int]:
'''simple docstring'''
snake_case__ : int = [self.sep_token_id]
if token_ids_a is None:
return len(sep + token_ids_a ) * [0]
return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0]
@property
def __a ( self ) -> Tuple:
'''simple docstring'''
return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
def __a ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : int = {self.convert_ids_to_tokens(__UpperCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __a ( self , __UpperCamelCase ) -> List[str]:
'''simple docstring'''
return self.sp_model.encode(__UpperCamelCase , out_type=__UpperCamelCase )
def __a ( self , __UpperCamelCase ) -> Union[str, Any]:
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
snake_case__ : Optional[Any] = self.sp_model.PieceToId(__UpperCamelCase )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def __a ( self , __UpperCamelCase ) -> Dict:
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def __a ( self , __UpperCamelCase ) -> int:
'''simple docstring'''
        snake_case__ : int = ''.join(__UpperCamelCase ).replace(SPIECE_UNDERLINE , ' ' ).strip()  # assumes SPIECE_UNDERLINE ('▁') is defined at module level, as in the full tokenizer file
return out_string
def __a ( self , __UpperCamelCase , __UpperCamelCase = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(__UpperCamelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
snake_case__ : List[str] = os.path.join(
__UpperCamelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __UpperCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__UpperCamelCase , 'wb' ) as fi:
snake_case__ : Any = self.sp_model.serialized_model_proto()
fi.write(__UpperCamelCase )
return (out_vocab_file,)
| 699 | 0 |
import random
from .binary_exp_mod import bin_exp_mod
def UpperCamelCase__ ( A__ , A__=1000 ) -> int:
if n < 2:
return False
if n % 2 == 0:
return n == 2
# this means n is odd
snake_case__ : Optional[int] = n - 1
snake_case__ : Union[str, Any] = 0
while d % 2 == 0:
        d //= 2  # floor-divide so the exponent stays an integer
        exp += 1
    # n - 1 = d * (2 ** exp)
snake_case__ : Union[str, Any] = 0
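    # run `prec` independent rounds; each random base drawn below is a potential
    # Miller-Rabin witness of compositeness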
while count < prec:
snake_case__ : Union[str, Any] = random.randint(2 , n - 1 )
snake_case__ : List[Any] = bin_exp_mod(A__ , A__ , A__ )
if b != 1:
snake_case__ : Any = True
for _ in range(A__ ):
if b == n - 1:
snake_case__ : Dict = False
break
snake_case__ : int = b * b
b %= n
if flag:
return False
count += 1
return True
if __name__ == "__main__":
lowerCAmelCase__ : Tuple = abs(int(input('''Enter bound : ''').strip()))
print('''Here\'s the list of primes:''')
print(''', '''.join(str(i) for i in range(n + 1) if is_prime_big(i)))
| 709 | import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
lowerCAmelCase__ : Any = logging.get_logger(__name__)
lowerCAmelCase__ : List[Any] = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
lowerCAmelCase__ : Any = {
'''vocab_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
lowerCAmelCase__ : Any = {
'''vocab_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
lowerCAmelCase__ : Tuple = {
'''vocab_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'''
),
},
}
lowerCAmelCase__ : Dict = {
'''facebook/dpr-ctx_encoder-single-nq-base''': 5_12,
'''facebook/dpr-ctx_encoder-multiset-base''': 5_12,
}
lowerCAmelCase__ : Union[str, Any] = {
'''facebook/dpr-question_encoder-single-nq-base''': 5_12,
'''facebook/dpr-question_encoder-multiset-base''': 5_12,
}
lowerCAmelCase__ : Optional[Any] = {
'''facebook/dpr-reader-single-nq-base''': 5_12,
'''facebook/dpr-reader-multiset-base''': 5_12,
}
lowerCAmelCase__ : Tuple = {
'''facebook/dpr-ctx_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-ctx_encoder-multiset-base''': {'''do_lower_case''': True},
}
lowerCAmelCase__ : Any = {
'''facebook/dpr-question_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-question_encoder-multiset-base''': {'''do_lower_case''': True},
}
lowerCAmelCase__ : List[str] = {
'''facebook/dpr-reader-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-reader-multiset-base''': {'''do_lower_case''': True},
}
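# The three fast tokenizer classes below reuse BertTokenizerFast unchanged; they only
# differ in the pretrained resources (vocab files, max input sizes, init configuration)
# wired in through the class attributes.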
class __snake_case ( _lowerCamelCase ):
__lowerCamelCase = VOCAB_FILES_NAMES
__lowerCamelCase = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
__lowerCamelCase = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCamelCase = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
__lowerCamelCase = DPRContextEncoderTokenizer
class __snake_case ( _lowerCamelCase ):
__lowerCamelCase = VOCAB_FILES_NAMES
__lowerCamelCase = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
__lowerCamelCase = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCamelCase = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
__lowerCamelCase = DPRQuestionEncoderTokenizer
lowerCAmelCase__ : Tuple = collections.namedtuple(
'''DPRSpanPrediction''', ['''span_score''', '''relevance_score''', '''doc_id''', '''start_index''', '''end_index''', '''text''']
)
lowerCAmelCase__ : List[Any] = collections.namedtuple('''DPRReaderOutput''', ['''start_logits''', '''end_logits''', '''relevance_logits'''])
lowerCAmelCase__ : int = r'''
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
          - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
            is provided).
          - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
            acceptable input length for the model if that argument is not provided.
          - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
            lengths).
        truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
            Activates and controls truncation. Accepts the following values:
          - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
            the maximum acceptable input length for the model if that argument is not provided. This will truncate
            token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
            of pairs) is provided.
          - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
            acceptable input length for the model if that argument is not provided. This will only truncate the first
            sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
          - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
            acceptable input length for the model if that argument is not provided. This will only truncate the
            second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
          - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
            greater than the model maximum admissible input size).
        max_length (`int`, *optional*):
            Controls the maximum length to use by one of the truncation/padding parameters.
            If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
            is required by one of the truncation/padding parameters. If the model has no specific maximum input
            length (like XLNet) truncation/padding to a maximum length will be deactivated.
        return_tensors (`str` or [`~utils.TensorType`], *optional*):
            If set, will return tensors instead of list of python integers. Acceptable values are:
          - `'tf'`: Return TensorFlow `tf.constant` objects.
          - `'pt'`: Return PyTorch `torch.Tensor` objects.
          - `'np'`: Return Numpy `np.ndarray` objects.
        return_attention_mask (`bool`, *optional*):
            Whether or not to return the attention mask. If not set, will return the attention mask according to the
            specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Return:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
'''
@add_start_docstrings(_lowerCamelCase )
class __snake_case :
def __call__( self , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = False , __UpperCamelCase = False , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , **__UpperCamelCase , ) -> BatchEncoding:
'''simple docstring'''
if titles is None and texts is None:
return super().__call__(
__UpperCamelCase , padding=__UpperCamelCase , truncation=__UpperCamelCase , max_length=__UpperCamelCase , return_tensors=__UpperCamelCase , return_attention_mask=__UpperCamelCase , **__UpperCamelCase , )
elif titles is None or texts is None:
snake_case__ : Optional[Any] = titles if texts is None else texts
return super().__call__(
__UpperCamelCase , __UpperCamelCase , padding=__UpperCamelCase , truncation=__UpperCamelCase , max_length=__UpperCamelCase , return_tensors=__UpperCamelCase , return_attention_mask=__UpperCamelCase , **__UpperCamelCase , )
snake_case__ : int = titles if not isinstance(__UpperCamelCase , __UpperCamelCase ) else [titles]
snake_case__ : Optional[int] = texts if not isinstance(__UpperCamelCase , __UpperCamelCase ) else [texts]
snake_case__ : List[Any] = len(__UpperCamelCase )
snake_case__ : str = questions if not isinstance(__UpperCamelCase , __UpperCamelCase ) else [questions] * n_passages
        assert len(__UpperCamelCase ) == len(
            __UpperCamelCase ), F"""There should be as many titles as texts but got {len(__UpperCamelCase )} titles and {len(__UpperCamelCase )} texts."""
snake_case__ : Optional[int] = super().__call__(__UpperCamelCase , __UpperCamelCase , padding=__UpperCamelCase , truncation=__UpperCamelCase )['input_ids']
snake_case__ : Optional[Any] = super().__call__(__UpperCamelCase , add_special_tokens=__UpperCamelCase , padding=__UpperCamelCase , truncation=__UpperCamelCase )['input_ids']
snake_case__ : Union[str, Any] = {
'input_ids': [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(__UpperCamelCase , __UpperCamelCase )
]
}
if return_attention_mask is not False:
snake_case__ : List[Any] = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
snake_case__ : Union[str, Any] = attention_mask
return self.pad(__UpperCamelCase , padding=__UpperCamelCase , max_length=__UpperCamelCase , return_tensors=__UpperCamelCase )
def __a ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = 16 , __UpperCamelCase = 64 , __UpperCamelCase = 4 , ) -> List[DPRSpanPrediction]:
'''simple docstring'''
snake_case__ : Optional[Any] = reader_input['input_ids']
snake_case__ , snake_case__ , snake_case__ : Any = reader_output[:3]
snake_case__ : List[str] = len(__UpperCamelCase )
snake_case__ : Tuple = sorted(range(__UpperCamelCase ) , reverse=__UpperCamelCase , key=relevance_logits.__getitem__ )
snake_case__ : List[DPRReaderOutput] = []
for doc_id in sorted_docs:
snake_case__ : Tuple = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
snake_case__ : int = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
snake_case__ : Union[str, Any] = sequence_ids.index(self.pad_token_id )
else:
snake_case__ : str = len(__UpperCamelCase )
snake_case__ : Dict = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=__UpperCamelCase , top_spans=__UpperCamelCase , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=__UpperCamelCase , start_index=__UpperCamelCase , end_index=__UpperCamelCase , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(__UpperCamelCase ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def __a ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , ) -> List[DPRSpanPrediction]:
'''simple docstring'''
snake_case__ : Any = []
for start_index, start_score in enumerate(__UpperCamelCase ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
        snake_case__ : str = sorted(__UpperCamelCase , key=lambda x : x[1] , reverse=__UpperCamelCase )
snake_case__ : Any = []
for (start_index, end_index), score in scores:
assert start_index <= end_index, F"""Wrong span indices: [{start_index}:{end_index}]"""
snake_case__ : str = end_index - start_index + 1
assert length <= max_answer_length, F"""Span is too long: {length} > {max_answer_length}"""
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(__UpperCamelCase ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(_lowerCamelCase )
class __snake_case ( _lowerCamelCase ,_lowerCamelCase ):
__lowerCamelCase = VOCAB_FILES_NAMES
__lowerCamelCase = READER_PRETRAINED_VOCAB_FILES_MAP
__lowerCamelCase = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCamelCase = READER_PRETRAINED_INIT_CONFIGURATION
__lowerCamelCase = ["""input_ids""", """attention_mask"""]
__lowerCamelCase = DPRReaderTokenizer
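# A minimal usage sketch (hedged: the checkpoint name, the reader model object and the
# exact values below are illustrative assumptions, not part of this module):
#
#   tokenizer = DPRReaderTokenizerFast.from_pretrained('facebook/dpr-reader-single-nq-base')
#   encoded = tokenizer(
#       questions='What does DPR stand for?',
#       titles=['Dense Passage Retrieval', 'DPR'],
#       texts=['Dense Passage Retrieval is a set of tools for open-domain QA ...', '...'],
#       padding=True,
#       return_tensors='pt',
#   )
#   outputs = reader_model(**encoded)  # a DPRReader model assumed to be loaded elsewhere
#   best_spans = tokenizer.decode_best_spans(encoded, outputs, num_spans=1)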
| 699 | 0 |
'''simple docstring'''
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class __snake_case ( _lowerCamelCase ):
__lowerCamelCase = """"""
__lowerCamelCase = """hf-legacy""" # "hf://"" is reserved for hffs
def __init__( self , __UpperCamelCase = None , __UpperCamelCase = None , **__UpperCamelCase , ) -> Optional[Any]:
'''simple docstring'''
super().__init__(self , **__UpperCamelCase )
snake_case__ : int = repo_info
snake_case__ : Dict = token
snake_case__ : Optional[int] = None
def __a ( self ) -> Dict:
'''simple docstring'''
if self.dir_cache is None:
snake_case__ : str = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
snake_case__ : Dict = {
'name': hf_file.rfilename,
'size': None,
'type': 'file',
}
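                # every ancestor directory of the file is also registered as a directory entry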
self.dir_cache.update(
{
str(__UpperCamelCase ): {'name': str(__UpperCamelCase ), 'size': None, 'type': 'directory'}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def __a ( self , __UpperCamelCase , __UpperCamelCase = "rb" , **__UpperCamelCase , ) -> Optional[Any]:
'''simple docstring'''
        if not isinstance(self.repo_info , DatasetInfo ):
raise NotImplementedError(F"""Open is only implemented for dataset repositories, but got {self.repo_info}""" )
snake_case__ : Tuple = hf_hub_url(self.repo_info.id , __UpperCamelCase , revision=self.repo_info.sha )
return fsspec.open(
__UpperCamelCase , mode=__UpperCamelCase , headers=get_authentication_headers_for_url(__UpperCamelCase , use_auth_token=self.token ) , client_kwargs={'trust_env': True} , ).open()
def __a ( self , __UpperCamelCase , **__UpperCamelCase ) -> Optional[Any]:
'''simple docstring'''
self._get_dirs()
snake_case__ : Dict = self._strip_protocol(__UpperCamelCase )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(__UpperCamelCase )
def __a ( self , __UpperCamelCase , __UpperCamelCase=False , **__UpperCamelCase ) -> Optional[int]:
'''simple docstring'''
self._get_dirs()
snake_case__ : Dict = PurePosixPath(path.strip('/' ) )
snake_case__ : Tuple = {}
for p, f in self.dir_cache.items():
snake_case__ : List[Any] = PurePosixPath(p.strip('/' ) )
snake_case__ : str = p.parent
if root == path:
snake_case__ : Any = f
snake_case__ : Any = list(paths.values() )
if detail:
return out
else:
return sorted(f['name'] for f in out )
| 710 | import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __snake_case ( _lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,unittest.TestCase ):
__lowerCamelCase = StableDiffusionInstructPixaPixPipeline
__lowerCamelCase = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width""", """cross_attention_kwargs"""}
__lowerCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
__lowerCamelCase = IMAGE_TO_IMAGE_IMAGE_PARAMS
__lowerCamelCase = IMAGE_TO_IMAGE_IMAGE_PARAMS
def __a ( self ) -> List[Any]:
'''simple docstring'''
torch.manual_seed(0 )
snake_case__ : Tuple = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=8 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
snake_case__ : Any = PNDMScheduler(skip_prk_steps=__UpperCamelCase )
torch.manual_seed(0 )
snake_case__ : Dict = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
snake_case__ : int = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
snake_case__ : Tuple = CLIPTextModel(__UpperCamelCase )
snake_case__ : Optional[int] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
snake_case__ : Optional[int] = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def __a ( self , __UpperCamelCase , __UpperCamelCase=0 ) -> Dict:
'''simple docstring'''
snake_case__ : Optional[Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(__UpperCamelCase ) ).to(__UpperCamelCase )
snake_case__ : int = image.cpu().permute(0 , 2 , 3 , 1 )[0]
snake_case__ : Union[str, Any] = Image.fromarray(np.uinta(__UpperCamelCase ) ).convert('RGB' )
if str(__UpperCamelCase ).startswith('mps' ):
snake_case__ : str = torch.manual_seed(__UpperCamelCase )
else:
snake_case__ : Dict = torch.Generator(device=__UpperCamelCase ).manual_seed(__UpperCamelCase )
snake_case__ : str = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'image_guidance_scale': 1,
'output_type': 'numpy',
}
return inputs
def __a ( self ) -> Tuple:
'''simple docstring'''
snake_case__ : Dict = 'cpu' # ensure determinism for the device-dependent torch.Generator
snake_case__ : Optional[int] = self.get_dummy_components()
snake_case__ : str = StableDiffusionInstructPixaPixPipeline(**__UpperCamelCase )
snake_case__ : Optional[int] = sd_pipe.to(__UpperCamelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCamelCase )
snake_case__ : Tuple = self.get_dummy_inputs(__UpperCamelCase )
snake_case__ : List[str] = sd_pipe(**__UpperCamelCase ).images
snake_case__ : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
snake_case__ : str = np.array([0.7_5_2_6, 0.3_7_5_0, 0.4_5_4_7, 0.6_1_1_7, 0.5_8_6_6, 0.5_0_1_6, 0.4_3_2_7, 0.5_6_4_2, 0.4_8_1_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __a ( self ) -> Any:
'''simple docstring'''
snake_case__ : Dict = 'cpu' # ensure determinism for the device-dependent torch.Generator
snake_case__ : Union[str, Any] = self.get_dummy_components()
snake_case__ : Optional[Any] = StableDiffusionInstructPixaPixPipeline(**__UpperCamelCase )
snake_case__ : List[Any] = sd_pipe.to(__UpperCamelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCamelCase )
snake_case__ : Union[str, Any] = self.get_dummy_inputs(__UpperCamelCase )
snake_case__ : List[str] = 'french fries'
snake_case__ : Optional[Any] = sd_pipe(**__UpperCamelCase , negative_prompt=__UpperCamelCase )
snake_case__ : Union[str, Any] = output.images
snake_case__ : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
snake_case__ : Any = np.array([0.7_5_1_1, 0.3_6_4_2, 0.4_5_5_3, 0.6_2_3_6, 0.5_7_9_7, 0.5_0_1_3, 0.4_3_4_3, 0.5_6_1_1, 0.4_8_3_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __a ( self ) -> int:
'''simple docstring'''
snake_case__ : Optional[int] = 'cpu' # ensure determinism for the device-dependent torch.Generator
snake_case__ : List[str] = self.get_dummy_components()
snake_case__ : str = StableDiffusionInstructPixaPixPipeline(**__UpperCamelCase )
snake_case__ : str = sd_pipe.to(__UpperCamelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCamelCase )
snake_case__ : Dict = self.get_dummy_inputs(__UpperCamelCase )
snake_case__ : Any = [inputs['prompt']] * 2
snake_case__ : Optional[int] = np.array(inputs['image'] ).astype(np.floataa ) / 2_5_5.0
snake_case__ : Optional[int] = torch.from_numpy(__UpperCamelCase ).unsqueeze(0 ).to(__UpperCamelCase )
snake_case__ : Any = image / 2 + 0.5
snake_case__ : Optional[Any] = image.permute(0 , 3 , 1 , 2 )
snake_case__ : List[Any] = image.repeat(2 , 1 , 1 , 1 )
snake_case__ : Optional[int] = sd_pipe(**__UpperCamelCase ).images
snake_case__ : Union[str, Any] = image[-1, -3:, -3:, -1]
assert image.shape == (2, 32, 32, 3)
snake_case__ : List[Any] = np.array([0.5_8_1_2, 0.5_7_4_8, 0.5_2_2_2, 0.5_9_0_8, 0.5_6_9_5, 0.7_1_7_4, 0.6_8_0_4, 0.5_5_2_3, 0.5_5_7_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __a ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : Optional[int] = 'cpu' # ensure determinism for the device-dependent torch.Generator
snake_case__ : Optional[int] = self.get_dummy_components()
snake_case__ : Tuple = EulerAncestralDiscreteScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='scaled_linear' )
snake_case__ : int = StableDiffusionInstructPixaPixPipeline(**__UpperCamelCase )
snake_case__ : List[str] = sd_pipe.to(__UpperCamelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCamelCase )
snake_case__ : str = self.get_dummy_inputs(__UpperCamelCase )
snake_case__ : Any = sd_pipe(**__UpperCamelCase ).images
snake_case__ : int = image[0, -3:, -3:, -1]
snake_case__ : Tuple = [round(__UpperCamelCase , 4 ) for x in image_slice.flatten().tolist()]
        print(','.join([str(x) for x in slice] ) )
assert image.shape == (1, 32, 32, 3)
snake_case__ : List[Any] = np.array([0.7_4_1_7, 0.3_8_4_2, 0.4_7_3_2, 0.5_7_7_6, 0.5_8_9_1, 0.5_1_3_9, 0.4_0_5_2, 0.5_6_7_3, 0.4_9_8_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __a ( self ) -> int:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def __a ( self ) -> Any:
'''simple docstring'''
snake_case__ : Optional[int] = self.get_dummy_components()
snake_case__ : int = StableDiffusionInstructPixaPixPipeline(**__UpperCamelCase )
snake_case__ : Union[str, Any] = VaeImageProcessor(do_resize=__UpperCamelCase , do_normalize=__UpperCamelCase )
snake_case__ : Optional[int] = pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
snake_case__ : Optional[Any] = pipe(**self.get_dummy_inputs_by_type(__UpperCamelCase , input_image_type='pt' ) )[0]
snake_case__ : Union[str, Any] = components['vae']
snake_case__ : str = self.get_dummy_inputs_by_type(__UpperCamelCase , input_image_type='pt' )
for image_param in self.image_latents_params:
if image_param in inputs.keys():
snake_case__ : List[str] = vae.encode(inputs[image_param] ).latent_dist.mode()
snake_case__ : Dict = pipe(**__UpperCamelCase )[0]
snake_case__ : str = np.abs(out - out_latents_inputs ).max()
self.assertLess(__UpperCamelCase , 1E-4 , 'passing latents as image input generate different result from passing image' )
@slow
@require_torch_gpu
class __snake_case ( unittest.TestCase ):
def __a ( self ) -> List[str]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __a ( self , __UpperCamelCase=0 ) -> Dict:
'''simple docstring'''
snake_case__ : Optional[Any] = torch.manual_seed(__UpperCamelCase )
snake_case__ : List[str] = load_image(
'https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg' )
snake_case__ : int = {
'prompt': 'turn him into a cyborg',
'image': image,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 7.5,
'image_guidance_scale': 1.0,
'output_type': 'numpy',
}
return inputs
def __a ( self ) -> Dict:
'''simple docstring'''
snake_case__ : Any = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=__UpperCamelCase )
pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
pipe.enable_attention_slicing()
snake_case__ : Tuple = self.get_inputs()
snake_case__ : List[Any] = pipe(**__UpperCamelCase ).images
snake_case__ : Union[str, Any] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
snake_case__ : Dict = np.array([0.5_9_0_2, 0.6_0_1_5, 0.6_0_2_7, 0.5_9_8_3, 0.6_0_9_2, 0.6_0_6_1, 0.5_7_6_5, 0.5_7_8_5, 0.5_5_5_5] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def __a ( self ) -> str:
'''simple docstring'''
snake_case__ : Any = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=__UpperCamelCase )
snake_case__ : Tuple = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
pipe.enable_attention_slicing()
snake_case__ : Dict = self.get_inputs()
snake_case__ : Dict = pipe(**__UpperCamelCase ).images
snake_case__ : Union[str, Any] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
snake_case__ : List[Any] = np.array([0.6_5_7_8, 0.6_8_1_7, 0.6_9_7_2, 0.6_7_6_1, 0.6_8_5_6, 0.6_9_1_6, 0.6_4_2_8, 0.6_5_1_6, 0.6_3_0_1] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def __a ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case__ : Tuple = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=__UpperCamelCase )
snake_case__ : Tuple = DDIMScheduler.from_config(pipe.scheduler.config )
pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
pipe.enable_attention_slicing()
snake_case__ : Optional[int] = self.get_inputs()
snake_case__ : Optional[int] = pipe(**__UpperCamelCase ).images
snake_case__ : Tuple = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
snake_case__ : int = np.array([0.3_8_2_8, 0.3_8_3_4, 0.3_8_1_8, 0.3_7_9_2, 0.3_8_6_5, 0.3_7_5_2, 0.3_7_9_2, 0.3_8_4_7, 0.3_7_5_3] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def __a ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : int = 0
def callback_fn(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> None:
snake_case__ : List[Any] = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
snake_case__ : Any = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
snake_case__ : int = latents[0, -3:, -3:, -1]
snake_case__ : List[str] = np.array([-0.2_4_6_3, -0.4_6_4_4, -0.9_7_5_6, 1.5_1_7_6, 1.4_4_1_4, 0.7_8_6_6, 0.9_8_9_7, 0.8_5_2_1, 0.7_9_8_3] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
elif step == 2:
snake_case__ : Dict = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
snake_case__ : Dict = latents[0, -3:, -3:, -1]
snake_case__ : Optional[Any] = np.array([-0.2_6_4_4, -0.4_6_2_6, -0.9_6_5_3, 1.5_1_7_6, 1.4_5_5_1, 0.7_6_8_6, 0.9_8_0_5, 0.8_4_5_2, 0.8_1_1_5] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
snake_case__ : str = False
snake_case__ : List[Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=__UpperCamelCase , torch_dtype=torch.floataa )
snake_case__ : int = pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
pipe.enable_attention_slicing()
snake_case__ : int = self.get_inputs()
pipe(**__UpperCamelCase , callback=__UpperCamelCase , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def __a ( self ) -> Any:
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
snake_case__ : str = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=__UpperCamelCase , torch_dtype=torch.floataa )
snake_case__ : Dict = pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
snake_case__ : str = self.get_inputs()
snake_case__ : Tuple = pipe(**__UpperCamelCase )
snake_case__ : List[Any] = torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 10**9
def __a ( self ) -> int:
'''simple docstring'''
snake_case__ : int = self.get_inputs()
# resize to resolution that is divisible by 8 but not 16 or 32
snake_case__ : Tuple = inputs['image'].resize((504, 504) )
snake_case__ : str = 'timbrooks/instruct-pix2pix'
snake_case__ : Tuple = StableDiffusionInstructPixaPixPipeline.from_pretrained(
__UpperCamelCase , safety_checker=__UpperCamelCase , )
pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
pipe.enable_attention_slicing()
snake_case__ : str = pipe(**__UpperCamelCase )
snake_case__ : List[Any] = output.images[0]
snake_case__ : List[Any] = image[255:258, 383:386, -1]
assert image.shape == (504, 504, 3)
snake_case__ : List[str] = np.array([0.2_7_2_6, 0.2_5_2_9, 0.2_6_6_4, 0.2_6_5_5, 0.2_6_4_1, 0.2_6_4_2, 0.2_5_9_1, 0.2_6_4_9, 0.2_5_9_0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
| 699 | 0 |
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class __snake_case ( unittest.TestCase ):
@slow
def __a ( self ) -> Any:
'''simple docstring'''
snake_case__ : Any = AutoImageProcessor.from_pretrained('microsoft/dit-base-finetuned-rvlcdip' )
snake_case__ : Optional[int] = AutoModelForImageClassification.from_pretrained('microsoft/dit-base-finetuned-rvlcdip' )
model.to(__UpperCamelCase )
from datasets import load_dataset
snake_case__ : Optional[int] = load_dataset('nielsr/rvlcdip-demo' )
snake_case__ : List[Any] = dataset['train'][0]['image'].convert('RGB' )
snake_case__ : Any = image_processor(__UpperCamelCase , return_tensors='pt' ).to(__UpperCamelCase )
# forward pass
with torch.no_grad():
snake_case__ : int = model(**__UpperCamelCase )
snake_case__ : Any = outputs.logits
snake_case__ : List[Any] = torch.Size((1, 16) )
self.assertEqual(logits.shape , __UpperCamelCase )
snake_case__ : List[str] = torch.tensor(
[-0.4_1_5_8, -0.4_0_9_2, -0.4_3_4_7] , device=__UpperCamelCase , dtype=torch.float , )
self.assertTrue(torch.allclose(logits[0, :3] , __UpperCamelCase , atol=1E-4 ) )
| 711 | from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForSeqaSeq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
    SquadVaProcessor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
| 699 | 0 |
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
lowerCAmelCase__ : List[str] = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co
lowerCAmelCase__ : int = '''main'''
# Default branch name
lowerCAmelCase__ : Dict = '''f2c752cfc5c0ab6f4bdec59acea69eefbee381c2'''
# One particular commit (not the top of `main`)
lowerCAmelCase__ : Optional[Any] = '''aaaaaaa'''
# This commit does not exist, so we should 404.
lowerCAmelCase__ : List[Any] = '''d9e9f15bc825e4b2c9249e9578f884bbcb5e3684'''
# Sha-1 of config.json on the top of `main`, for checking purposes
lowerCAmelCase__ : List[str] = '''4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3'''
@contextlib.contextmanager
def UpperCamelCase__ ( ) -> List[str]:
print('Welcome!' )
yield
print('Bye!' )
@contextlib.contextmanager
def UpperCamelCase__ ( ) -> Optional[Any]:
print('Bonjour!' )
yield
print('Au revoir!' )
class __snake_case ( unittest.TestCase ):
def __a ( self ) -> Union[str, Any]:
'''simple docstring'''
assert transformers.__spec__ is not None
assert importlib.util.find_spec('transformers' ) is not None
class __snake_case ( unittest.TestCase ):
@unittest.mock.patch('sys.stdout' , new_callable=io.StringIO )
def __a ( self , __UpperCamelCase ) -> Dict:
'''simple docstring'''
with ContextManagers([] ):
print('Transformers are awesome!' )
# The print statement adds a new line at the end of the output
self.assertEqual(mock_stdout.getvalue() , 'Transformers are awesome!\n' )
@unittest.mock.patch('sys.stdout' , new_callable=io.StringIO )
def __a ( self , __UpperCamelCase ) -> str:
'''simple docstring'''
with ContextManagers([context_en()] ):
print('Transformers are awesome!' )
# The output should be wrapped with an English welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , 'Welcome!\nTransformers are awesome!\nBye!\n' )
@unittest.mock.patch('sys.stdout' , new_callable=io.StringIO )
def __a ( self , __UpperCamelCase ) -> Union[str, Any]:
'''simple docstring'''
with ContextManagers([context_fr(), context_en()] ):
print('Transformers are awesome!' )
# The output should be wrapped with an English and French welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , 'Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n' )
@require_torch
def __a ( self ) -> List[str]:
'''simple docstring'''
self.assertEqual(find_labels(__UpperCamelCase ) , ['labels'] )
self.assertEqual(find_labels(__UpperCamelCase ) , ['labels', 'next_sentence_label'] )
self.assertEqual(find_labels(__UpperCamelCase ) , ['start_positions', 'end_positions'] )
class __snake_case ( _lowerCamelCase ):
pass
self.assertEqual(find_labels(__UpperCamelCase ) , ['labels'] )
@require_tf
def __a ( self ) -> int:
'''simple docstring'''
self.assertEqual(find_labels(__UpperCamelCase ) , ['labels'] )
self.assertEqual(find_labels(__UpperCamelCase ) , ['labels', 'next_sentence_label'] )
self.assertEqual(find_labels(__UpperCamelCase ) , ['start_positions', 'end_positions'] )
class __snake_case ( _lowerCamelCase ):
pass
self.assertEqual(find_labels(__UpperCamelCase ) , ['labels'] )
@require_flax
def __a ( self ) -> Optional[int]:
'''simple docstring'''
self.assertEqual(find_labels(__UpperCamelCase ) , [] )
self.assertEqual(find_labels(__UpperCamelCase ) , [] )
self.assertEqual(find_labels(__UpperCamelCase ) , [] )
class __snake_case ( _lowerCamelCase ):
pass
self.assertEqual(find_labels(__UpperCamelCase ) , [] )
| 712 | from dataclasses import dataclass, field
from typing import Optional
from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser
@dataclass
class __snake_case :
__lowerCamelCase = field(
metadata={"""help""": """The output directory where the model will be written."""} ,)
__lowerCamelCase = field(
metadata={
"""help""": (
"""The encoder model checkpoint for weights initialization."""
"""Don't set if you want to train an encoder model from scratch."""
)
} ,)
__lowerCamelCase = field(
metadata={
"""help""": (
"""The decoder model checkpoint for weights initialization."""
"""Don't set if you want to train a decoder model from scratch."""
)
} ,)
__lowerCamelCase = field(
default=_lowerCamelCase ,metadata={"""help""": """Pretrained encoder config name or path if not the same as encoder_model_name"""} )
__lowerCamelCase = field(
default=_lowerCamelCase ,metadata={"""help""": """Pretrained decoder config name or path if not the same as decoder_model_name"""} )
def UpperCamelCase__ ( ) -> Union[str, Any]:
snake_case__ : str = HfArgumentParser((ModelArguments,) )
((snake_case__) , ) : Dict = parser.parse_args_into_dataclasses()
# Load pretrained model and tokenizer
# Use explicit specified encoder config
if model_args.encoder_config_name:
snake_case__ : List[str] = AutoConfig.from_pretrained(model_args.encoder_config_name )
# Use pretrained encoder model's config
else:
snake_case__ : Optional[int] = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path )
# Use explicit specified decoder config
if model_args.decoder_config_name:
snake_case__ : Optional[Any] = AutoConfig.from_pretrained(model_args.decoder_config_name )
# Use pretrained decoder model's config
else:
snake_case__ : List[str] = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path )
# necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed
snake_case__ : Any = True
snake_case__ : Dict = True
snake_case__ : Tuple = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained(
encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path , decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path , encoder_config=A__ , decoder_config=A__ , )
# GPT2 only has bos/eos tokens but not decoder_start/pad tokens
snake_case__ : Optional[Any] = decoder_config.decoder_start_token_id
snake_case__ : Tuple = decoder_config.pad_token_id
if decoder_start_token_id is None:
snake_case__ : Optional[Any] = decoder_config.bos_token_id
if pad_token_id is None:
snake_case__ : int = decoder_config.eos_token_id
# This is necessary to make Flax's generate() work
snake_case__ : Union[str, Any] = decoder_config.eos_token_id
snake_case__ : Optional[int] = decoder_start_token_id
snake_case__ : int = pad_token_id
snake_case__ : Tuple = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path )
snake_case__ : int = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path )
snake_case__ : Tuple = tokenizer.convert_ids_to_tokens(model.config.pad_token_id )
model.save_pretrained(model_args.output_dir )
image_processor.save_pretrained(model_args.output_dir )
tokenizer.save_pretrained(model_args.output_dir )
if __name__ == "__main__":
main()
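# Example invocation (hypothetical paths and checkpoint names, for illustration only):
#
#   python create_model.py \
#       --output_dir ./vit-gpt2 \
#       --encoder_model_name_or_path google/vit-base-patch16-224-in21k \
#       --decoder_model_name_or_path gpt2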
| 699 | 0 |
from copy import deepcopy
class __snake_case :
def __init__( self , __UpperCamelCase = None , __UpperCamelCase = None ) -> None:
'''simple docstring'''
if arr is None and size is not None:
snake_case__ : List[Any] = size
snake_case__ : Union[str, Any] = [0] * size
elif arr is not None:
self.init(__UpperCamelCase )
else:
raise ValueError('Either arr or size must be specified' )
def __a ( self , __UpperCamelCase ) -> None:
'''simple docstring'''
snake_case__ : Optional[int] = len(__UpperCamelCase )
snake_case__ : List[Any] = deepcopy(__UpperCamelCase )
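        # O(n) construction: push each position's partial sum up to its Fenwick parent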
for i in range(1 , self.size ):
snake_case__ : List[str] = self.next_(__UpperCamelCase )
if j < self.size:
self.tree[j] += self.tree[i]
def __a ( self ) -> list[int]:
'''simple docstring'''
snake_case__ : int = self.tree[:]
for i in range(self.size - 1 , 0 , -1 ):
snake_case__ : str = self.next_(__UpperCamelCase )
if j < self.size:
arr[j] -= arr[i]
return arr
@staticmethod
def __a ( __UpperCamelCase ) -> int:
'''simple docstring'''
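        # advance to the parent in the update direction by adding the lowest set bit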
return index + (index & (-index))
@staticmethod
def __a ( __UpperCamelCase ) -> int:
'''simple docstring'''
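        # move toward the root in the query direction by stripping the lowest set bit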
return index - (index & (-index))
def __a ( self , __UpperCamelCase , __UpperCamelCase ) -> None:
'''simple docstring'''
if index == 0:
self.tree[0] += value
return
while index < self.size:
self.tree[index] += value
snake_case__ : Any = self.next_(__UpperCamelCase )
def __a ( self , __UpperCamelCase , __UpperCamelCase ) -> None:
'''simple docstring'''
self.add(__UpperCamelCase , value - self.get(__UpperCamelCase ) )
def __a ( self , __UpperCamelCase ) -> int:
'''simple docstring'''
if right == 0:
return 0
snake_case__ : Union[str, Any] = self.tree[0]
right -= 1 # make right inclusive
while right > 0:
result += self.tree[right]
snake_case__ : Optional[Any] = self.prev(__UpperCamelCase )
return result
def __a ( self , __UpperCamelCase , __UpperCamelCase ) -> int:
'''simple docstring'''
return self.prefix(__UpperCamelCase ) - self.prefix(__UpperCamelCase )
def __a ( self , __UpperCamelCase ) -> int:
'''simple docstring'''
return self.query(__UpperCamelCase , index + 1 )
def __a ( self , __UpperCamelCase ) -> int:
'''simple docstring'''
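        # binary lifting: locate the largest index whose prefix sum does not exceed `value`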
value -= self.tree[0]
if value < 0:
return -1
snake_case__ : Optional[int] = 1 # Largest power of 2 <= size
while j * 2 < self.size:
j *= 2
snake_case__ : Any = 0
while j > 0:
if i + j < self.size and self.tree[i + j] <= value:
value -= self.tree[i + j]
i += j
j //= 2
return i
if __name__ == "__main__":
import doctest
doctest.testmod()
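# Minimal usage sketch with illustrative values (assuming the original method
# names add/query/rank_query for the renamed methods above):
#
#   bit = __snake_case(arr=[1, 2, 3, 4])
#   bit.add(1, 10)       # point update: the array becomes [1, 12, 3, 4]
#   bit.query(0, 3)      # range sum over [0, 3) -> 16
#   bit.rank_query(13)   # largest index with prefix sum <= 13 -> 1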
| 713 | import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def UpperCamelCase__ ( A__ , A__ , A__ , A__ , A__ , A__ = None , ) -> Optional[int]:
snake_case__ : List[str] = {}
if train_file is not None:
snake_case__ : Tuple = [train_file]
if eval_file is not None:
snake_case__ : Dict = [eval_file]
if test_file is not None:
snake_case__ : str = [test_file]
snake_case__ : Optional[Any] = datasets.load_dataset('csv' , data_files=A__ )
snake_case__ : Any = list(ds[list(files.keys() )[0]].features.keys() )
snake_case__ : Optional[Any] = features_name.pop(A__ )
snake_case__ : Optional[Any] = list(set(ds[list(files.keys() )[0]][label_name] ) )
snake_case__ : str = {label: i for i, label in enumerate(A__ )}
snake_case__ : int = tokenizer.model_input_names
snake_case__ : int = {}
if len(A__ ) == 1:
for k in files.keys():
snake_case__ : str = ds[k].map(
lambda A__ : tokenizer.batch_encode_plus(
example[features_name[0]] , truncation=A__ , max_length=A__ , padding='max_length' ) , batched=A__ , )
elif len(A__ ) == 2:
for k in files.keys():
snake_case__ : Optional[int] = ds[k].map(
lambda A__ : tokenizer.batch_encode_plus(
(example[features_name[0]], example[features_name[1]]) , truncation=A__ , max_length=A__ , padding='max_length' , ) , batched=A__ , )
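    # wrap each tokenized split in a plain Python generator so tf.data can stream
    # (features, label) pairs without materializing the whole dataset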
def gen_train():
for ex in transformed_ds[datasets.Split.TRAIN]:
snake_case__ : int = {k: v for k, v in ex.items() if k in input_names}
snake_case__ : Any = labelaid[ex[label_name]]
yield (d, label)
def gen_val():
for ex in transformed_ds[datasets.Split.VALIDATION]:
snake_case__ : int = {k: v for k, v in ex.items() if k in input_names}
snake_case__ : Union[str, Any] = labelaid[ex[label_name]]
yield (d, label)
def gen_test():
for ex in transformed_ds[datasets.Split.TEST]:
snake_case__ : Dict = {k: v for k, v in ex.items() if k in input_names}
snake_case__ : List[str] = labelaid[ex[label_name]]
yield (d, label)
snake_case__ : Any = (
tf.data.Dataset.from_generator(
A__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TRAIN in transformed_ds
else None
)
if train_ds is not None:
snake_case__ : str = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
snake_case__ : Optional[int] = (
tf.data.Dataset.from_generator(
A__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.VALIDATION in transformed_ds
else None
)
if val_ds is not None:
snake_case__ : Optional[int] = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
snake_case__ : List[str] = (
tf.data.Dataset.from_generator(
A__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TEST in transformed_ds
else None
)
if test_ds is not None:
snake_case__ : str = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
return train_ds, val_ds, test_ds, labelaid
lowerCAmelCase__ : List[str] = logging.getLogger(__name__)
@dataclass
class __snake_case :
__lowerCamelCase = field(metadata={"""help""": """Which column contains the label"""} )
__lowerCamelCase = field(default=_lowerCamelCase ,metadata={"""help""": """The path of the training file"""} )
__lowerCamelCase = field(default=_lowerCamelCase ,metadata={"""help""": """The path of the development file"""} )
__lowerCamelCase = field(default=_lowerCamelCase ,metadata={"""help""": """The path of the test file"""} )
__lowerCamelCase = field(
default=128 ,metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} ,)
__lowerCamelCase = field(
default=_lowerCamelCase ,metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
@dataclass
class __snake_case :
__lowerCamelCase = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
__lowerCamelCase = field(
default=_lowerCamelCase ,metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
__lowerCamelCase = field(
default=_lowerCamelCase ,metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
__lowerCamelCase = field(default=_lowerCamelCase ,metadata={"""help""": """Set this flag to use fast tokenization."""} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
__lowerCamelCase = field(
default=_lowerCamelCase ,metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} ,)
def UpperCamelCase__ ( ) -> Union[str, Any]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
snake_case__ : int = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
snake_case__ , snake_case__ , snake_case__ : Dict = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
' --overwrite_output_dir to overcome.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO , )
logger.info(
F"""n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, """
F"""16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
snake_case__ : Dict = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
snake_case__ , snake_case__ , snake_case__ , snake_case__ : Dict = get_tfds(
train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=A__ , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )
snake_case__ : Dict = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(A__ ) , labelaid=A__ , idalabel={id: label for label, id in labelaid.items()} , finetuning_task='text-classification' , cache_dir=model_args.cache_dir , )
with training_args.strategy.scope():
snake_case__ : Optional[Any] = TFAutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_pt=bool('.bin' in model_args.model_name_or_path ) , config=A__ , cache_dir=model_args.cache_dir , )
def compute_metrics(A__ ) -> Dict:
snake_case__ : Optional[Any] = np.argmax(p.predictions , axis=1 )
return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
snake_case__ : Any = TFTrainer(
model=A__ , args=A__ , train_dataset=A__ , eval_dataset=A__ , compute_metrics=A__ , )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
snake_case__ : Dict = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
snake_case__ : Tuple = trainer.evaluate()
snake_case__ : Any = os.path.join(training_args.output_dir , 'eval_results.txt' )
with open(A__ , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key, value in result.items():
logger.info(F""" {key} = {value}""" )
writer.write(F"""{key} = {value}\n""" )
results.update(A__ )
return results
if __name__ == "__main__":
main()
| 699 | 0 |
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNetaDModel
lowerCAmelCase__ : List[str] = HfApi()
lowerCAmelCase__ : str = {}
# fmt: off
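# Expected 30-element output slices, one per released checkpoint, used below to verify
# that the ported UNet weights reproduce the reference outputs.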
lowerCAmelCase__ : int = torch.tensor([
-0.75_15, -1.68_83, 0.24_20, 0.03_00, 0.63_47, 1.34_33, -1.17_43, -3.74_67,
1.23_42, -2.24_85, 0.46_36, 0.80_76, -0.79_91, 0.39_69, 0.84_98, 0.91_89,
-1.88_87, -3.35_22, 0.76_39, 0.20_40, 0.62_71, -2.71_48, -1.63_16, 3.08_39,
0.31_86, 0.27_21, -0.97_59, -1.24_61, 2.62_57, 1.35_57
])
lowerCAmelCase__ : Dict = torch.tensor([
-2.36_39, -2.53_44, 0.00_54, -0.66_74, 1.59_90, 1.01_58, 0.31_24, -2.14_36,
1.87_95, -2.54_29, -0.15_66, -0.39_73, 1.24_90, 2.64_47, 1.22_83, -0.52_08,
-2.81_54, -3.51_19, 2.38_38, 1.20_33, 1.72_01, -2.12_56, -1.45_76, 2.79_48,
2.42_04, -0.97_52, -1.25_46, 0.80_27, 3.27_58, 3.13_65
])
lowerCAmelCase__ : Dict = torch.tensor([
-0.65_31, -0.68_91, -0.31_72, -0.53_75, -0.91_40, -0.53_67, -0.11_75, -0.78_69,
-0.38_08, -0.45_13, -0.20_98, -0.00_83, 0.31_83, 0.51_40, 0.22_47, -0.13_04,
-0.13_02, -0.28_02, -0.20_84, -0.20_25, -0.49_67, -0.48_73, -0.08_61, 0.69_25,
0.02_50, 0.12_90, -0.15_43, 0.63_16, 1.04_60, 1.49_43
])
lowerCAmelCase__ : List[str] = torch.tensor([
0.09_11, 0.11_07, 0.01_82, 0.04_35, -0.08_05, -0.06_08, 0.03_81, 0.21_72,
-0.02_80, 0.13_27, -0.02_99, -0.02_55, -0.00_50, -0.11_70, -0.10_46, 0.03_09,
0.13_67, 0.17_28, -0.05_33, -0.07_48, -0.05_34, 0.16_24, 0.03_84, -0.18_05,
-0.07_07, 0.06_42, 0.02_20, -0.01_34, -0.13_33, -0.15_05
])
lowerCAmelCase__ : Union[str, Any] = torch.tensor([
0.13_21, 0.13_37, 0.04_40, 0.06_22, -0.05_91, -0.03_70, 0.05_03, 0.21_33,
-0.01_77, 0.14_15, -0.01_16, -0.01_12, 0.00_44, -0.09_80, -0.07_89, 0.03_95,
0.15_02, 0.17_85, -0.04_88, -0.05_14, -0.04_04, 0.15_39, 0.04_54, -0.15_59,
-0.06_65, 0.06_59, 0.03_83, -0.00_05, -0.12_66, -0.13_86
])
lowerCAmelCase__ : List[Any] = torch.tensor([
0.11_54, 0.12_18, 0.03_07, 0.05_26, -0.07_11, -0.05_41, 0.03_66, 0.20_78,
-0.02_67, 0.13_17, -0.02_26, -0.01_93, -0.00_14, -0.10_55, -0.09_02, 0.03_30,
0.13_91, 0.17_09, -0.05_62, -0.06_93, -0.05_60, 0.14_82, 0.03_81, -0.16_83,
-0.06_81, 0.06_61, 0.03_31, -0.00_46, -0.12_68, -0.14_31
])
lowerCAmelCase__ : Optional[Any] = torch.tensor([
0.11_92, 0.12_40, 0.04_14, 0.06_06, -0.05_57, -0.04_12, 0.04_30, 0.20_42,
-0.02_00, 0.13_85, -0.01_15, -0.01_32, 0.00_17, -0.09_65, -0.08_02, 0.03_98,
0.14_33, 0.17_47, -0.04_58, -0.05_33, -0.04_07, 0.15_45, 0.04_19, -0.15_74,
-0.06_45, 0.06_26, 0.03_41, -0.00_10, -0.11_99, -0.13_90
])
lowerCAmelCase__ : List[str] = torch.tensor([
0.10_75, 0.10_74, 0.02_05, 0.04_31, -0.07_74, -0.06_07, 0.02_98, 0.20_42,
-0.03_20, 0.12_67, -0.02_81, -0.02_50, -0.00_64, -0.10_91, -0.09_46, 0.02_90,
0.13_28, 0.16_50, -0.05_80, -0.07_38, -0.05_86, 0.14_40, 0.03_37, -0.17_46,
-0.07_12, 0.06_05, 0.02_50, -0.00_99, -0.13_16, -0.14_73
])
lowerCAmelCase__ : List[str] = torch.tensor([
-1.45_72, -2.04_81, -0.04_14, -0.60_05, 1.41_36, 0.58_48, 0.40_28, -2.73_30,
1.22_12, -2.12_28, 0.21_55, 0.40_39, 0.76_62, 2.05_35, 0.74_77, -0.32_43,
-2.17_58, -2.76_48, 1.69_47, 0.70_26, 1.23_38, -1.60_78, -0.86_82, 2.28_10,
1.85_74, -0.57_18, -0.55_86, -0.01_86, 2.34_15, 2.12_51])
lowerCAmelCase__ : List[Any] = torch.tensor([
-1.36_90, -1.97_20, -0.40_90, -0.69_66, 1.46_60, 0.99_38, -0.13_85, -2.73_24,
0.77_36, -1.89_17, 0.29_23, 0.42_93, 0.16_93, 1.41_12, 1.18_87, -0.31_81,
-2.21_60, -2.63_81, 1.31_70, 0.81_63, 0.92_40, -1.65_44, -0.60_99, 2.52_59,
1.64_30, -0.90_90, -0.93_92, -0.01_26, 2.42_68, 2.32_66
])
lowerCAmelCase__ : Tuple = torch.tensor([
-1.35_25, -1.96_28, -0.39_56, -0.68_60, 1.46_64, 1.00_14, -0.12_59, -2.72_12,
0.77_72, -1.88_11, 0.29_96, 0.43_88, 0.17_04, 1.40_29, 1.17_01, -0.30_27,
-2.20_53, -2.62_87, 1.33_50, 0.81_31, 0.92_74, -1.62_92, -0.60_98, 2.51_31,
1.65_05, -0.89_58, -0.92_98, -0.01_51, 2.42_57, 2.33_55
])
lowerCAmelCase__ : List[str] = torch.tensor([
-2.05_85, -2.78_97, -0.28_50, -0.89_40, 1.90_52, 0.57_02, 0.63_45, -3.89_59,
1.59_32, -3.23_19, 0.19_74, 0.02_87, 1.75_66, 2.65_43, 0.83_87, -0.53_51,
-3.27_36, -4.33_75, 2.90_29, 1.63_90, 1.46_40, -2.17_01, -1.90_13, 2.93_41,
3.49_81, -0.62_55, -1.16_44, -0.15_91, 3.70_97, 3.20_66
])
lowerCAmelCase__ : Dict = torch.tensor([
-2.31_39, -2.55_94, -0.01_97, -0.67_85, 1.70_01, 1.16_06, 0.30_75, -2.17_40,
1.80_71, -2.56_30, -0.09_26, -0.38_11, 1.21_16, 2.62_46, 1.27_31, -0.53_98,
-2.81_53, -3.61_40, 2.38_93, 1.32_62, 1.62_58, -2.18_56, -1.32_67, 2.83_95,
2.37_79, -1.06_23, -1.24_68, 0.89_59, 3.33_67, 3.22_43
])
lowerCAmelCase__ : Dict = torch.tensor([
-2.06_28, -2.76_67, -0.20_89, -0.82_63, 2.05_39, 0.59_92, 0.64_95, -3.83_36,
1.60_25, -3.28_17, 0.17_21, -0.06_33, 1.75_16, 2.70_39, 0.81_00, -0.59_08,
-3.21_13, -4.43_43, 2.92_57, 1.36_32, 1.55_62, -2.14_89, -1.98_94, 3.05_60,
3.33_96, -0.73_28, -1.04_17, 0.03_83, 3.70_93, 3.23_43
])
lowerCAmelCase__ : Any = torch.tensor([
-1.45_74, -2.05_69, -0.04_73, -0.61_17, 1.40_18, 0.57_69, 0.41_29, -2.73_44,
1.22_41, -2.13_97, 0.20_00, 0.39_37, 0.76_16, 2.04_53, 0.73_24, -0.33_91,
-2.17_46, -2.77_44, 1.69_63, 0.69_21, 1.21_87, -1.61_72, -0.88_77, 2.24_39,
1.84_71, -0.58_39, -0.56_05, -0.04_64, 2.32_50, 2.12_19
])
# fmt: on
lowerCAmelCase__ : Any = api.list_models(filter='''diffusers''')
for mod in models:
if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
lowerCAmelCase__ : List[str] = '''/home/patrick/google_checkpoints/''' + mod.modelId.split('''/''')[-1]
print(F'''Started running {mod.modelId}!!!''')
if mod.modelId.startswith('''CompVis'''):
lowerCAmelCase__ : int = UNetaDModel.from_pretrained(local_checkpoint, subfolder='''unet''')
else:
lowerCAmelCase__ : str = UNetaDModel.from_pretrained(local_checkpoint)
torch.manual_seed(0)
random.seed(0)
lowerCAmelCase__ : Any = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
lowerCAmelCase__ : List[str] = torch.tensor([10] * noise.shape[0])
with torch.no_grad():
lowerCAmelCase__ : int = model(noise, time_step).sample
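        # The expected-results key is the model id with "/" and "-" mapped to "_",
        # e.g. "google/ddpm-cifar10-32" -> "google_ddpm_cifar10_32" (see the joins below).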
assert torch.allclose(
logits[0, 0, 0, :30], results['''_'''.join('''_'''.join(mod.modelId.split('''/''')).split('''-'''))], atol=1E-3
)
print(F'''{mod.modelId} has passed successfully!!!''')
| 714 | from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
lowerCAmelCase__ : List[Any] = datasets.utils.logging.get_logger(__name__)
class __snake_case ( folder_based_builder.FolderBasedBuilderConfig ):
__lowerCamelCase = None
__lowerCamelCase = None
class __snake_case ( folder_based_builder.FolderBasedBuilder ):
__lowerCamelCase = datasets.Audio()
__lowerCamelCase = """audio"""
__lowerCamelCase = AudioFolderConfig
__lowerCamelCase = 42 # definition at the bottom of the script
__lowerCamelCase = AudioClassification(audio_column="""audio""" ,label_column="""label""" )
lowerCAmelCase__ : Tuple = [
'''.aiff''',
'''.au''',
'''.avr''',
'''.caf''',
'''.flac''',
'''.htk''',
'''.svx''',
'''.mat4''',
'''.mat5''',
'''.mpc2k''',
'''.ogg''',
'''.paf''',
'''.pvf''',
'''.raw''',
'''.rf64''',
'''.sd2''',
'''.sds''',
'''.ircam''',
'''.voc''',
'''.w64''',
'''.wav''',
'''.nist''',
'''.wavex''',
'''.wve''',
'''.xi''',
'''.mp3''',
'''.opus''',
]
lowerCAmelCase__ : List[Any] = AUDIO_EXTENSIONS
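# Minimal usage sketch (this builder backs the standard `datasets` "audiofolder" loader;
# a <data_dir>/<label>/<audio file> directory layout is assumed):
#   from datasets import load_dataset
#   ds = load_dataset("audiofolder", data_dir="path/to/audio_files")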
| 699 | 0 |
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def UpperCamelCase__ ( A__ , A__ ) -> str:
snake_case__ : int = XCLIPTextConfig()
# derive patch size from model name
snake_case__ : str = model_name.find('patch' )
snake_case__ : Optional[int] = int(model_name[start_idx + len('patch' ) : start_idx + len('patch' ) + 2] )
snake_case__ : str = XCLIPVisionConfig(patch_size=A__ , num_frames=A__ )
if "large" in model_name:
snake_case__ : Union[str, Any] = 768
snake_case__ : Tuple = 3072
snake_case__ : int = 12
snake_case__ : Optional[int] = 1024
snake_case__ : Any = 4096
snake_case__ : List[Any] = 16
snake_case__ : List[Any] = 24
snake_case__ : Any = 768
snake_case__ : int = 3072
if model_name == "xclip-large-patch14-16-frames":
snake_case__ : List[Any] = 336
snake_case__ : str = XCLIPConfig.from_text_vision_configs(A__ , A__ )
if "large" in model_name:
snake_case__ : Optional[Any] = 768
return config
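# Example: for "xclip-base-patch32-16-frames" the two characters after "patch" give
# patch_size=32; num_frames is passed in separately (8/16/32, derived from the model
# name by the caller further below).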
def UpperCamelCase__ ( A__ ) -> List[str]:
# text encoder
if name == "token_embedding.weight":
snake_case__ : Tuple = name.replace('token_embedding.weight' , 'text_model.embeddings.token_embedding.weight' )
if name == "positional_embedding":
snake_case__ : int = name.replace('positional_embedding' , 'text_model.embeddings.position_embedding.weight' )
if "ln_1" in name:
snake_case__ : Any = name.replace('ln_1' , 'layer_norm1' )
if "ln_2" in name:
snake_case__ : Optional[Any] = name.replace('ln_2' , 'layer_norm2' )
if "c_fc" in name:
snake_case__ : Dict = name.replace('c_fc' , 'fc1' )
if "c_proj" in name:
snake_case__ : Tuple = name.replace('c_proj' , 'fc2' )
if name.startswith('transformer.resblocks' ):
snake_case__ : Dict = name.replace('transformer.resblocks' , 'text_model.encoder.layers' )
if "attn.out_proj" in name and "message" not in name:
snake_case__ : Dict = name.replace('attn.out_proj' , 'self_attn.out_proj' )
if "ln_final" in name:
snake_case__ : Optional[int] = name.replace('ln_final' , 'text_model.final_layer_norm' )
# visual encoder
if name == "visual.class_embedding":
snake_case__ : Tuple = name.replace('visual.class_embedding' , 'vision_model.embeddings.class_embedding' )
if name == "visual.positional_embedding":
snake_case__ : Optional[Any] = name.replace('visual.positional_embedding' , 'vision_model.embeddings.position_embedding.weight' )
if name.startswith('visual.transformer.resblocks' ):
snake_case__ : Optional[int] = name.replace('visual.transformer.resblocks' , 'vision_model.encoder.layers' )
if "visual.conv1" in name:
snake_case__ : Optional[Any] = name.replace('visual.conv1' , 'vision_model.embeddings.patch_embedding' )
if "visual.ln_pre" in name:
snake_case__ : Optional[int] = name.replace('visual.ln_pre' , 'vision_model.pre_layernorm' )
if "visual.ln_post" in name:
snake_case__ : Optional[Any] = name.replace('visual.ln_post' , 'vision_model.post_layernorm' )
if "visual.proj" in name:
snake_case__ : Optional[int] = name.replace('visual.proj' , 'visual_projection.weight' )
if "text_projection" in name:
snake_case__ : Tuple = name.replace('text_projection' , 'text_projection.weight' )
# things on top
if "prompts_visual_proj" in name:
snake_case__ : Any = name.replace('prompts_visual_proj' , 'prompts_visual_projection' )
if "prompts_visual_ln" in name:
snake_case__ : Union[str, Any] = name.replace('prompts_visual_ln' , 'prompts_visual_layernorm' )
# mit
if name == "mit.positional_embedding":
snake_case__ : List[str] = name.replace('positional' , 'position' )
if name.startswith('mit.resblocks' ):
snake_case__ : Dict = name.replace('mit.resblocks' , 'mit.encoder.layers' )
# prompts generator
if name.startswith('prompts_generator.norm' ):
snake_case__ : Any = name.replace('prompts_generator.norm' , 'prompts_generator.layernorm' )
return name
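# Illustrative mappings produced by rename_key, each following one rule above:
#   "visual.conv1.weight" -> "vision_model.embeddings.patch_embedding.weight"
#   "ln_final.weight"     -> "text_model.final_layer_norm.weight"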
def UpperCamelCase__ ( A__ , A__ ) -> List[Any]:
for key in orig_state_dict.copy().keys():
snake_case__ : Union[str, Any] = orig_state_dict.pop(A__ )
if "attn.in_proj" in key:
snake_case__ : Any = key.split('.' )
if key.startswith('visual' ):
snake_case__ : List[Any] = key_split[3]
snake_case__ : Tuple = config.vision_config.hidden_size
if "message_attn" in key:
if "weight" in key:
snake_case__ : int = val[
:dim, :
]
snake_case__ : Dict = val[
dim : dim * 2, :
]
snake_case__ : List[str] = val[
-dim:, :
]
else:
snake_case__ : Optional[int] = val[
:dim
]
snake_case__ : str = val[
dim : dim * 2
]
snake_case__ : List[str] = val[
-dim:
]
else:
if "weight" in key:
snake_case__ : int = val[
:dim, :
]
snake_case__ : List[str] = val[
dim : dim * 2, :
]
snake_case__ : Optional[int] = val[
-dim:, :
]
else:
snake_case__ : Dict = val[:dim]
snake_case__ : List[Any] = val[
dim : dim * 2
]
snake_case__ : Optional[int] = val[-dim:]
elif key.startswith('mit' ):
snake_case__ : Any = key_split[2]
snake_case__ : Optional[int] = config.vision_config.mit_hidden_size
if "weight" in key:
snake_case__ : Union[str, Any] = val[:dim, :]
snake_case__ : int = val[dim : dim * 2, :]
snake_case__ : Tuple = val[-dim:, :]
else:
snake_case__ : Optional[Any] = val[:dim]
snake_case__ : Any = val[dim : dim * 2]
snake_case__ : List[str] = val[-dim:]
else:
snake_case__ : Any = key_split[2]
snake_case__ : Any = config.text_config.hidden_size
if "weight" in key:
snake_case__ : int = val[:dim, :]
snake_case__ : Optional[Any] = val[
dim : dim * 2, :
]
snake_case__ : str = val[-dim:, :]
else:
snake_case__ : Any = val[:dim]
snake_case__ : Tuple = val[
dim : dim * 2
]
snake_case__ : List[Any] = val[-dim:]
else:
snake_case__ : List[Any] = rename_key(A__ )
if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
snake_case__ : List[str] = val.T
snake_case__ : List[str] = val
return orig_state_dict
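# convert_state_dict splits each fused "attn.in_proj" weight/bias into three equal
# parts -- rows [:dim], [dim : dim * 2], and [-dim:] -- the usual query/key/value
# layout of a fused in-projection.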
def UpperCamelCase__ ( A__ ) -> List[Any]:
if num_frames == 8:
snake_case__ : int = 'eating_spaghetti_8_frames.npy'
elif num_frames == 16:
snake_case__ : List[str] = 'eating_spaghetti.npy'
elif num_frames == 32:
snake_case__ : List[Any] = 'eating_spaghetti_32_frames.npy'
snake_case__ : str = hf_hub_download(
repo_id='hf-internal-testing/spaghetti-video' , filename=A__ , repo_type='dataset' , )
snake_case__ : str = np.load(A__ )
return list(A__ )
def UpperCamelCase__ ( A__ , A__=None , A__=False ) -> Optional[Any]:
snake_case__ : Optional[Any] = {
# fully supervised kinetics-400 checkpoints
'xclip-base-patch32': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth',
'xclip-base-patch32-16-frames': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth'
),
'xclip-base-patch16': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth',
'xclip-base-patch16-16-frames': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth'
),
'xclip-large-patch14': 'https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb',
'xclip-large-patch14-16-frames': 'https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f',
# fully supervised kinetics-600 checkpoints
'xclip-base-patch16-kinetics-600': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth'
),
'xclip-base-patch16-kinetics-600-16-frames': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth'
),
'xclip-large-patch14-kinetics-600': 'https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be',
# few shot
'xclip-base-patch16-hmdb-2-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth'
),
'xclip-base-patch16-hmdb-4-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth'
),
'xclip-base-patch16-hmdb-8-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth'
),
'xclip-base-patch16-hmdb-16-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth'
),
'xclip-base-patch16-ucf-2-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth'
),
'xclip-base-patch16-ucf-4-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth'
),
'xclip-base-patch16-ucf-8-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth'
),
'xclip-base-patch16-ucf-16-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth'
),
# zero shot
'xclip-base-patch16-zero-shot': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth',
}
snake_case__ : Union[str, Any] = model_to_url[model_name]
snake_case__ : Dict = 8
if "16-frames" in model_name:
snake_case__ : List[str] = 16
elif "shot" in model_name:
snake_case__ : str = 32
snake_case__ : Tuple = get_xclip_config(A__ , A__ )
snake_case__ : Optional[Any] = XCLIPModel(A__ )
model.eval()
if "drive" in checkpoint_url:
snake_case__ : Tuple = 'pytorch_model.bin'
gdown.cached_download(A__ , A__ , quiet=A__ )
snake_case__ : Dict = torch.load(A__ , map_location='cpu' )['model']
else:
snake_case__ : Dict = torch.hub.load_state_dict_from_url(A__ )['model']
snake_case__ : Tuple = convert_state_dict(A__ , A__ )
snake_case__ : Dict = XCLIPModel(A__ )
snake_case__ : int = model.load_state_dict(A__ , strict=A__ )
assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
model.eval()
snake_case__ : str = 336 if model_name == 'xclip-large-patch14-16-frames' else 224
snake_case__ : int = VideoMAEImageProcessor(size=A__ )
snake_case__ : int = CLIPTokenizer.from_pretrained('openai/clip-vit-base-patch32' )
snake_case__ : List[str] = CLIPTokenizerFast.from_pretrained('openai/clip-vit-base-patch32' )
snake_case__ : Dict = XCLIPProcessor(image_processor=A__ , tokenizer=A__ )
snake_case__ : Union[str, Any] = prepare_video(A__ )
snake_case__ : int = processor(
text=['playing sports', 'eating spaghetti', 'go shopping'] , videos=A__ , return_tensors='pt' , padding=A__ )
print('Shape of pixel values:' , inputs.pixel_values.shape )
with torch.no_grad():
snake_case__ : Dict = model(**A__ )
# Verify outputs
snake_case__ : Union[str, Any] = outputs.logits_per_video
snake_case__ : Optional[Any] = logits_per_video.softmax(dim=1 )
print('Probs:' , A__ )
# kinetics-400
if model_name == "xclip-base-patch32":
snake_case__ : Tuple = torch.tensor([[0.0_0_1_9, 0.9_9_5_1, 0.0_0_3_0]] )
elif model_name == "xclip-base-patch32-16-frames":
snake_case__ : Tuple = torch.tensor([[7.0_9_9_9e-0_4, 9.9_8_8_3e-0_1, 4.5_5_8_0e-0_4]] )
elif model_name == "xclip-base-patch16":
snake_case__ : List[str] = torch.tensor([[0.0_0_8_3, 0.9_6_8_1, 0.0_2_3_6]] )
elif model_name == "xclip-base-patch16-16-frames":
snake_case__ : Dict = torch.tensor([[7.6_9_3_7e-0_4, 9.9_7_2_8e-0_1, 1.9_4_7_3e-0_3]] )
elif model_name == "xclip-large-patch14":
snake_case__ : List[Any] = torch.tensor([[0.0_0_6_2, 0.9_8_6_4, 0.0_0_7_5]] )
elif model_name == "xclip-large-patch14-16-frames":
snake_case__ : int = torch.tensor([[3.3_8_7_7e-0_4, 9.9_9_3_7e-0_1, 2.8_8_8_8e-0_4]] )
# kinetics-600
elif model_name == "xclip-base-patch16-kinetics-600":
snake_case__ : str = torch.tensor([[0.0_5_5_5, 0.8_9_1_4, 0.0_5_3_1]] )
elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
snake_case__ : str = torch.tensor([[3.8_5_5_4e-0_4, 9.9_9_2_9e-0_1, 3.2_7_5_4e-0_4]] )
elif model_name == "xclip-large-patch14-kinetics-600":
snake_case__ : Optional[int] = torch.tensor([[0.0_0_3_6, 0.9_9_2_0, 0.0_0_4_5]] )
# few shot
elif model_name == "xclip-base-patch16-hmdb-2-shot":
snake_case__ : str = torch.tensor([[7.1_8_9_0e-0_6, 9.9_9_9_4e-0_1, 5.6_5_5_9e-0_5]] )
elif model_name == "xclip-base-patch16-hmdb-4-shot":
snake_case__ : int = torch.tensor([[1.0_3_2_0e-0_5, 9.9_9_9_3e-0_1, 6.2_4_3_5e-0_5]] )
elif model_name == "xclip-base-patch16-hmdb-8-shot":
snake_case__ : List[Any] = torch.tensor([[4.1_3_7_7e-0_6, 9.9_9_9_0e-0_1, 9.8_3_8_6e-0_5]] )
elif model_name == "xclip-base-patch16-hmdb-16-shot":
snake_case__ : List[Any] = torch.tensor([[4.1_3_4_7e-0_5, 9.9_9_6_2e-0_1, 3.3_4_1_1e-0_4]] )
elif model_name == "xclip-base-patch16-ucf-2-shot":
snake_case__ : Optional[int] = torch.tensor([[8.5_8_5_7e-0_5, 9.9_9_2_8e-0_1, 6.3_2_9_1e-0_4]] )
elif model_name == "xclip-base-patch16-ucf-4-shot":
snake_case__ : Union[str, Any] = torch.tensor([[8.5_8_5_7e-0_5, 9.9_9_2_8e-0_1, 6.3_2_9_1e-0_4]] )
elif model_name == "xclip-base-patch16-ucf-8-shot":
snake_case__ : Any = torch.tensor([[0.0_0_2_7, 0.9_9_0_4, 0.0_0_7_0]] )
elif model_name == "xclip-base-patch16-ucf-16-shot":
snake_case__ : Tuple = torch.tensor([[9.8_2_1_9e-0_4, 9.9_5_9_3e-0_1, 3.0_8_6_3e-0_3]] )
# zero shot
elif model_name == "xclip-base-patch16-zero-shot":
snake_case__ : Tuple = torch.tensor([[3.5_0_8_2e-0_4, 9.9_7_8_5e-0_1, 1.7_9_6_6e-0_3]] )
else:
raise ValueError(F"""Model name {model_name} not supported""" )
assert torch.allclose(A__ , A__ , atol=1e-3 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(A__ )
if push_to_hub:
print('Pushing model, processor and slow tokenizer files to the hub...' )
model.push_to_hub(A__ , organization='nielsr' )
processor.push_to_hub(A__ , organization='nielsr' )
slow_tokenizer.push_to_hub(A__ , organization='nielsr' )
if __name__ == "__main__":
lowerCAmelCase__ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''xclip-base-patch32''',
type=str,
help='''Name of the model.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
lowerCAmelCase__ : Tuple = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
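# Hypothetical invocation (flags match the argparse definitions above; the script
# filename is an assumption):
#   python convert_x_clip_original_pytorch_to_hf.py --model_name xclip-base-patch32 \
#       --pytorch_dump_folder_path ./xclip-base-patch32 --push_to_hub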
| 715 | import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __snake_case ( _lowerCamelCase ,_lowerCamelCase ,unittest.TestCase ):
__lowerCamelCase = IFInpaintingPipeline
__lowerCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"""width""", """height"""}
__lowerCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
__lowerCamelCase = PipelineTesterMixin.required_optional_params - {"""latents"""}
def __a ( self ) -> Optional[Any]:
'''simple docstring'''
return self._get_dummy_components()
def __a ( self , __UpperCamelCase , __UpperCamelCase=0 ) -> str:
'''simple docstring'''
if str(__UpperCamelCase ).startswith('mps' ):
snake_case__ : int = torch.manual_seed(__UpperCamelCase )
else:
snake_case__ : Union[str, Any] = torch.Generator(device=__UpperCamelCase ).manual_seed(__UpperCamelCase )
snake_case__ : Optional[int] = floats_tensor((1, 3, 32, 32) , rng=random.Random(__UpperCamelCase ) ).to(__UpperCamelCase )
snake_case__ : Dict = floats_tensor((1, 3, 32, 32) , rng=random.Random(__UpperCamelCase ) ).to(__UpperCamelCase )
snake_case__ : Optional[Any] = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'mask_image': mask_image,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def __a ( self ) -> List[Any]:
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def __a ( self ) -> Optional[int]:
'''simple docstring'''
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
def __a ( self ) -> List[str]:
'''simple docstring'''
super().test_save_load_floataa(expected_max_diff=1E-1 )
def __a ( self ) -> List[str]:
'''simple docstring'''
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def __a ( self ) -> int:
'''simple docstring'''
self._test_save_load_local()
def __a ( self ) -> List[str]:
'''simple docstring'''
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 699 | 0 |
class __snake_case :
def __init__( self ) -> List[str]:
'''simple docstring'''
snake_case__ : str = 0
snake_case__ : int = 0
snake_case__ : int = {}
def __a ( self , __UpperCamelCase ) -> List[Any]:
'''simple docstring'''
if vertex not in self.adjacency:
snake_case__ : Optional[int] = {}
self.num_vertices += 1
def __a ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Optional[Any]:
'''simple docstring'''
self.add_vertex(__UpperCamelCase )
self.add_vertex(__UpperCamelCase )
if head == tail:
return
snake_case__ : Optional[Any] = weight
snake_case__ : Union[str, Any] = weight
def __a ( self ) -> List[str]:
'''simple docstring'''
snake_case__ : Optional[int] = self.get_edges()
for edge in edges:
snake_case__ : str = edge
edges.remove((tail, head, weight) )
for i in range(len(__UpperCamelCase ) ):
snake_case__ : Tuple = list(edges[i] )
edges.sort(key=lambda __UpperCamelCase : e[2] )
for i in range(len(__UpperCamelCase ) - 1 ):
if edges[i][2] >= edges[i + 1][2]:
snake_case__ : str = edges[i][2] + 1
for edge in edges:
snake_case__ : List[Any] = edge
snake_case__ : Tuple = weight
snake_case__ : str = weight
def __str__( self ) -> int:
'''simple docstring'''
snake_case__ : Tuple = ''
for tail in self.adjacency:
for head in self.adjacency[tail]:
snake_case__ : List[str] = self.adjacency[head][tail]
string += F"""{head} -> {tail} == {weight}\n"""
return string.rstrip('\n' )
def __a ( self ) -> Optional[int]:
'''simple docstring'''
snake_case__ : str = []
for tail in self.adjacency:
for head in self.adjacency[tail]:
output.append((tail, head, self.adjacency[head][tail]) )
return output
def __a ( self ) -> Dict:
'''simple docstring'''
return self.adjacency.keys()
@staticmethod
def __a ( __UpperCamelCase=None , __UpperCamelCase=None ) -> List[str]:
'''simple docstring'''
snake_case__ : int = Graph()
if vertices is None:
snake_case__ : List[str] = []
if edges is None:
snake_case__ : Tuple = []
for vertex in vertices:
g.add_vertex(__UpperCamelCase )
for edge in edges:
g.add_edge(*__UpperCamelCase )
return g
class __snake_case :
def __init__( self ) -> Optional[int]:
'''simple docstring'''
snake_case__ : int = {}
snake_case__ : str = {}
def __len__( self ) -> Optional[int]:
'''simple docstring'''
return len(self.parent )
def __a ( self , __UpperCamelCase ) -> Any:
'''simple docstring'''
if item in self.parent:
return self.find(__UpperCamelCase )
snake_case__ : Optional[int] = item
snake_case__ : str = 0
return item
def __a ( self , __UpperCamelCase ) -> Union[str, Any]:
'''simple docstring'''
if item not in self.parent:
return self.make_set(__UpperCamelCase )
if item != self.parent[item]:
snake_case__ : List[str] = self.find(self.parent[item] )
return self.parent[item]
def __a ( self , __UpperCamelCase , __UpperCamelCase ) -> List[Any]:
'''simple docstring'''
snake_case__ : Any = self.find(__UpperCamelCase )
snake_case__ : List[Any] = self.find(__UpperCamelCase )
if roota == roota:
return roota
if self.rank[roota] > self.rank[roota]:
snake_case__ : str = roota
return roota
if self.rank[roota] < self.rank[roota]:
snake_case__ : List[Any] = roota
return roota
if self.rank[roota] == self.rank[roota]:
self.rank[roota] += 1
snake_case__ : Dict = roota
return roota
return None
@staticmethod
def __a ( __UpperCamelCase ) -> List[Any]:
'''simple docstring'''
snake_case__ : Tuple = graph.num_vertices
snake_case__ : str = Graph.UnionFind()
snake_case__ : Dict = []
while num_components > 1:
snake_case__ : Optional[Any] = {}
for vertex in graph.get_vertices():
snake_case__ : List[str] = -1
snake_case__ : str = graph.get_edges()
for edge in edges:
snake_case__ : Optional[int] = edge
edges.remove((tail, head, weight) )
for edge in edges:
snake_case__ : Optional[int] = edge
snake_case__ : List[Any] = union_find.find(__UpperCamelCase )
snake_case__ : List[Any] = union_find.find(__UpperCamelCase )
if seta != seta:
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
snake_case__ : Dict = [head, tail, weight]
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
snake_case__ : Tuple = [head, tail, weight]
for vertex in cheap_edge:
if cheap_edge[vertex] != -1:
snake_case__ : Dict = cheap_edge[vertex]
if union_find.find(__UpperCamelCase ) != union_find.find(__UpperCamelCase ):
union_find.union(__UpperCamelCase , __UpperCamelCase )
mst_edges.append(cheap_edge[vertex] )
snake_case__ : Union[str, Any] = num_components - 1
snake_case__ : Tuple = Graph.build(edges=__UpperCamelCase )
return mst
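# Boruvka's algorithm, as implemented above: while more than one component remains,
# find the cheapest edge leaving each component (cheap_edge), union the endpoints,
# and add that edge to the MST; the number of components at least halves per round.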
| 716 | import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase__ : List[Any] = '''▁'''
lowerCAmelCase__ : int = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
class __snake_case ( _lowerCamelCase ,unittest.TestCase ):
__lowerCamelCase = BertGenerationTokenizer
__lowerCamelCase = False
__lowerCamelCase = True
def __a ( self ) -> Optional[int]:
'''simple docstring'''
super().setUp()
snake_case__ : str = BertGenerationTokenizer(__UpperCamelCase , keep_accents=__UpperCamelCase )
tokenizer.save_pretrained(self.tmpdirname )
def __a ( self ) -> Optional[int]:
'''simple docstring'''
snake_case__ : List[str] = '<s>'
snake_case__ : Dict = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__UpperCamelCase ) , __UpperCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__UpperCamelCase ) , __UpperCamelCase )
def __a ( self ) -> List[str]:
'''simple docstring'''
snake_case__ : Tuple = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<unk>' )
self.assertEqual(vocab_keys[1] , '<s>' )
self.assertEqual(vocab_keys[-1] , '<pad>' )
self.assertEqual(len(__UpperCamelCase ) , 1002 )
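        # get_vocab returns 1002 entries versus a vocab_size of 1000, i.e. two
        # tokens added on top of the underlying sentencepiece model.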
def __a ( self ) -> int:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def __a ( self ) -> Tuple:
'''simple docstring'''
snake_case__ : Optional[Any] = BertGenerationTokenizer(__UpperCamelCase , keep_accents=__UpperCamelCase )
snake_case__ : int = tokenizer.tokenize('This is a test' )
self.assertListEqual(__UpperCamelCase , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__UpperCamelCase ) , [285, 46, 10, 170, 382] , )
snake_case__ : Any = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
__UpperCamelCase , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
snake_case__ : Optional[Any] = tokenizer.convert_tokens_to_ids(__UpperCamelCase )
self.assertListEqual(
__UpperCamelCase , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
snake_case__ : int = tokenizer.convert_ids_to_tokens(__UpperCamelCase )
self.assertListEqual(
__UpperCamelCase , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
@cached_property
def __a ( self ) -> Dict:
'''simple docstring'''
return BertGenerationTokenizer.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' )
@slow
def __a ( self ) -> Any:
'''simple docstring'''
snake_case__ : int = 'Hello World!'
snake_case__ : Union[str, Any] = [18536, 2260, 101]
self.assertListEqual(__UpperCamelCase , self.big_tokenizer.encode(__UpperCamelCase ) )
@slow
def __a ( self ) -> Optional[int]:
'''simple docstring'''
snake_case__ : str = (
'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'
)
snake_case__ : List[Any] = [
871,
419,
358,
946,
991,
2521,
452,
358,
1357,
387,
7751,
3536,
112,
985,
456,
126,
865,
938,
5400,
5734,
458,
1368,
467,
786,
2462,
5246,
1159,
633,
865,
4519,
457,
582,
852,
2557,
427,
916,
508,
405,
34324,
497,
391,
408,
11342,
1244,
385,
100,
938,
985,
456,
574,
362,
12597,
3200,
3129,
1172,
]
self.assertListEqual(__UpperCamelCase , self.big_tokenizer.encode(__UpperCamelCase ) )
@require_torch
@slow
def __a ( self ) -> List[str]:
'''simple docstring'''
import torch
from transformers import BertGenerationConfig, BertGenerationEncoder
# Build sequence
snake_case__ : Optional[int] = list(self.big_tokenizer.get_vocab().keys() )[:10]
snake_case__ : Optional[int] = ' '.join(__UpperCamelCase )
snake_case__ : int = self.big_tokenizer.encode_plus(__UpperCamelCase , return_tensors='pt' , return_token_type_ids=__UpperCamelCase )
snake_case__ : Tuple = self.big_tokenizer.batch_encode_plus(
[sequence + ' ' + sequence] , return_tensors='pt' , return_token_type_ids=__UpperCamelCase )
snake_case__ : Dict = BertGenerationConfig()
snake_case__ : List[str] = BertGenerationEncoder(__UpperCamelCase )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**__UpperCamelCase )
model(**__UpperCamelCase )
@slow
def __a ( self ) -> Dict:
'''simple docstring'''
snake_case__ : Optional[int] = {'input_ids': [[39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114], [448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__UpperCamelCase , model_name='google/bert_for_seq_generation_L-24_bbc_encoder' , revision='c817d1fd1be2ffa69431227a1fe320544943d4db' , )
| 699 | 0 |
import numpy
class __snake_case :
def __init__( self , __UpperCamelCase , __UpperCamelCase ) -> None:
'''simple docstring'''
snake_case__ : Dict = input_array
        # Random initial weights are assigned; the first argument is the number of
        # nodes in the previous layer and the second is the number of nodes in the
        # next layer. self.input_array.shape[1] is the number of nodes in the input
        # layer, and the first hidden layer consists of 4 nodes.
snake_case__ : Tuple = numpy.random.rand(
self.input_array.shape[1] , 4 )
# Random initial values for the first hidden layer.
# First hidden layer has 4 nodes.
# Second hidden layer has 3 nodes.
snake_case__ : Dict = numpy.random.rand(
4 , 3 )
# Random initial values for the second hidden layer.
# Second hidden layer has 3 nodes.
# Output layer has 1 node.
snake_case__ : List[Any] = numpy.random.rand(3 , 1 )
# Real output values provided.
snake_case__ : Any = output_array
# Predicted output values by the neural network.
# Predicted_output array initially consists of zeroes.
snake_case__ : Optional[int] = numpy.zeros(output_array.shape )
def __a ( self ) -> numpy.ndarray:
'''simple docstring'''
snake_case__ : Optional[int] = sigmoid(
numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) )
# layer_between_first_hidden_layer_and_second_hidden_layer is the layer
# connecting the first hidden set of nodes with the second hidden set of nodes.
snake_case__ : Any = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
# layer_between_second_hidden_layer_and_output is the layer connecting
# second hidden layer with the output node.
snake_case__ : Tuple = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return self.layer_between_second_hidden_layer_and_output
def __a ( self ) -> None:
'''simple docstring'''
snake_case__ : Optional[int] = numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , )
snake_case__ : str = numpy.dot(
self.layer_between_input_and_first_hidden_layer.T , numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , )
snake_case__ : int = numpy.dot(
self.input_array.T , numpy.dot(
numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , )
* sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , )
self.input_layer_and_first_hidden_layer_weights += (
updated_input_layer_and_first_hidden_layer_weights
)
self.first_hidden_layer_and_second_hidden_layer_weights += (
updated_first_hidden_layer_and_second_hidden_layer_weights
)
self.second_hidden_layer_and_output_layer_weights += (
updated_second_hidden_layer_and_output_layer_weights
)
def __a ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> None:
'''simple docstring'''
for iteration in range(1 , iterations + 1 ):
snake_case__ : List[str] = self.feedforward()
self.back_propagation()
if give_loss:
snake_case__ : Any = numpy.mean(numpy.square(output - self.feedforward() ) )
print(F"""Iteration {iteration} Loss: {loss}""" )
def __a ( self , __UpperCamelCase ) -> int:
'''simple docstring'''
snake_case__ : Union[str, Any] = input_arr
snake_case__ : Optional[Any] = sigmoid(
numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) )
snake_case__ : Dict = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
snake_case__ : Union[str, Any] = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return int(self.layer_between_second_hidden_layer_and_output > 0.6 )
def UpperCamelCase__ ( A__ ) -> numpy.ndarray:
return 1 / (1 + numpy.exp(-value ))
def UpperCamelCase__ ( A__ ) -> numpy.ndarray:
return (value) * (1 - (value))
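# Note: sigmoid_derivative takes the already-activated value a = sigmoid(x) and
# returns a * (1 - a), which equals d(sigmoid(x))/dx expressed via the output.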
def UpperCamelCase__ ( ) -> int:
snake_case__ : List[str] = numpy.array(
(
[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
[1, 1, 1],
) , dtype=numpy.floataa , )
# True output values for the given input values.
snake_case__ : str = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.floataa )
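    # These target values are the 3-input XOR (odd parity) of the input bits.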
# Calling neural network class.
snake_case__ : Optional[int] = TwoHiddenLayerNeuralNetwork(
input_array=A__ , output_array=A__ )
# Calling training function.
# Set give_loss to True if you want to see loss in every iteration.
neural_network.train(output=A__ , iterations=10 , give_loss=A__ )
return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.floataa ) )
if __name__ == "__main__":
example()
| 717 | 699 | 0 |
def UpperCamelCase__ ( A__ ) -> list[int]:
if length <= 0 or not isinstance(A__ , A__ ):
raise ValueError('Length must be a positive integer.' )
return [n * (2 * n - 1) for n in range(A__ )]
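# The n-th hexagonal number is h(n) = n * (2n - 1); starting from n = 0 this yields
# 0, 1, 6, 15, 28, 45, ...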
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
| 718 | import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
lowerCAmelCase__ : Dict = logging.get_logger(__name__)
class __snake_case ( _lowerCamelCase ):
def __init__( self , *__UpperCamelCase , **__UpperCamelCase ) -> None:
'''simple docstring'''
warnings.warn(
'The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use PerceiverImageProcessor instead.' , __UpperCamelCase , )
super().__init__(*__UpperCamelCase , **__UpperCamelCase )
| 699 | 0 |
from cva import destroyAllWindows, imread, imshow, waitKey
def UpperCamelCase__ ( A__ ) -> str:
    # get the image dimensions (height and width)
snake_case__ : int = img.shape[0], img.shape[1]
# converting each pixel's color to its negative
for i in range(A__ ):
for j in range(A__ ):
snake_case__ : List[Any] = [255, 255, 255] - img[i][j]
return img
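# Note: the per-pixel loop above is equivalent to the vectorized expression 255 - img.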
if __name__ == "__main__":
# read original image
lowerCAmelCase__ : Any = imread('''image_data/lena.jpg''', 1)
# convert to its negative
lowerCAmelCase__ : Optional[Any] = convert_to_negative(img)
# show result image
imshow('''negative of original image''', img)
waitKey(0)
destroyAllWindows()
| 719 | import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
lowerCAmelCase__ : List[Any] = datasets.utils.logging.get_logger(__name__)
@dataclass
class __snake_case ( datasets.BuilderConfig ):
__lowerCamelCase = None
__lowerCamelCase = "utf-8"
__lowerCamelCase = None
__lowerCamelCase = None
__lowerCamelCase = True # deprecated
__lowerCamelCase = None # deprecated
__lowerCamelCase = 10 << 20 # 10MB
__lowerCamelCase = None
class __snake_case ( datasets.ArrowBasedBuilder ):
__lowerCamelCase = JsonConfig
def __a ( self ) -> Optional[Any]:
'''simple docstring'''
if self.config.block_size is not None:
logger.warning('The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead' )
snake_case__ : str = self.config.block_size
if self.config.use_threads is not True:
logger.warning(
'The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.' )
if self.config.newlines_in_values is not None:
raise ValueError('The JSON loader parameter `newlines_in_values` is no longer supported' )
return datasets.DatasetInfo(features=self.config.features )
def __a ( self , __UpperCamelCase ) -> Dict:
'''simple docstring'''
if not self.config.data_files:
raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
snake_case__ : int = dl_manager.download_and_extract(self.config.data_files )
if isinstance(__UpperCamelCase , (str, list, tuple) ):
snake_case__ : Any = data_files
if isinstance(__UpperCamelCase , __UpperCamelCase ):
snake_case__ : Optional[Any] = [files]
snake_case__ : List[str] = [dl_manager.iter_files(__UpperCamelCase ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'files': files} )]
snake_case__ : List[Any] = []
for split_name, files in data_files.items():
if isinstance(__UpperCamelCase , __UpperCamelCase ):
snake_case__ : List[Any] = [files]
snake_case__ : Any = [dl_manager.iter_files(__UpperCamelCase ) for file in files]
splits.append(datasets.SplitGenerator(name=__UpperCamelCase , gen_kwargs={'files': files} ) )
return splits
def __a ( self , __UpperCamelCase ) -> pa.Table:
'''simple docstring'''
if self.config.features is not None:
# adding missing columns
for column_name in set(self.config.features ) - set(pa_table.column_names ):
snake_case__ : List[Any] = self.config.features.arrow_schema.field(__UpperCamelCase ).type
snake_case__ : List[str] = pa_table.append_column(__UpperCamelCase , pa.array([None] * len(__UpperCamelCase ) , type=__UpperCamelCase ) )
# more expensive cast to support nested structures with keys in a different order
# allows str <-> int/float or str to Audio for example
snake_case__ : List[str] = table_cast(__UpperCamelCase , self.config.features.arrow_schema )
return pa_table
def __a ( self , __UpperCamelCase ) -> int:
'''simple docstring'''
for file_idx, file in enumerate(itertools.chain.from_iterable(__UpperCamelCase ) ):
# If the file is one json object and if we need to look at the list of items in one specific field
if self.config.field is not None:
with open(__UpperCamelCase , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
snake_case__ : Union[str, Any] = json.load(__UpperCamelCase )
# We keep only the field we are interested in
snake_case__ : Tuple = dataset[self.config.field]
# We accept two format: a list of dicts or a dict of lists
if isinstance(__UpperCamelCase , (list, tuple) ):
snake_case__ : List[Any] = set().union(*[row.keys() for row in dataset] )
snake_case__ : List[Any] = {col: [row.get(__UpperCamelCase ) for row in dataset] for col in keys}
else:
snake_case__ : List[Any] = dataset
snake_case__ : Dict = pa.Table.from_pydict(__UpperCamelCase )
yield file_idx, self._cast_table(__UpperCamelCase )
# If the file has one json object per line
else:
with open(__UpperCamelCase , 'rb' ) as f:
snake_case__ : Optional[int] = 0
# Use block_size equal to the chunk size divided by 32 to leverage multithreading
# Set a default minimum value of 16kB if the chunk size is really small
snake_case__ : Tuple = max(self.config.chunksize // 32 , 16 << 10 )
snake_case__ : Optional[Any] = (
self.config.encoding_errors if self.config.encoding_errors is not None else 'strict'
)
while True:
snake_case__ : Optional[int] = f.read(self.config.chunksize )
if not batch:
break
# Finish current line
try:
batch += f.readline()
except (AttributeError, io.UnsupportedOperation):
batch += readline(__UpperCamelCase )
# PyArrow only accepts utf-8 encoded bytes
if self.config.encoding != "utf-8":
snake_case__ : int = batch.decode(self.config.encoding , errors=__UpperCamelCase ).encode('utf-8' )
try:
while True:
try:
snake_case__ : List[str] = paj.read_json(
io.BytesIO(__UpperCamelCase ) , read_options=paj.ReadOptions(block_size=__UpperCamelCase ) )
break
except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
if (
isinstance(__UpperCamelCase , pa.ArrowInvalid )
and "straddling" not in str(__UpperCamelCase )
or block_size > len(__UpperCamelCase )
):
raise
else:
# Increase the block size in case it was too small.
# The block size will be reset for the next file.
logger.debug(
F"""Batch of {len(__UpperCamelCase )} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.""" )
block_size *= 2
except pa.ArrowInvalid as e:
try:
with open(
__UpperCamelCase , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
snake_case__ : Tuple = json.load(__UpperCamelCase )
except json.JSONDecodeError:
logger.error(F"""Failed to read file '{file}' with error {type(__UpperCamelCase )}: {e}""" )
raise e
# If possible, parse the file as a list of json objects and exit the loop
if isinstance(__UpperCamelCase , __UpperCamelCase ): # list is the only sequence type supported in JSON
try:
snake_case__ : str = set().union(*[row.keys() for row in dataset] )
snake_case__ : Union[str, Any] = {col: [row.get(__UpperCamelCase ) for row in dataset] for col in keys}
snake_case__ : List[str] = pa.Table.from_pydict(__UpperCamelCase )
except (pa.ArrowInvalid, AttributeError) as e:
logger.error(F"""Failed to read file '{file}' with error {type(__UpperCamelCase )}: {e}""" )
raise ValueError(F"""Not able to read records in the JSON file at {file}.""" ) from None
yield file_idx, self._cast_table(__UpperCamelCase )
break
else:
logger.error(F"""Failed to read file '{file}' with error {type(__UpperCamelCase )}: {e}""" )
raise ValueError(
F"""Not able to read records in the JSON file at {file}. """
F"""You should probably indicate the field of the JSON file containing your records. """
F"""This JSON file contain the following fields: {str(list(dataset.keys() ) )}. """
F"""Select the correct one and provide it as `field='XXX'` to the dataset loading method. """ ) from None
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(__UpperCamelCase )
batch_idx += 1
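# Minimal usage sketch (this builder backs the standard `datasets` "json" loader):
#   from datasets import load_dataset
#   ds = load_dataset("json", data_files={"train": "train.jsonl"})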
| 699 | 0 |
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __snake_case :
def __init__( self , __UpperCamelCase , __UpperCamelCase=13 , __UpperCamelCase=32 , __UpperCamelCase=3 , __UpperCamelCase=4 , __UpperCamelCase=[10, 20, 30, 40] , __UpperCamelCase=[2, 2, 3, 2] , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=37 , __UpperCamelCase="gelu" , __UpperCamelCase=10 , __UpperCamelCase=0.0_2 , __UpperCamelCase=["stage2", "stage3", "stage4"] , __UpperCamelCase=[2, 3, 4] , __UpperCamelCase=None , ) -> int:
'''simple docstring'''
snake_case__ : List[str] = parent
snake_case__ : Optional[int] = batch_size
snake_case__ : Dict = image_size
snake_case__ : str = num_channels
snake_case__ : Tuple = num_stages
snake_case__ : Optional[Any] = hidden_sizes
snake_case__ : int = depths
snake_case__ : Tuple = is_training
snake_case__ : Dict = use_labels
snake_case__ : List[Any] = intermediate_size
snake_case__ : Union[str, Any] = hidden_act
snake_case__ : Optional[Any] = num_labels
snake_case__ : Dict = initializer_range
snake_case__ : List[Any] = out_features
snake_case__ : List[Any] = out_indices
snake_case__ : Any = scope
def __a ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case__ : Union[str, Any] = None
if self.use_labels:
snake_case__ : str = ids_tensor([self.batch_size] , self.num_labels )
snake_case__ : Dict = self.get_config()
return config, pixel_values, labels
def __a ( self ) -> int:
'''simple docstring'''
return ConvNextConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=__UpperCamelCase , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def __a ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Optional[Any]:
'''simple docstring'''
snake_case__ : Any = ConvNextModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
snake_case__ : List[str] = model(__UpperCamelCase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def __a ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : str = ConvNextForImageClassification(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
snake_case__ : Optional[Any] = model(__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __a ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Tuple:
'''simple docstring'''
snake_case__ : Dict = ConvNextBackbone(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
snake_case__ : List[Any] = model(__UpperCamelCase )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
snake_case__ : List[str] = None
snake_case__ : int = ConvNextBackbone(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
snake_case__ : Optional[Any] = model(__UpperCamelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def __a ( self ) -> Optional[int]:
'''simple docstring'''
snake_case__ : Optional[Any] = self.prepare_config_and_inputs()
snake_case__ : str = config_and_inputs
snake_case__ : Any = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class __snake_case ( _lowerCamelCase ,_lowerCamelCase ,unittest.TestCase ):
__lowerCamelCase = (
(
ConvNextModel,
ConvNextForImageClassification,
ConvNextBackbone,
)
if is_torch_available()
else ()
)
__lowerCamelCase = (
{"""feature-extraction""": ConvNextModel, """image-classification""": ConvNextForImageClassification}
if is_torch_available()
else {}
)
__lowerCamelCase = True
__lowerCamelCase = False
__lowerCamelCase = False
__lowerCamelCase = False
__lowerCamelCase = False
def __a ( self ) -> Dict:
'''simple docstring'''
snake_case__ : Any = ConvNextModelTester(self )
snake_case__ : Union[str, Any] = ConfigTester(self , config_class=__UpperCamelCase , has_text_modality=__UpperCamelCase , hidden_size=37 )
def __a ( self ) -> str:
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __a ( self ) -> Tuple:
'''simple docstring'''
return
@unittest.skip(reason='ConvNext does not use inputs_embeds' )
def __a ( self ) -> Any:
'''simple docstring'''
pass
@unittest.skip(reason='ConvNext does not support input and output embeddings' )
def __a ( self ) -> Dict:
'''simple docstring'''
pass
@unittest.skip(reason='ConvNext does not use feedforward chunking' )
def __a ( self ) -> str:
'''simple docstring'''
pass
def __a ( self ) -> Any:
'''simple docstring'''
snake_case__ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : Union[str, Any] = model_class(__UpperCamelCase )
snake_case__ : Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case__ : Optional[Any] = [*signature.parameters.keys()]
snake_case__ : List[Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] , __UpperCamelCase )
def __a ( self ) -> int:
'''simple docstring'''
snake_case__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def __a ( self ) -> List[Any]:
'''simple docstring'''
snake_case__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*__UpperCamelCase )
def __a ( self ) -> Union[str, Any]:
'''simple docstring'''
def check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
snake_case__ : Tuple = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
with torch.no_grad():
snake_case__ : Optional[int] = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
snake_case__ : Optional[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
snake_case__ : int = self.model_tester.num_stages
self.assertEqual(len(__UpperCamelCase ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
snake_case__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : List[str] = True
check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case__ : List[str] = True
check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def __a ( self ) -> Optional[int]:
'''simple docstring'''
snake_case__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__UpperCamelCase )
@slow
def __a ( self ) -> int:
'''simple docstring'''
for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case__ : Dict = ConvNextModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
def UpperCamelCase__ ( ) -> Union[str, Any]:
snake_case__ : Dict = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class __snake_case ( unittest.TestCase ):
@cached_property
def __a ( self ) -> Union[str, Any]:
'''simple docstring'''
return AutoImageProcessor.from_pretrained('facebook/convnext-tiny-224' ) if is_vision_available() else None
@slow
def __a ( self ) -> Dict:
'''simple docstring'''
snake_case__ : Optional[int] = ConvNextForImageClassification.from_pretrained('facebook/convnext-tiny-224' ).to(__UpperCamelCase )
snake_case__ : Optional[Any] = self.default_image_processor
snake_case__ : List[str] = prepare_img()
snake_case__ : str = image_processor(images=__UpperCamelCase , return_tensors='pt' ).to(__UpperCamelCase )
# forward pass
with torch.no_grad():
snake_case__ : Any = model(**__UpperCamelCase )
# verify the logits
snake_case__ : List[Any] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , __UpperCamelCase )
snake_case__ : Union[str, Any] = torch.tensor([-0.0_2_6_0, -0.4_7_3_9, 0.1_9_1_1] ).to(__UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCamelCase , atol=1E-4 ) )
@require_torch
class __snake_case ( unittest.TestCase ,_lowerCamelCase ):
__lowerCamelCase = (ConvNextBackbone,) if is_torch_available() else ()
__lowerCamelCase = ConvNextConfig
__lowerCamelCase = False
def __a ( self ) -> Any:
'''simple docstring'''
snake_case__ : List[str] = ConvNextModelTester(self )
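# Hedged sketch of the shape arithmetic the backbone checks above rely on: a
# ConvNext-style stem downsamples by 4 and each later stage halves the
# resolution, for a total stride of 32. No model is needed to state this.
def _expected_feature_map_side(image_size, total_stride=32):
    return image_size // total_stride


assert _expected_feature_map_side(32) == 1    # the 1x1 map asserted above
assert _expected_feature_map_side(224) == 7   # a common eval resolution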
| 720 | from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase__ : Any = {'''configuration_xglm''': ['''XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XGLMConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ : str = ['''XGLMTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ : Dict = ['''XGLMTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ : Optional[int] = [
'''XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XGLMForCausalLM''',
'''XGLMModel''',
'''XGLMPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ : Dict = [
'''FlaxXGLMForCausalLM''',
'''FlaxXGLMModel''',
'''FlaxXGLMPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ : Dict = [
'''TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXGLMForCausalLM''',
'''TFXGLMModel''',
'''TFXGLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ : Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
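# Hedged sketch of the lazy-import idea behind _LazyModule: defer the real
# import until an attribute is first touched. This is a simplified stand-in,
# not transformers' actual implementation.
import importlib
import types


class _TinyLazyModule(types.ModuleType):
    def __init__(self, name, target):
        super().__init__(name)
        self._target = target
        self._module = None

    def __getattr__(self, attr):
        # Only runs for attributes not found normally, i.e. the real module's.
        if self._module is None:
            self._module = importlib.import_module(self._target)
        return getattr(self._module, attr)


_lazy_math = _TinyLazyModule('lazy_math', 'math')
assert _lazy_math.sqrt(9) == 3.0  # `math` is only imported on first access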
| 699 | 0 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
lowerCAmelCase__ : str = False
@skip_mps
class __snake_case ( _lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,unittest.TestCase ):
__lowerCamelCase = StableDiffusionAttendAndExcitePipeline
__lowerCamelCase = False
__lowerCamelCase = TEXT_TO_IMAGE_PARAMS
__lowerCamelCase = TEXT_TO_IMAGE_BATCH_PARAMS.union({"""token_indices"""} )
__lowerCamelCase = TEXT_TO_IMAGE_IMAGE_PARAMS
__lowerCamelCase = TEXT_TO_IMAGE_IMAGE_PARAMS
@classmethod
def __a ( cls ) -> Any:
'''simple docstring'''
super().setUpClass()
torch.use_deterministic_algorithms(__UpperCamelCase )
@classmethod
def __a ( cls ) -> Any:
'''simple docstring'''
super().tearDownClass()
torch.use_deterministic_algorithms(__UpperCamelCase )
def __a ( self ) -> int:
'''simple docstring'''
torch.manual_seed(0 )
snake_case__ : str = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=__UpperCamelCase , )
snake_case__ : Dict = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='scaled_linear' , clip_sample=__UpperCamelCase , set_alpha_to_one=__UpperCamelCase , )
torch.manual_seed(0 )
snake_case__ : Tuple = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
snake_case__ : List[str] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='gelu' , projection_dim=512 , )
snake_case__ : Tuple = CLIPTextModel(__UpperCamelCase )
snake_case__ : Union[str, Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
snake_case__ : Any = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def __a ( self , __UpperCamelCase , __UpperCamelCase=0 ) -> Dict:
'''simple docstring'''
if str(__UpperCamelCase ).startswith('mps' ):
snake_case__ : str = torch.manual_seed(__UpperCamelCase )
else:
snake_case__ : Any = torch.Generator(device=__UpperCamelCase ).manual_seed(__UpperCamelCase )
snake_case__ : List[str] = {
'prompt': 'a cat and a frog',
'token_indices': [2, 5],
'generator': generator,
'num_inference_steps': 1,
'guidance_scale': 6.0,
'output_type': 'numpy',
'max_iter_to_alter': 2,
'thresholds': {0: 0.7},
}
return inputs
def __a ( self ) -> Dict:
'''simple docstring'''
snake_case__ : List[Any] = 'cpu'
snake_case__ : int = self.get_dummy_components()
snake_case__ : Tuple = self.pipeline_class(**__UpperCamelCase )
pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
snake_case__ : Union[str, Any] = self.get_dummy_inputs(__UpperCamelCase )
snake_case__ : List[Any] = pipe(**__UpperCamelCase ).images
snake_case__ : List[Any] = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 64, 64, 3) )
snake_case__ : Dict = np.array(
[0.6_3_9_0_5_3_6_4, 0.6_2_8_9_7_3_0_7, 0.4_8_5_9_9_0_1_7, 0.5_1_3_3_6_2_4, 0.5_5_5_0_0_4_8, 0.4_5_7_6_9_5_1_6, 0.5_0_3_2_6_9_7_3, 0.5_0_2_3_1_3_9, 0.4_5_3_8_4_4_9_6] )
snake_case__ : Optional[Any] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__UpperCamelCase , 1E-3 )
def __a ( self ) -> str:
'''simple docstring'''
super().test_cpu_offload_forward_pass(expected_max_diff=5E-4 )
def __a ( self ) -> Dict:
'''simple docstring'''
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def __a ( self ) -> Union[str, Any]:
'''simple docstring'''
self._test_inference_batch_single_identical(batch_size=2 , expected_max_diff=7E-4 )
def __a ( self ) -> List[Any]:
'''simple docstring'''
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
def __a ( self ) -> List[str]:
'''simple docstring'''
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5E-4 )
def __a ( self ) -> List[str]:
'''simple docstring'''
super().test_save_load_local(expected_max_difference=5E-4 )
def __a ( self ) -> List[str]:
'''simple docstring'''
super().test_save_load_optional_components(expected_max_difference=4E-4 )
@require_torch_gpu
@slow
class __snake_case ( unittest.TestCase ):
@classmethod
def __a ( cls ) -> List[str]:
'''simple docstring'''
super().setUpClass()
torch.use_deterministic_algorithms(__UpperCamelCase )
@classmethod
def __a ( cls ) -> Optional[Any]:
'''simple docstring'''
super().tearDownClass()
torch.use_deterministic_algorithms(__UpperCamelCase )
def __a ( self ) -> Optional[Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __a ( self ) -> int:
'''simple docstring'''
snake_case__ : Union[str, Any] = torch.manual_seed(51 )
snake_case__ : Dict = StableDiffusionAttendAndExcitePipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , safety_checker=__UpperCamelCase , torch_dtype=torch.floataa )
pipe.to('cuda' )
snake_case__ : Optional[int] = 'a painting of an elephant with glasses'
snake_case__ : Any = [5, 7]
snake_case__ : List[Any] = pipe(
prompt=__UpperCamelCase , token_indices=__UpperCamelCase , guidance_scale=7.5 , generator=__UpperCamelCase , num_inference_steps=5 , max_iter_to_alter=5 , output_type='numpy' , ).images[0]
snake_case__ : List[str] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy' )
assert np.abs((expected_image - image).max() ) < 5E-1
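# Hedged sketch of the seeding pattern used in get_dummy_inputs above: MPS
# needs the global CPU generator, while CPU/CUDA accept a device-bound
# torch.Generator. Device strings here are illustrative.
import torch as _torch


def _make_generator(device, seed=0):
    if str(device).startswith('mps'):
        return _torch.manual_seed(seed)
    return _torch.Generator(device=device).manual_seed(seed)


_gen = _make_generator('cpu', seed=0)
print(_torch.randn(2, generator=_gen))  # deterministic for a fixed seed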
| 721 | from __future__ import annotations
import random
# Maximum size of the population. Bigger can converge faster but uses more memory.
lowerCAmelCase__ : Dict = 2_00
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
lowerCAmelCase__ : List[str] = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
lowerCAmelCase__ : List[str] = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 10_00))
def UpperCamelCase__ ( A__ , A__ ) -> tuple[str, float]:
snake_case__ : Tuple = len([g for position, g in enumerate(A__ ) if g == main_target[position]] )
return (item, float(A__ ))
def UpperCamelCase__ ( A__ , A__ ) -> tuple[str, str]:
snake_case__ : str = random.randint(0 , len(A__ ) - 1 )
snake_case__ : int = parent_a[:random_slice] + parent_a[random_slice:]
snake_case__ : Any = parent_a[:random_slice] + parent_a[random_slice:]
return (child_a, child_a)
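# Hedged, self-contained version of single-point crossover: the obfuscated
# signature above collapses the two parents into one name, so this sketch
# keeps them distinct to show the intended tail swap.
import random as _random

_demo_rng = _random.Random(0)  # local RNG so the module's global seed is untouched


def _crossover(parent_1, parent_2):
    cut = _demo_rng.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:cut] + parent_2[cut:]
    child_2 = parent_2[:cut] + parent_1[cut:]
    return child_1, child_2


print(_crossover('AAAA', 'BBBB'))  # the children swap tails at a random cut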
def UpperCamelCase__ ( A__ , A__ ) -> str:
snake_case__ : List[Any] = list(A__ )
if random.uniform(0 , 1 ) < MUTATION_PROBABILITY:
snake_case__ : Optional[Any] = random.choice(A__ )
return "".join(A__ )
def UpperCamelCase__ ( A__ , A__ , A__ , ) -> list[str]:
snake_case__ : Tuple = []
# Generate more children proportionally to the fitness score.
snake_case__ : Optional[Any] = int(parent_a[1] * 100 ) + 1
snake_case__ : str = 10 if child_n >= 10 else child_n
for _ in range(A__ ):
snake_case__ : Any = population_score[random.randint(0 , A__ )][0]
snake_case__ , snake_case__ : int = crossover(parent_a[0] , A__ )
# Append new string to the population list.
pop.append(mutate(A__ , A__ ) )
pop.append(mutate(A__ , A__ ) )
return pop
def UpperCamelCase__ ( A__ , A__ , A__ = True ) -> tuple[int, int, str]:
# Verify if N_POPULATION is bigger than N_SELECTED
if N_POPULATION < N_SELECTED:
snake_case__ : Union[str, Any] = F"""{N_POPULATION} must be bigger than {N_SELECTED}"""
raise ValueError(A__ )
    # Verify that the target contains no genes besides the ones inside the genes variable.
snake_case__ : Tuple = sorted({c for c in target if c not in genes} )
if not_in_genes_list:
snake_case__ : int = F"""{not_in_genes_list} is not in genes list, evolution cannot converge"""
raise ValueError(A__ )
# Generate random starting population.
snake_case__ : Union[str, Any] = []
for _ in range(A__ ):
population.append(''.join([random.choice(A__ ) for i in range(len(A__ ) )] ) )
    # Just some logs to show what the algorithm is doing.
snake_case__ , snake_case__ : str = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
total_population += len(A__ )
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
snake_case__ : List[Any] = [evaluate(A__ , A__ ) for item in population]
# Check if there is a matching evolution.
snake_case__ : int = sorted(A__ , key=lambda A__ : x[1] , reverse=A__ )
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
        # Print the best result every 10 generations,
        # just to show that the algorithm is still working.
if debug and generation % 10 == 0:
print(
F"""\nGeneration: {generation}"""
F"""\nTotal Population:{total_population}"""
F"""\nBest score: {population_score[0][1]}"""
F"""\nBest string: {population_score[0][0]}""" )
# Flush the old population, keeping some of the best evolutions.
        # Keeping the best evolutions avoids regressions.
snake_case__ : Optional[int] = population[: int(N_POPULATION / 3 )]
population.clear()
population.extend(A__ )
# Normalize population score to be between 0 and 1.
snake_case__ : str = [
(item, score / len(A__ )) for item, score in population_score
]
# This is selection
for i in range(A__ ):
population.extend(select(population_score[int(A__ )] , A__ , A__ ) )
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
# forever to compute large strings, but will also calculate small strings in
        # far fewer generations.
if len(A__ ) > N_POPULATION:
break
if __name__ == "__main__":
lowerCAmelCase__ : str = (
'''This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!'''
)
lowerCAmelCase__ : Optional[Any] = list(
''' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm'''
'''nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\'''
)
lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__ : List[str] = basic(target_str, genes_list)
print(
F'''\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'''
)
| 699 | 0 |
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class __snake_case ( yaml.SafeLoader ):
def __a ( self , __UpperCamelCase ) -> Any:
'''simple docstring'''
snake_case__ : List[str] = [self.constructed_objects[key_node] for key_node, _ in node.value]
snake_case__ : Optional[Any] = [tuple(__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else key for key in keys]
snake_case__ : int = Counter(__UpperCamelCase )
snake_case__ : Optional[int] = [key for key in counter if counter[key] > 1]
if duplicate_keys:
raise TypeError(F"""Got duplicate yaml keys: {duplicate_keys}""" )
def __a ( self , __UpperCamelCase , __UpperCamelCase=False ) -> Tuple:
'''simple docstring'''
snake_case__ : int = super().construct_mapping(__UpperCamelCase , deep=__UpperCamelCase )
self._check_no_duplicates_on_constructed_node(__UpperCamelCase )
return mapping
def UpperCamelCase__ ( A__ ) -> Tuple[Optional[str], str]:
snake_case__ : Optional[int] = list(readme_content.splitlines() )
if full_content and full_content[0] == "---" and "---" in full_content[1:]:
snake_case__ : List[Any] = full_content[1:].index('---' ) + 1
snake_case__ : Union[str, Any] = '\n'.join(full_content[1:sep_idx] )
return yamlblock, "\n".join(full_content[sep_idx + 1 :] )
return None, "\n".join(A__ )
class __snake_case ( _lowerCamelCase ):
# class attributes
__lowerCamelCase = {"""train_eval_index"""} # train-eval-index in the YAML metadata
@classmethod
def __a ( cls , __UpperCamelCase ) -> "DatasetMetadata":
'''simple docstring'''
with open(__UpperCamelCase , encoding='utf-8' ) as readme_file:
snake_case__ : Any = _split_yaml_from_readme(readme_file.read() )
if yaml_string is not None:
return cls.from_yaml_string(__UpperCamelCase )
else:
return cls()
def __a ( self , __UpperCamelCase ) -> List[str]:
'''simple docstring'''
if path.exists():
with open(__UpperCamelCase , encoding='utf-8' ) as readme_file:
snake_case__ : Tuple = readme_file.read()
else:
snake_case__ : Tuple = None
snake_case__ : Union[str, Any] = self._to_readme(__UpperCamelCase )
with open(__UpperCamelCase , 'w' , encoding='utf-8' ) as readme_file:
readme_file.write(__UpperCamelCase )
def __a ( self , __UpperCamelCase = None ) -> str:
'''simple docstring'''
if readme_content is not None:
snake_case__ : Dict = _split_yaml_from_readme(__UpperCamelCase )
snake_case__ : List[str] = '---\n' + self.to_yaml_string() + '---\n' + content
else:
snake_case__ : List[str] = '---\n' + self.to_yaml_string() + '---\n'
return full_content
@classmethod
def __a ( cls , __UpperCamelCase ) -> "DatasetMetadata":
'''simple docstring'''
snake_case__ : Union[str, Any] = yaml.load(__UpperCamelCase , Loader=_NoDuplicateSafeLoader ) or {}
# Convert the YAML keys to DatasetMetadata fields
snake_case__ : Tuple = {
(key.replace('-' , '_' ) if key.replace('-' , '_' ) in cls._FIELDS_WITH_DASHES else key): value
for key, value in metadata_dict.items()
}
return cls(**__UpperCamelCase )
def __a ( self ) -> str:
'''simple docstring'''
return yaml.safe_dump(
{
(key.replace('_' , '-' ) if key in self._FIELDS_WITH_DASHES else key): value
for key, value in self.items()
} , sort_keys=__UpperCamelCase , allow_unicode=__UpperCamelCase , encoding='utf-8' , ).decode('utf-8' )
lowerCAmelCase__ : Any = {
'''image-classification''': [],
'''translation''': [],
'''image-segmentation''': [],
'''fill-mask''': [],
'''automatic-speech-recognition''': [],
'''token-classification''': [],
'''sentence-similarity''': [],
'''audio-classification''': [],
'''question-answering''': [],
'''summarization''': [],
'''zero-shot-classification''': [],
'''table-to-text''': [],
'''feature-extraction''': [],
'''other''': [],
'''multiple-choice''': [],
'''text-classification''': [],
'''text-to-image''': [],
'''text2text-generation''': [],
'''zero-shot-image-classification''': [],
'''tabular-classification''': [],
'''tabular-regression''': [],
'''image-to-image''': [],
'''tabular-to-text''': [],
'''unconditional-image-generation''': [],
'''text-retrieval''': [],
'''text-to-speech''': [],
'''object-detection''': [],
'''audio-to-audio''': [],
'''text-generation''': [],
'''conversational''': [],
'''table-question-answering''': [],
'''visual-question-answering''': [],
'''image-to-text''': [],
'''reinforcement-learning''': [],
'''voice-activity-detection''': [],
'''time-series-forecasting''': [],
'''document-question-answering''': [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
lowerCAmelCase__ : Dict = ArgumentParser(usage='''Validate the yaml metadata block of a README.md file.''')
ap.add_argument('''readme_filepath''')
lowerCAmelCase__ : Optional[Any] = ap.parse_args()
lowerCAmelCase__ : Tuple = Path(args.readme_filepath)
lowerCAmelCase__ : Dict = DatasetMetadata.from_readme(readme_filepath)
print(dataset_metadata)
dataset_metadata.to_readme(readme_filepath)
| 700 | from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
lowerCAmelCase__ : Optional[int] = TypeVar('''T''')
class __snake_case ( Generic[T] ):
def __init__( self , __UpperCamelCase ) -> Any:
'''simple docstring'''
snake_case__ : Optional[int] = data
snake_case__ : Node[T] | None = None
def __str__( self ) -> str:
'''simple docstring'''
return F"""{self.data}"""
class __snake_case ( Generic[T] ):
def __init__( self ) -> None:
'''simple docstring'''
snake_case__ : Node[T] | None = None
def __iter__( self ) -> Iterator[T]:
'''simple docstring'''
snake_case__ : str = self.top
while node:
yield node.data
snake_case__ : Dict = node.next
def __str__( self ) -> str:
'''simple docstring'''
return "->".join([str(__UpperCamelCase ) for item in self] )
def __len__( self ) -> int:
'''simple docstring'''
return len(tuple(iter(self ) ) )
def __a ( self ) -> bool:
'''simple docstring'''
return self.top is None
def __a ( self , __UpperCamelCase ) -> None:
'''simple docstring'''
snake_case__ : str = Node(__UpperCamelCase )
if not self.is_empty():
snake_case__ : List[str] = self.top
snake_case__ : Tuple = node
def __a ( self ) -> T:
'''simple docstring'''
if self.is_empty():
raise IndexError('pop from empty stack' )
assert isinstance(self.top , __UpperCamelCase )
snake_case__ : List[str] = self.top
snake_case__ : Union[str, Any] = self.top.next
return pop_node.data
def __a ( self ) -> T:
'''simple docstring'''
if self.is_empty():
raise IndexError('peek from empty stack' )
assert self.top is not None
return self.top.data
def __a ( self ) -> None:
'''simple docstring'''
snake_case__ : Any = None
if __name__ == "__main__":
from doctest import testmod
testmod()
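# Hedged usage sketch of the LIFO contract the stack above implements (its
# class names are obfuscated here, so a plain list stands in):
_stack = []
_stack.append(1)  # push
_stack.append(2)  # push
assert _stack[-1] == 2    # peek: last-pushed item
assert _stack.pop() == 2  # pop returns items in reverse insertion order
assert _stack.pop() == 1
assert not _stack         # empty again; a further pop would raise IndexError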
| 699 | 0 |
from __future__ import annotations
import math
from collections.abc import Callable
def UpperCamelCase__ ( A__ , A__ , A__ , A__ = 100 , ) -> float:
snake_case__ : Optional[int] = x_start
snake_case__ : Any = fnc(A__ )
snake_case__ : List[str] = 0.0
for _ in range(A__ ):
# Approximates curve as a sequence of linear lines and sums their length
snake_case__ : Optional[int] = (x_end - x_start) / steps + xa
snake_case__ : Tuple = fnc(A__ )
length += math.hypot(xa - xa , fxa - fxa )
# Increment step
snake_case__ : str = xa
snake_case__ : Any = fxa
return length
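# Hedged sanity check for the arc-length approximation above, written
# self-contained because the surrounding names are obfuscated: a straight
# line y = x from 0 to 1 has exact length sqrt(2), which the piecewise-linear
# sum should reproduce to within floating-point error.
import math as _math


def _line_length(fnc, x_start, x_end, steps=100):
    xa, fxa, length = x_start, fnc(x_start), 0.0
    for _ in range(steps):
        xb = (x_end - x_start) / steps + xa
        fxb = fnc(xb)
        length += _math.hypot(xb - xa, fxb - fxa)
        xa, fxa = xb, fxb
    return length


assert abs(_line_length(lambda x: x, 0.0, 1.0) - _math.sqrt(2)) < 1e-9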
if __name__ == "__main__":
def UpperCamelCase__ ( A__ ) -> Optional[int]:
return math.sin(10 * x )
print('''f(x) = sin(10 * x)''')
print('''The length of the curve from x = -10 to x = 10 is:''')
lowerCAmelCase__ : Dict = 10
while i <= 10_00_00:
print(F'''With {i} steps: {line_length(f, -10, 10, i)}''')
i *= 10
| 701 | from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase__ : Dict = logging.get_logger(__name__)
lowerCAmelCase__ : int = {
'''sail/poolformer_s12''': '''https://huggingface.co/sail/poolformer_s12/resolve/main/config.json''',
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}
class __snake_case ( _lowerCamelCase ):
__lowerCamelCase = """poolformer"""
def __init__( self , __UpperCamelCase=3 , __UpperCamelCase=16 , __UpperCamelCase=16 , __UpperCamelCase=3 , __UpperCamelCase=4.0 , __UpperCamelCase=[2, 2, 6, 2] , __UpperCamelCase=[64, 128, 320, 512] , __UpperCamelCase=[7, 3, 3, 3] , __UpperCamelCase=[4, 2, 2, 2] , __UpperCamelCase=[2, 1, 1, 1] , __UpperCamelCase=4 , __UpperCamelCase=0.0 , __UpperCamelCase="gelu" , __UpperCamelCase=True , __UpperCamelCase=1E-5 , __UpperCamelCase=0.0_2 , **__UpperCamelCase , ) -> Any:
'''simple docstring'''
snake_case__ : List[str] = num_channels
snake_case__ : Dict = patch_size
snake_case__ : Optional[int] = stride
snake_case__ : str = padding
snake_case__ : List[str] = pool_size
snake_case__ : List[Any] = hidden_sizes
snake_case__ : List[Any] = mlp_ratio
snake_case__ : Union[str, Any] = depths
snake_case__ : Dict = patch_sizes
snake_case__ : Dict = strides
snake_case__ : Dict = num_encoder_blocks
snake_case__ : Union[str, Any] = drop_path_rate
snake_case__ : List[str] = hidden_act
snake_case__ : Optional[Any] = use_layer_scale
snake_case__ : int = layer_scale_init_value
snake_case__ : Dict = initializer_range
super().__init__(**__UpperCamelCase )
class __snake_case ( _lowerCamelCase ):
__lowerCamelCase = version.parse("""1.11""" )
@property
def __a ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def __a ( self ) -> float:
'''simple docstring'''
return 2E-3
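# Hedged sketch of what the ONNX `inputs` property above encodes: an ordered
# mapping from input name to its dynamic-axis labels, built standalone here.
from collections import OrderedDict as _OrderedDict

_onnx_inputs = _OrderedDict(
    [('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'})]
)
assert list(_onnx_inputs) == ['pixel_values']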
| 699 | 0 |
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class __snake_case :
def __init__( self , __UpperCamelCase , __UpperCamelCase=13 , __UpperCamelCase=64 , __UpperCamelCase=2 , __UpperCamelCase=3 , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=32 , __UpperCamelCase=5 , __UpperCamelCase=4 , __UpperCamelCase=37 , __UpperCamelCase="gelu" , __UpperCamelCase=0.1 , __UpperCamelCase=0.1 , __UpperCamelCase=10 , __UpperCamelCase=0.0_2 , __UpperCamelCase=[1, 16, 4, 4] , __UpperCamelCase=None , ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : Any = parent
snake_case__ : str = batch_size
snake_case__ : List[str] = image_size
snake_case__ : Tuple = patch_size
snake_case__ : str = num_channels
snake_case__ : Dict = is_training
snake_case__ : List[Any] = use_labels
snake_case__ : Tuple = hidden_size
snake_case__ : Union[str, Any] = num_hidden_layers
snake_case__ : List[Any] = num_attention_heads
snake_case__ : List[str] = intermediate_size
snake_case__ : List[str] = hidden_act
snake_case__ : int = hidden_dropout_prob
snake_case__ : Optional[int] = attention_probs_dropout_prob
snake_case__ : Dict = type_sequence_label_size
snake_case__ : Optional[int] = initializer_range
snake_case__ : Optional[int] = scope
snake_case__ : Union[str, Any] = backbone_featmap_shape
# in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
# the number of patches is based on the feature map of the backbone, which by default uses an output stride
# of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
snake_case__ : Dict = (self.image_size // 32) ** 2
snake_case__ : Optional[Any] = num_patches + 1
def __a ( self ) -> Optional[int]:
'''simple docstring'''
snake_case__ : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case__ : List[Any] = None
if self.use_labels:
snake_case__ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case__ : Optional[Any] = self.get_config()
return config, pixel_values, labels
def __a ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case__ : Any = {
'global_padding': 'same',
'layer_type': 'bottleneck',
'depths': [3, 4, 9],
'out_features': ['stage1', 'stage2', 'stage3'],
'embedding_dynamic_padding': True,
'hidden_sizes': [4, 8, 16, 32],
'num_groups': 2,
}
return ViTHybridConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__UpperCamelCase , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=__UpperCamelCase , )
def __a ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : Optional[Any] = ViTHybridModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
snake_case__ : Any = model(__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __a ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Dict:
'''simple docstring'''
snake_case__ : Any = self.type_sequence_label_size
snake_case__ : Dict = ViTHybridForImageClassification(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
snake_case__ : Tuple = model(__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __a ( self ) -> Optional[int]:
'''simple docstring'''
snake_case__ : Dict = self.prepare_config_and_inputs()
snake_case__ : str = config_and_inputs
snake_case__ : Dict = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class __snake_case ( _lowerCamelCase ,_lowerCamelCase ,unittest.TestCase ):
__lowerCamelCase = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
__lowerCamelCase = (
{"""feature-extraction""": ViTHybridModel, """image-classification""": ViTHybridForImageClassification}
if is_torch_available()
else {}
)
__lowerCamelCase = False
__lowerCamelCase = False
__lowerCamelCase = False
def __a ( self ) -> List[Any]:
'''simple docstring'''
snake_case__ : Union[str, Any] = ViTHybridModelTester(self )
snake_case__ : Optional[Any] = ConfigTester(self , config_class=__UpperCamelCase , has_text_modality=__UpperCamelCase , hidden_size=37 )
def __a ( self ) -> int:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='ViT does not use inputs_embeds' )
def __a ( self ) -> Union[str, Any]:
'''simple docstring'''
pass
def __a ( self ) -> Optional[int]:
'''simple docstring'''
snake_case__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : Dict = model_class(__UpperCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
snake_case__ : Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__UpperCamelCase , nn.Linear ) )
def __a ( self ) -> Tuple:
'''simple docstring'''
snake_case__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : List[Any] = model_class(__UpperCamelCase )
snake_case__ : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case__ : Tuple = [*signature.parameters.keys()]
snake_case__ : Optional[int] = ['pixel_values']
self.assertListEqual(arg_names[:1] , __UpperCamelCase )
def __a ( self ) -> List[Any]:
'''simple docstring'''
snake_case__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def __a ( self ) -> Tuple:
'''simple docstring'''
snake_case__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__UpperCamelCase )
def __a ( self ) -> Optional[int]:
'''simple docstring'''
snake_case__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : Tuple = _config_zero_init(__UpperCamelCase )
for model_class in self.all_model_classes:
snake_case__ : Optional[Any] = model_class(config=__UpperCamelCase )
# Skip the check for the backbone
for name, module in model.named_modules():
if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
snake_case__ : Dict = [F"""{name}.{key}""" for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@slow
def __a ( self ) -> List[str]:
'''simple docstring'''
for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case__ : List[str] = ViTHybridModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
def UpperCamelCase__ ( ) -> Dict:
snake_case__ : Optional[int] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class __snake_case ( unittest.TestCase ):
@cached_property
def __a ( self ) -> Union[str, Any]:
'''simple docstring'''
return (
ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def __a ( self ) -> Dict:
'''simple docstring'''
snake_case__ : Any = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
__UpperCamelCase )
snake_case__ : Tuple = self.default_image_processor
snake_case__ : Tuple = prepare_img()
snake_case__ : Optional[Any] = image_processor(images=__UpperCamelCase , return_tensors='pt' ).to(__UpperCamelCase )
# forward pass
with torch.no_grad():
snake_case__ : int = model(**__UpperCamelCase )
# verify the logits
snake_case__ : Optional[Any] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , __UpperCamelCase )
snake_case__ : Optional[Any] = torch.tensor([-1.9_0_9_0, -0.4_9_9_3, -0.2_3_8_9] ).to(__UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCamelCase , atol=1E-4 ) )
@slow
@require_accelerate
def __a ( self ) -> int:
'''simple docstring'''
snake_case__ : List[Any] = ViTHybridImageProcessor.from_pretrained('google/vit-hybrid-base-bit-384' )
snake_case__ : str = ViTHybridForImageClassification.from_pretrained('google/vit-hybrid-base-bit-384' , device_map='auto' )
snake_case__ : Optional[int] = prepare_img()
snake_case__ : Optional[int] = image_processor(images=__UpperCamelCase , return_tensors='pt' )
snake_case__ : Optional[int] = model(**__UpperCamelCase )
snake_case__ : Tuple = outputs.logits
# model predicts one of the 1000 ImageNet classes
snake_case__ : Optional[int] = logits.argmax(-1 ).item()
        # assertTrue with two arguments treats the second as a message and
        # always passes for a truthy label, so assertEqual is the intended check.
        self.assertEqual(model.config.idalabel[predicted_class_idx] , 'tabby, tabby cat' )
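# Hedged sketch of the sequence-length arithmetic described in the tester's
# __init__ above: the hybrid backbone has an effective stride of 32, and one
# [CLS] token is prepended to the patch sequence.
def _vit_hybrid_seq_length(image_size, backbone_stride=32):
    num_patches = (image_size // backbone_stride) ** 2
    return num_patches + 1


assert _vit_hybrid_seq_length(64) == 5     # the tester's 64px default above
assert _vit_hybrid_seq_length(384) == 145  # e.g. a 384px checkpoint input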
| 702 | import numpy as np
import qiskit
def UpperCamelCase__ ( A__ = 8 , A__ = None ) -> str:
snake_case__ : Optional[int] = np.random.default_rng(seed=A__ )
# Roughly 25% of the qubits will contribute to the key.
# So we take more than we need.
snake_case__ : Tuple = 6 * key_len
# Measurement basis for Alice's qubits.
snake_case__ : Tuple = rng.integers(2 , size=A__ )
# The set of states Alice will prepare.
snake_case__ : List[str] = rng.integers(2 , size=A__ )
# Measurement basis for Bob's qubits.
snake_case__ : List[Any] = rng.integers(2 , size=A__ )
# Quantum Circuit to simulate BB84
snake_case__ : Any = qiskit.QuantumCircuit(A__ , name='BB84' )
# Alice prepares her qubits according to rules above.
for index, _ in enumerate(A__ ):
if alice_state[index] == 1:
bbaa_circ.x(A__ )
if alice_basis[index] == 1:
bbaa_circ.h(A__ )
bbaa_circ.barrier()
# Bob measures the received qubits according to rules above.
for index, _ in enumerate(A__ ):
if bob_basis[index] == 1:
bbaa_circ.h(A__ )
bbaa_circ.barrier()
bbaa_circ.measure_all()
# Simulate the quantum circuit.
snake_case__ : List[str] = qiskit.Aer.get_backend('aer_simulator' )
# We only need to run one shot because the key is unique.
# Multiple shots will produce the same key.
snake_case__ : Optional[Any] = qiskit.execute(A__ , A__ , shots=1 , seed_simulator=A__ )
# Returns the result of measurement.
snake_case__ : Union[str, Any] = job.result().get_counts(A__ ).most_frequent()
# Extracting the generated key from the simulation results.
# Only keep measurement results where Alice and Bob chose the same basis.
snake_case__ : Optional[Any] = ''.join(
[
result_bit
for alice_basis_bit, bob_basis_bit, result_bit in zip(
A__ , A__ , A__ )
if alice_basis_bit == bob_basis_bit
] )
# Get final key. Pad with 0 if too short, otherwise truncate.
snake_case__ : Tuple = gen_key[:key_len] if len(A__ ) >= key_len else gen_key.ljust(A__ , '0' )
return key
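# Hedged, standalone illustration of the sifting step above: only positions
# where Alice's and Bob's randomly chosen bases agree contribute key bits,
# which is about half of them on average.
import numpy as _np

_sift_rng = _np.random.default_rng(seed=0)
_alice = _sift_rng.integers(2, size=12)
_bob = _sift_rng.integers(2, size=12)
_bits = _sift_rng.integers(2, size=12)
_sifted = ''.join(str(bit) for a, b, bit in zip(_alice, _bob, _bits) if a == b)
print(f'kept {len(_sifted)} of 12 raw bits: {_sifted}')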
if __name__ == "__main__":
print(F'''The generated key is : {bbaa(8, seed=0)}''')
from doctest import testmod
testmod()
| 699 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class __snake_case ( unittest.TestCase ):
def __init__( self , __UpperCamelCase , __UpperCamelCase=13 , __UpperCamelCase=7 , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=99 , __UpperCamelCase=32 , __UpperCamelCase=5 , __UpperCamelCase=4 , __UpperCamelCase=37 , __UpperCamelCase="gelu" , __UpperCamelCase=0.1 , __UpperCamelCase=0.1 , __UpperCamelCase=512 , __UpperCamelCase=16 , __UpperCamelCase=2 , __UpperCamelCase=0.0_2 , __UpperCamelCase=4 , ) -> Optional[Any]:
'''simple docstring'''
snake_case__ : str = parent
snake_case__ : List[str] = batch_size
snake_case__ : List[Any] = seq_length
snake_case__ : Optional[int] = is_training
snake_case__ : Union[str, Any] = use_attention_mask
snake_case__ : Dict = use_token_type_ids
snake_case__ : str = use_labels
snake_case__ : List[Any] = vocab_size
snake_case__ : Optional[Any] = hidden_size
snake_case__ : Tuple = num_hidden_layers
snake_case__ : Union[str, Any] = num_attention_heads
snake_case__ : List[Any] = intermediate_size
snake_case__ : str = hidden_act
snake_case__ : Optional[Any] = hidden_dropout_prob
snake_case__ : Dict = attention_probs_dropout_prob
snake_case__ : List[str] = max_position_embeddings
snake_case__ : int = type_vocab_size
snake_case__ : List[str] = type_sequence_label_size
snake_case__ : List[str] = initializer_range
snake_case__ : Any = num_choices
def __a ( self ) -> List[Any]:
'''simple docstring'''
snake_case__ : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case__ : List[Any] = None
if self.use_attention_mask:
snake_case__ : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
snake_case__ : Union[str, Any] = None
if self.use_token_type_ids:
snake_case__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case__ : Any = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__UpperCamelCase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def __a ( self ) -> Any:
'''simple docstring'''
snake_case__ : List[Any] = self.prepare_config_and_inputs()
snake_case__ : str = config_and_inputs
snake_case__ : List[str] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
return config, inputs_dict
@require_flax
class __snake_case ( _lowerCamelCase ,unittest.TestCase ):
__lowerCamelCase = True
__lowerCamelCase = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
def __a ( self ) -> Optional[int]:
'''simple docstring'''
snake_case__ : List[str] = FlaxRoFormerModelTester(self )
@slow
def __a ( self ) -> Optional[Any]:
'''simple docstring'''
for model_class_name in self.all_model_classes:
snake_case__ : List[str] = model_class_name.from_pretrained('junnyu/roformer_chinese_small' , from_pt=__UpperCamelCase )
snake_case__ : List[str] = model(np.ones((1, 1) ) )
self.assertIsNotNone(__UpperCamelCase )
@require_flax
class __snake_case ( unittest.TestCase ):
@slow
def __a ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case__ : Any = FlaxRoFormerForMaskedLM.from_pretrained('junnyu/roformer_chinese_base' )
snake_case__ : Union[str, Any] = jnp.array([[0, 1, 2, 3, 4, 5]] )
snake_case__ : int = model(__UpperCamelCase )[0]
snake_case__ : Optional[Any] = 50000
snake_case__ : int = (1, 6, vocab_size)
self.assertEqual(output.shape , __UpperCamelCase )
snake_case__ : List[Any] = jnp.array(
[[[-0.1_2_0_5, -1.0_2_6_5, 0.2_9_2_2], [-1.5_1_3_4, 0.1_9_7_4, 0.1_5_1_9], [-5.0_1_3_5, -3.9_0_0_3, -0.8_4_0_4]]] )
self.assertTrue(jnp.allclose(output[:, :3, :3] , __UpperCamelCase , atol=1E-4 ) )
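# Hedged sketch of the tolerance-based comparison used above, with numpy so
# it runs without JAX: an absolute tolerance guards the expected logit slice.
import numpy as _np

_expected = _np.array([[-0.1205, -1.0265, 0.2922]])
_actual = _expected + 5e-5  # simulated numerical drift in a model output
assert _np.allclose(_actual, _expected, atol=1e-4)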
| 703 | def UpperCamelCase__ ( A__ , A__ , A__ ) -> int:
if exponent == 1:
        # Reduce the base case as well so the result is always < modulo_value.
        return base % modulo_value
if exponent % 2 == 0:
snake_case__ : Dict = _modexpt(A__ , exponent // 2 , A__ ) % modulo_value
return (x * x) % modulo_value
else:
return (base * _modexpt(A__ , exponent - 1 , A__ )) % modulo_value
def UpperCamelCase__ ( A__ = 1777 , A__ = 1855 , A__ = 8 ) -> int:
snake_case__ : Tuple = base
for _ in range(1 , A__ ):
snake_case__ : Any = _modexpt(A__ , A__ , 10**digits )
return result
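# Hedged cross-check for the exponentiation-by-squaring helper above: a
# self-contained copy should agree with Python's built-in three-argument pow.
def _modexpt_demo(base, exponent, modulo):
    if exponent == 1:
        return base % modulo
    if exponent % 2 == 0:
        half = _modexpt_demo(base, exponent // 2, modulo)
        return (half * half) % modulo
    return (base * _modexpt_demo(base, exponent - 1, modulo)) % modulo


assert _modexpt_demo(1777, 1855, 10**8) == pow(1777, 1855, 10**8)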
if __name__ == "__main__":
print(F'''{solution() = }''')
| 699 | 0 |
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class __snake_case ( _lowerCamelCase ):
def __a ( self ) -> int:
'''simple docstring'''
snake_case__ : Optional[int] = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__UpperCamelCase , 'hidden_sizes' ) )
self.parent.assertTrue(hasattr(__UpperCamelCase , 'neck_hidden_sizes' ) )
self.parent.assertTrue(hasattr(__UpperCamelCase , 'num_attention_heads' ) )
class __snake_case :
def __init__( self , __UpperCamelCase , __UpperCamelCase=13 , __UpperCamelCase=32 , __UpperCamelCase=2 , __UpperCamelCase=3 , __UpperCamelCase=640 , __UpperCamelCase=4 , __UpperCamelCase="silu" , __UpperCamelCase=3 , __UpperCamelCase=32 , __UpperCamelCase=0.1 , __UpperCamelCase=0.1 , __UpperCamelCase=0.1 , __UpperCamelCase=0.0_2 , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=10 , __UpperCamelCase=None , ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : List[str] = parent
snake_case__ : Tuple = batch_size
snake_case__ : Any = image_size
snake_case__ : Any = patch_size
snake_case__ : Optional[int] = num_channels
snake_case__ : int = last_hidden_size
snake_case__ : Optional[int] = num_attention_heads
snake_case__ : List[str] = hidden_act
snake_case__ : List[Any] = conv_kernel_size
snake_case__ : List[Any] = output_stride
snake_case__ : Union[str, Any] = hidden_dropout_prob
snake_case__ : Dict = attention_probs_dropout_prob
snake_case__ : str = classifier_dropout_prob
snake_case__ : str = use_labels
snake_case__ : Tuple = is_training
snake_case__ : str = num_labels
snake_case__ : Optional[Any] = initializer_range
snake_case__ : Optional[int] = scope
def __a ( self ) -> List[Any]:
'''simple docstring'''
snake_case__ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case__ : Optional[int] = None
snake_case__ : Any = None
if self.use_labels:
snake_case__ : Optional[Any] = ids_tensor([self.batch_size] , self.num_labels )
snake_case__ : Optional[Any] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
snake_case__ : Any = self.get_config()
return config, pixel_values, labels, pixel_labels
def __a ( self ) -> Optional[int]:
'''simple docstring'''
return MobileViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def __a ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Optional[Any]:
'''simple docstring'''
snake_case__ : List[str] = MobileViTModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
snake_case__ : Optional[Any] = model(__UpperCamelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def __a ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> int:
'''simple docstring'''
snake_case__ : Optional[Any] = self.num_labels
snake_case__ : Tuple = MobileViTForImageClassification(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
snake_case__ : Optional[Any] = model(__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __a ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> List[Any]:
'''simple docstring'''
snake_case__ : str = self.num_labels
snake_case__ : int = MobileViTForSemanticSegmentation(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
snake_case__ : int = model(__UpperCamelCase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
snake_case__ : Any = model(__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def __a ( self ) -> str:
'''simple docstring'''
snake_case__ : str = self.prepare_config_and_inputs()
snake_case__ : Tuple = config_and_inputs
snake_case__ : List[Any] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class __snake_case ( _lowerCamelCase ,_lowerCamelCase ,unittest.TestCase ):
__lowerCamelCase = (
(MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
if is_torch_available()
else ()
)
__lowerCamelCase = (
{
"""feature-extraction""": MobileViTModel,
"""image-classification""": MobileViTForImageClassification,
"""image-segmentation""": MobileViTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
__lowerCamelCase = False
__lowerCamelCase = False
__lowerCamelCase = False
__lowerCamelCase = False
def __a ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case__ : Any = MobileViTModelTester(self )
snake_case__ : Optional[int] = MobileViTConfigTester(self , config_class=__UpperCamelCase , has_text_modality=__UpperCamelCase )
def __a ( self ) -> Tuple:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='MobileViT does not use inputs_embeds' )
def __a ( self ) -> str:
'''simple docstring'''
pass
@unittest.skip(reason='MobileViT does not support input and output embeddings' )
def __a ( self ) -> List[str]:
'''simple docstring'''
pass
@unittest.skip(reason='MobileViT does not output attentions' )
def __a ( self ) -> Optional[int]:
'''simple docstring'''
pass
def __a ( self ) -> Tuple:
'''simple docstring'''
snake_case__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : int = model_class(__UpperCamelCase )
snake_case__ : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case__ : Optional[int] = [*signature.parameters.keys()]
snake_case__ : Tuple = ['pixel_values']
self.assertListEqual(arg_names[:1] , __UpperCamelCase )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def __a ( self ) -> List[Any]:
'''simple docstring'''
pass
def __a ( self ) -> List[str]:
'''simple docstring'''
snake_case__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def __a ( self ) -> Optional[Any]:
'''simple docstring'''
def check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
snake_case__ : List[str] = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
with torch.no_grad():
snake_case__ : str = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
snake_case__ : Optional[Any] = outputs.hidden_states
snake_case__ : Tuple = 5
self.assertEqual(len(__UpperCamelCase ) , __UpperCamelCase )
# MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
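# Illustrative numbers (hypothetical image_size of 64): the five maps would be
# 32, 16, 8, 4 and 2 pixels per side, so after the loop divisor == 64 and
# divisor // 2 == 32 must equal the configured output_stride.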
snake_case__ : Dict = 2
for i in range(len(__UpperCamelCase ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
snake_case__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : Union[str, Any] = True
check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case__ : Optional[Any] = True
check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def __a ( self ) -> List[Any]:
'''simple docstring'''
snake_case__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__UpperCamelCase )
def __a ( self ) -> Optional[int]:
'''simple docstring'''
snake_case__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__UpperCamelCase )
@slow
def __a ( self ) -> List[str]:
'''simple docstring'''
for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case__ : List[str] = MobileViTModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
def UpperCamelCase__ ( ) -> Optional[Any]:
'''simple docstring'''
snake_case__ : Any = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class __snake_case ( unittest.TestCase ):
@cached_property
def __a ( self ) -> int:
'''simple docstring'''
return MobileViTImageProcessor.from_pretrained('apple/mobilevit-xx-small' ) if is_vision_available() else None
@slow
def __a ( self ) -> Tuple:
'''simple docstring'''
snake_case__ : List[str] = MobileViTForImageClassification.from_pretrained('apple/mobilevit-xx-small' ).to(__UpperCamelCase )
snake_case__ : Union[str, Any] = self.default_image_processor
snake_case__ : List[str] = prepare_img()
snake_case__ : List[str] = image_processor(images=__UpperCamelCase , return_tensors='pt' ).to(__UpperCamelCase )
# forward pass
with torch.no_grad():
snake_case__ : Tuple = model(**__UpperCamelCase )
# verify the logits
snake_case__ : Optional[Any] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , __UpperCamelCase )
snake_case__ : Optional[Any] = torch.tensor([-1.9_3_6_4, -1.2_3_2_7, -0.4_6_5_3] ).to(__UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCamelCase , atol=1E-4 ) )
@slow
def __a ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : Any = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
snake_case__ : str = model.to(__UpperCamelCase )
snake_case__ : Union[str, Any] = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
snake_case__ : int = prepare_img()
snake_case__ : Optional[Any] = image_processor(images=__UpperCamelCase , return_tensors='pt' ).to(__UpperCamelCase )
# forward pass
with torch.no_grad():
snake_case__ : Any = model(**__UpperCamelCase )
snake_case__ : Tuple = outputs.logits
# verify the logits
snake_case__ : Optional[int] = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , __UpperCamelCase )
snake_case__ : Dict = torch.tensor(
[
[[6.9_7_1_3, 6.9_7_8_6, 7.2_4_2_2], [7.2_8_9_3, 7.2_8_2_5, 7.4_4_4_6], [7.6_5_8_0, 7.8_7_9_7, 7.9_4_2_0]],
[[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9_8_6_8, -9.7_1_3_2], [-11.0405, -11.0221, -10.7318]],
[[-3.3_0_8_9, -2.8_5_3_9, -2.6_7_4_0], [-3.2_7_0_6, -2.5_6_2_1, -2.5_1_0_8], [-3.2_5_3_4, -2.6_6_1_5, -2.6_6_5_1]],
] , device=__UpperCamelCase , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , __UpperCamelCase , atol=1E-4 ) )
@slow
def __a ( self ) -> List[str]:
'''simple docstring'''
snake_case__ : Union[str, Any] = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
snake_case__ : List[str] = model.to(__UpperCamelCase )
snake_case__ : Any = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
snake_case__ : str = prepare_img()
snake_case__ : Optional[int] = image_processor(images=__UpperCamelCase , return_tensors='pt' ).to(__UpperCamelCase )
# forward pass
with torch.no_grad():
snake_case__ : Tuple = model(**__UpperCamelCase )
snake_case__ : str = outputs.logits.detach().cpu()
snake_case__ : Optional[int] = image_processor.post_process_semantic_segmentation(outputs=__UpperCamelCase , target_sizes=[(50, 60)] )
snake_case__ : List[str] = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , __UpperCamelCase )
snake_case__ : Tuple = image_processor.post_process_semantic_segmentation(outputs=__UpperCamelCase )
snake_case__ : Tuple = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape , __UpperCamelCase )
| 704 | # tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
lowerCAmelCase__ : Tuple = abspath(join(dirname(dirname(__file__)), '''src'''))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def UpperCamelCase__ ( A__ ) -> Optional[Any]:
from diffusers.utils.testing_utils import pytest_addoption_shared
pytest_addoption_shared(A__ )
def UpperCamelCase__ ( A__ ) -> Optional[Any]:
from diffusers.utils.testing_utils import pytest_terminal_summary_main
snake_case__ : Union[str, Any] = terminalreporter.config.getoption('--make-reports' )
if make_reports:
pytest_terminal_summary_main(A__ , id=make_reports )
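# Usage sketch: with the two hooks above active, running the suite as
# `pytest tests --make-reports=<run_id>` (the option registered by
# pytest_addoption_shared) makes the summary hook write the extra report files.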
| 699 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase__ : Dict = {
'''configuration_funnel''': ['''FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FunnelConfig'''],
'''convert_funnel_original_tf_checkpoint_to_pytorch''': [],
'''tokenization_funnel''': ['''FunnelTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ : Any = ['''FunnelTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ : str = [
'''FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FunnelBaseModel''',
'''FunnelForMaskedLM''',
'''FunnelForMultipleChoice''',
'''FunnelForPreTraining''',
'''FunnelForQuestionAnswering''',
'''FunnelForSequenceClassification''',
'''FunnelForTokenClassification''',
'''FunnelModel''',
'''FunnelPreTrainedModel''',
'''load_tf_weights_in_funnel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ : Optional[Any] = [
'''TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFFunnelBaseModel''',
'''TFFunnelForMaskedLM''',
'''TFFunnelForMultipleChoice''',
'''TFFunnelForPreTraining''',
'''TFFunnelForQuestionAnswering''',
'''TFFunnelForSequenceClassification''',
'''TFFunnelForTokenClassification''',
'''TFFunnelModel''',
'''TFFunnelPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
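# Note: the guarded try/except blocks above make each backend optional. If
# tokenizers, torch or tf is missing, the matching names are simply left out of
# the lazy module instead of failing at import time.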
| 705 | def UpperCamelCase__ ( A__ ) -> list[int]:
if length <= 0 or not isinstance(length , int ):
raise ValueError('Length must be a positive integer.' )
return [n * (2 * n - 1) for n in range(A__ )]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
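# Quick check of the closed form n * (2n - 1) used above: for length=5 the
# sequence is [0, 1, 6, 15, 28] (n = 0..4), which the first print should show.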
| 699 | 0 |
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
lowerCAmelCase__ : List[Any] = open # noqa: we just need to have a builtin inside this module to test it properly
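# Sketch of the intended use (hypothetical call signature): test_patching.py
# patches one of the names above, e.g. patch_submodule(module, 'os.path.join', mock),
# then asserts which of the imported aliases ('join', 'renamed_join', ...) see the mock.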
| 706 | import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ : Dict = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
lowerCAmelCase__ : Optional[Any] = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', F'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', F'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''',
F'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''',
F'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias'''))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', F'''decoder.layers.{i}.sa_qcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', F'''decoder.layers.{i}.sa_kcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', F'''decoder.layers.{i}.sa_qpos_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', F'''decoder.layers.{i}.sa_kpos_proj.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.weight''', F'''decoder.layers.{i}.sa_v_proj.weight'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', F'''decoder.layers.{i}.ca_qcontent_proj.weight''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', F'''decoder.layers.{i}.ca_kcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', F'''decoder.layers.{i}.ca_kpos_proj.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.weight''', F'''decoder.layers.{i}.ca_v_proj.weight'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', F'''decoder.layers.{i}.ca_qpos_sine_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', F'''decoder.layers.{i}.sa_qcontent_proj.bias''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', F'''decoder.layers.{i}.sa_kcontent_proj.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', F'''decoder.layers.{i}.sa_qpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', F'''decoder.layers.{i}.sa_kpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.bias''', F'''decoder.layers.{i}.sa_v_proj.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', F'''decoder.layers.{i}.ca_qcontent_proj.bias''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', F'''decoder.layers.{i}.ca_kcontent_proj.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', F'''decoder.layers.{i}.ca_kpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.bias''', F'''decoder.layers.{i}.ca_v_proj.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', F'''decoder.layers.{i}.ca_qpos_sine_proj.bias''')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
('''transformer.decoder.ref_point_head.layers.0.weight''', '''decoder.ref_point_head.layers.0.weight'''),
('''transformer.decoder.ref_point_head.layers.0.bias''', '''decoder.ref_point_head.layers.0.bias'''),
('''transformer.decoder.ref_point_head.layers.1.weight''', '''decoder.ref_point_head.layers.1.weight'''),
('''transformer.decoder.ref_point_head.layers.1.bias''', '''decoder.ref_point_head.layers.1.bias'''),
('''transformer.decoder.query_scale.layers.0.weight''', '''decoder.query_scale.layers.0.weight'''),
('''transformer.decoder.query_scale.layers.0.bias''', '''decoder.query_scale.layers.0.bias'''),
('''transformer.decoder.query_scale.layers.1.weight''', '''decoder.query_scale.layers.1.weight'''),
('''transformer.decoder.query_scale.layers.1.bias''', '''decoder.query_scale.layers.1.bias'''),
('''transformer.decoder.layers.0.ca_qpos_proj.weight''', '''decoder.layers.0.ca_qpos_proj.weight'''),
('''transformer.decoder.layers.0.ca_qpos_proj.bias''', '''decoder.layers.0.ca_qpos_proj.bias'''),
]
)
def UpperCamelCase__ ( A__ , A__ , A__ ) -> List[str]:
snake_case__ : int = state_dict.pop(A__ )
snake_case__ : Union[str, Any] = val
def UpperCamelCase__ ( A__ ) -> int:
snake_case__ : List[Any] = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
snake_case__ : Any = key.replace('backbone.0.body' , 'backbone.conv_encoder.model' )
snake_case__ : Optional[int] = value
else:
snake_case__ : Optional[int] = value
return new_state_dict
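# For example, a checkpoint key such as 'backbone.0.body.layer1.0.conv1.weight'
# (illustrative name) becomes 'backbone.conv_encoder.model.layer1.0.conv1.weight';
# every other key is copied through unchanged.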
def UpperCamelCase__ ( A__ , A__=False ) -> Optional[int]:
snake_case__ : Optional[int] = ''
if is_panoptic:
snake_case__ : Tuple = 'conditional_detr.'
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
snake_case__ : int = state_dict.pop(F"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""" )
snake_case__ : str = state_dict.pop(F"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
snake_case__ : Union[str, Any] = in_proj_weight[:256, :]
snake_case__ : Union[str, Any] = in_proj_bias[:256]
snake_case__ : Union[str, Any] = in_proj_weight[256:512, :]
snake_case__ : Optional[Any] = in_proj_bias[256:512]
snake_case__ : List[str] = in_proj_weight[-256:, :]
snake_case__ : Tuple = in_proj_bias[-256:]
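# The slicing above mirrors how PyTorch packs attention projections: with a hidden
# size of 256, nn.MultiheadAttention stores q, k and v stacked in one (768, 256)
# in_proj_weight, so rows [0:256] are the query weights, [256:512] the key weights
# and [512:768] (written [-256:] here) the value weights. Minimal standalone check:
#   packed = torch.randn(3 * 256, 256)
#   q_w, k_w, v_w = packed[:256, :], packed[256:512, :], packed[-256:, :]
#   assert q_w.shape == k_w.shape == v_w.shape == (256, 256)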
def UpperCamelCase__ ( ) -> Tuple:
snake_case__ : int = 'http://images.cocodataset.org/val2017/000000039769.jpg'
snake_case__ : str = Image.open(requests.get(A__ , stream=A__ ).raw )
return im
@torch.no_grad()
def UpperCamelCase__ ( A__ , A__ ) -> str:
snake_case__ : List[Any] = ConditionalDetrConfig()
# set backbone and dilation attributes
if "resnet101" in model_name:
snake_case__ : Any = 'resnet101'
if "dc5" in model_name:
snake_case__ : Any = True
snake_case__ : int = 'panoptic' in model_name
if is_panoptic:
snake_case__ : str = 250
else:
snake_case__ : Union[str, Any] = 91
snake_case__ : Optional[int] = 'huggingface/label-files'
snake_case__ : Optional[Any] = 'coco-detection-id2label.json'
snake_case__ : str = json.load(open(hf_hub_download(A__ , A__ , repo_type='dataset' ) , 'r' ) )
snake_case__ : List[Any] = {int(A__ ): v for k, v in idalabel.items()}
snake_case__ : Any = idalabel
snake_case__ : int = {v: k for k, v in idalabel.items()}
# load image processor
snake_case__ : List[Any] = 'coco_panoptic' if is_panoptic else 'coco_detection'
snake_case__ : List[Any] = ConditionalDetrImageProcessor(format=A__ )
# prepare image
snake_case__ : List[str] = prepare_img()
snake_case__ : Any = image_processor(images=A__ , return_tensors='pt' )
snake_case__ : Dict = encoding['pixel_values']
logger.info(F"""Converting model {model_name}...""" )
# load original model from torch hub
snake_case__ : Any = torch.hub.load('DeppMeng/ConditionalDETR' , A__ , pretrained=A__ ).eval()
snake_case__ : Tuple = conditional_detr.state_dict()
# rename keys
for src, dest in rename_keys:
if is_panoptic:
snake_case__ : List[Any] = 'conditional_detr.' + src
rename_key(A__ , A__ , A__ )
snake_case__ : Dict = rename_backbone_keys(A__ )
# query, key and value matrices need special treatment
read_in_q_k_v(A__ , is_panoptic=A__ )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
snake_case__ : Optional[int] = 'conditional_detr.model.' if is_panoptic else 'model.'
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith('conditional_detr' )
and not key.startswith('class_labels_classifier' )
and not key.startswith('bbox_predictor' )
):
snake_case__ : List[Any] = state_dict.pop(A__ )
snake_case__ : Optional[int] = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
snake_case__ : str = state_dict.pop(A__ )
snake_case__ : List[Any] = val
elif key.startswith('bbox_attention' ) or key.startswith('mask_head' ):
continue
else:
snake_case__ : Union[str, Any] = state_dict.pop(A__ )
snake_case__ : Dict = val
else:
if not key.startswith('class_labels_classifier' ) and not key.startswith('bbox_predictor' ):
snake_case__ : List[Any] = state_dict.pop(A__ )
snake_case__ : Optional[int] = val
# finally, create HuggingFace model and load state dict
snake_case__ : Union[str, Any] = ConditionalDetrForSegmentation(A__ ) if is_panoptic else ConditionalDetrForObjectDetection(A__ )
model.load_state_dict(A__ )
model.eval()
model.push_to_hub(repo_id=A__ , organization='DepuMeng' , commit_message='Add model' )
# verify our conversion
snake_case__ : Tuple = conditional_detr(A__ )
snake_case__ : str = model(A__ )
assert torch.allclose(outputs.logits , original_outputs['pred_logits'] , atol=1e-4 )
assert torch.allclose(outputs.pred_boxes , original_outputs['pred_boxes'] , atol=1e-4 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs['pred_masks'] , atol=1e-4 )
# Save model and image processor
logger.info(F"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
Path(A__ ).mkdir(exist_ok=A__ )
model.save_pretrained(A__ )
image_processor.save_pretrained(A__ )
if __name__ == "__main__":
lowerCAmelCase__ : Any = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''conditional_detr_resnet50''',
type=str,
help='''Name of the CONDITIONAL_DETR model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
lowerCAmelCase__ : int = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
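# Example invocation (hypothetical script/output names):
#   python convert_conditional_detr_original_checkpoint.py \
#       --model_name conditional_detr_resnet50 \
#       --pytorch_dump_folder_path ./conditional_detr_resnet50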
| 699 | 0 |
from collections import namedtuple
lowerCAmelCase__ : Union[str, Any] = namedtuple('''from_to''', '''from_ to''')
lowerCAmelCase__ : Tuple = {
'''cubicmeter''': from_to(1, 1),
'''litre''': from_to(0.0_01, 10_00),
'''kilolitre''': from_to(1, 1),
'''gallon''': from_to(0.0_04_54, 2_64.1_72),
'''cubicyard''': from_to(0.7_64_55, 1.3_07_95),
'''cubicfoot''': from_to(0.0_28, 35.31_47),
'''cup''': from_to(0.0_00_23_65_88, 42_26.75),
}
def UpperCamelCase__ ( A__ , A__ , A__ ) -> float:
if from_type not in METRIC_CONVERSION:
raise ValueError(
F"""Invalid 'from_type' value: {from_type!r} Supported values are:\n"""
+ ', '.join(A__ ) )
if to_type not in METRIC_CONVERSION:
raise ValueError(
F"""Invalid 'to_type' value: {to_type!r}. Supported values are:\n"""
+ ', '.join(A__ ) )
return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
if __name__ == "__main__":
import doctest
doctest.testmod()
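# Worked example with the table above: 2 litres -> gallons is
# 2 * 0.001 (litre.from_: litres to cubic metres) * 264.172 (gallon.to:
# cubic metres to gallons) ≈ 0.5283 gallons.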
| 707 | from collections import namedtuple
lowerCAmelCase__ : Union[str, Any] = namedtuple('''from_to''', '''from_ to''')
lowerCAmelCase__ : Tuple = {
'''cubicmeter''': from_to(1, 1),
'''litre''': from_to(0.0_01, 10_00),
'''kilolitre''': from_to(1, 1),
'''gallon''': from_to(0.0_04_54, 2_64.1_72),
'''cubicyard''': from_to(0.7_64_55, 1.3_07_95),
'''cubicfoot''': from_to(0.0_28, 35.31_47),
'''cup''': from_to(0.0_00_23_65_88, 42_26.75),
}
def UpperCamelCase__ ( A__ , A__ , A__ ) -> float:
if from_type not in METRIC_CONVERSION:
raise ValueError(
F"""Invalid 'from_type' value: {from_type!r} Supported values are:\n"""
+ ', '.join(A__ ) )
if to_type not in METRIC_CONVERSION:
raise ValueError(
F"""Invalid 'to_type' value: {to_type!r}. Supported values are:\n"""
+ ', '.join(A__ ) )
return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
if __name__ == "__main__":
import doctest
doctest.testmod()
| 699 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __snake_case ( _lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,unittest.TestCase ):
__lowerCamelCase = StableDiffusionInstructPixaPixPipeline
__lowerCamelCase = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width""", """cross_attention_kwargs"""}
__lowerCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
__lowerCamelCase = IMAGE_TO_IMAGE_IMAGE_PARAMS
__lowerCamelCase = IMAGE_TO_IMAGE_IMAGE_PARAMS
def __a ( self ) -> List[Any]:
'''simple docstring'''
torch.manual_seed(0 )
snake_case__ : Tuple = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=8 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
snake_case__ : Any = PNDMScheduler(skip_prk_steps=__UpperCamelCase )
torch.manual_seed(0 )
snake_case__ : Dict = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
snake_case__ : int = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
snake_case__ : Tuple = CLIPTextModel(__UpperCamelCase )
snake_case__ : Optional[int] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
snake_case__ : Optional[int] = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def __a ( self , __UpperCamelCase , __UpperCamelCase=0 ) -> Dict:
'''simple docstring'''
snake_case__ : Optional[Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(__UpperCamelCase ) ).to(__UpperCamelCase )
snake_case__ : int = image.cpu().permute(0 , 2 , 3 , 1 )[0]
snake_case__ : Union[str, Any] = Image.fromarray(np.uinta(__UpperCamelCase ) ).convert('RGB' )
if str(__UpperCamelCase ).startswith('mps' ):
snake_case__ : str = torch.manual_seed(__UpperCamelCase )
else:
snake_case__ : Dict = torch.Generator(device=__UpperCamelCase ).manual_seed(__UpperCamelCase )
snake_case__ : str = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'image_guidance_scale': 1,
'output_type': 'numpy',
}
return inputs
def __a ( self ) -> Tuple:
'''simple docstring'''
snake_case__ : Dict = 'cpu' # ensure determinism for the device-dependent torch.Generator
snake_case__ : Optional[int] = self.get_dummy_components()
snake_case__ : str = StableDiffusionInstructPixaPixPipeline(**__UpperCamelCase )
snake_case__ : Optional[int] = sd_pipe.to(__UpperCamelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCamelCase )
snake_case__ : Tuple = self.get_dummy_inputs(__UpperCamelCase )
snake_case__ : List[str] = sd_pipe(**__UpperCamelCase ).images
snake_case__ : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
snake_case__ : str = np.array([0.7_5_2_6, 0.3_7_5_0, 0.4_5_4_7, 0.6_1_1_7, 0.5_8_6_6, 0.5_0_1_6, 0.4_3_2_7, 0.5_6_4_2, 0.4_8_1_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __a ( self ) -> Any:
'''simple docstring'''
snake_case__ : Dict = 'cpu' # ensure determinism for the device-dependent torch.Generator
snake_case__ : Union[str, Any] = self.get_dummy_components()
snake_case__ : Optional[Any] = StableDiffusionInstructPixaPixPipeline(**__UpperCamelCase )
snake_case__ : List[Any] = sd_pipe.to(__UpperCamelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCamelCase )
snake_case__ : Union[str, Any] = self.get_dummy_inputs(__UpperCamelCase )
snake_case__ : List[str] = 'french fries'
snake_case__ : Optional[Any] = sd_pipe(**__UpperCamelCase , negative_prompt=__UpperCamelCase )
snake_case__ : Union[str, Any] = output.images
snake_case__ : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
snake_case__ : Any = np.array([0.7_5_1_1, 0.3_6_4_2, 0.4_5_5_3, 0.6_2_3_6, 0.5_7_9_7, 0.5_0_1_3, 0.4_3_4_3, 0.5_6_1_1, 0.4_8_3_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __a ( self ) -> int:
'''simple docstring'''
snake_case__ : Optional[int] = 'cpu' # ensure determinism for the device-dependent torch.Generator
snake_case__ : List[str] = self.get_dummy_components()
snake_case__ : str = StableDiffusionInstructPixaPixPipeline(**__UpperCamelCase )
snake_case__ : str = sd_pipe.to(__UpperCamelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCamelCase )
snake_case__ : Dict = self.get_dummy_inputs(__UpperCamelCase )
snake_case__ : Any = [inputs['prompt']] * 2
snake_case__ : Optional[int] = np.array(inputs['image'] ).astype(np.floataa ) / 255.0
snake_case__ : Optional[int] = torch.from_numpy(__UpperCamelCase ).unsqueeze(0 ).to(__UpperCamelCase )
snake_case__ : Any = image / 2 + 0.5
snake_case__ : Optional[Any] = image.permute(0 , 3 , 1 , 2 )
snake_case__ : List[Any] = image.repeat(2 , 1 , 1 , 1 )
snake_case__ : Optional[int] = sd_pipe(**__UpperCamelCase ).images
snake_case__ : Union[str, Any] = image[-1, -3:, -3:, -1]
assert image.shape == (2, 32, 32, 3)
snake_case__ : List[Any] = np.array([0.5_8_1_2, 0.5_7_4_8, 0.5_2_2_2, 0.5_9_0_8, 0.5_6_9_5, 0.7_1_7_4, 0.6_8_0_4, 0.5_5_2_3, 0.5_5_7_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __a ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : Optional[int] = 'cpu' # ensure determinism for the device-dependent torch.Generator
snake_case__ : Optional[int] = self.get_dummy_components()
snake_case__ : Tuple = EulerAncestralDiscreteScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='scaled_linear' )
snake_case__ : int = StableDiffusionInstructPixaPixPipeline(**__UpperCamelCase )
snake_case__ : List[str] = sd_pipe.to(__UpperCamelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCamelCase )
snake_case__ : str = self.get_dummy_inputs(__UpperCamelCase )
snake_case__ : Any = sd_pipe(**__UpperCamelCase ).images
snake_case__ : int = image[0, -3:, -3:, -1]
snake_case__ : Tuple = [round(__UpperCamelCase , 4 ) for x in image_slice.flatten().tolist()]
print(','.join([str(x ) for x in slice] ) )
assert image.shape == (1, 32, 32, 3)
snake_case__ : List[Any] = np.array([0.7_4_1_7, 0.3_8_4_2, 0.4_7_3_2, 0.5_7_7_6, 0.5_8_9_1, 0.5_1_3_9, 0.4_0_5_2, 0.5_6_7_3, 0.4_9_8_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __a ( self ) -> int:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def __a ( self ) -> Any:
'''simple docstring'''
snake_case__ : Optional[int] = self.get_dummy_components()
snake_case__ : int = StableDiffusionInstructPixaPixPipeline(**__UpperCamelCase )
snake_case__ : Union[str, Any] = VaeImageProcessor(do_resize=__UpperCamelCase , do_normalize=__UpperCamelCase )
snake_case__ : Optional[int] = pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
snake_case__ : Optional[Any] = pipe(**self.get_dummy_inputs_by_type(__UpperCamelCase , input_image_type='pt' ) )[0]
snake_case__ : Union[str, Any] = components['vae']
snake_case__ : str = self.get_dummy_inputs_by_type(__UpperCamelCase , input_image_type='pt' )
for image_param in self.image_latents_params:
if image_param in inputs.keys():
snake_case__ : List[str] = vae.encode(inputs[image_param] ).latent_dist.mode()
snake_case__ : Dict = pipe(**__UpperCamelCase )[0]
snake_case__ : str = np.abs(out - out_latents_inputs ).max()
self.assertLess(__UpperCamelCase , 1E-4 , 'passing latents as image input generate different result from passing image' )
@slow
@require_torch_gpu
class __snake_case ( unittest.TestCase ):
def __a ( self ) -> List[str]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __a ( self , __UpperCamelCase=0 ) -> Dict:
'''simple docstring'''
snake_case__ : Optional[Any] = torch.manual_seed(__UpperCamelCase )
snake_case__ : List[str] = load_image(
'https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg' )
snake_case__ : int = {
'prompt': 'turn him into a cyborg',
'image': image,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 7.5,
'image_guidance_scale': 1.0,
'output_type': 'numpy',
}
return inputs
def __a ( self ) -> Dict:
'''simple docstring'''
snake_case__ : Any = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=__UpperCamelCase )
pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
pipe.enable_attention_slicing()
snake_case__ : Tuple = self.get_inputs()
snake_case__ : List[Any] = pipe(**__UpperCamelCase ).images
snake_case__ : Union[str, Any] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
snake_case__ : Dict = np.array([0.5_9_0_2, 0.6_0_1_5, 0.6_0_2_7, 0.5_9_8_3, 0.6_0_9_2, 0.6_0_6_1, 0.5_7_6_5, 0.5_7_8_5, 0.5_5_5_5] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def __a ( self ) -> str:
'''simple docstring'''
snake_case__ : Any = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=__UpperCamelCase )
snake_case__ : Tuple = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
pipe.enable_attention_slicing()
snake_case__ : Dict = self.get_inputs()
snake_case__ : Dict = pipe(**__UpperCamelCase ).images
snake_case__ : Union[str, Any] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
snake_case__ : List[Any] = np.array([0.6_5_7_8, 0.6_8_1_7, 0.6_9_7_2, 0.6_7_6_1, 0.6_8_5_6, 0.6_9_1_6, 0.6_4_2_8, 0.6_5_1_6, 0.6_3_0_1] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def __a ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case__ : Tuple = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=__UpperCamelCase )
snake_case__ : Tuple = DDIMScheduler.from_config(pipe.scheduler.config )
pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
pipe.enable_attention_slicing()
snake_case__ : Optional[int] = self.get_inputs()
snake_case__ : Optional[int] = pipe(**__UpperCamelCase ).images
snake_case__ : Tuple = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
snake_case__ : int = np.array([0.3_8_2_8, 0.3_8_3_4, 0.3_8_1_8, 0.3_7_9_2, 0.3_8_6_5, 0.3_7_5_2, 0.3_7_9_2, 0.3_8_4_7, 0.3_7_5_3] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def __a ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : int = 0
def callback_fn(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> None:
snake_case__ : List[Any] = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
snake_case__ : Any = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
snake_case__ : int = latents[0, -3:, -3:, -1]
snake_case__ : List[str] = np.array([-0.2_4_6_3, -0.4_6_4_4, -0.9_7_5_6, 1.5_1_7_6, 1.4_4_1_4, 0.7_8_6_6, 0.9_8_9_7, 0.8_5_2_1, 0.7_9_8_3] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
elif step == 2:
snake_case__ : Dict = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
snake_case__ : Dict = latents[0, -3:, -3:, -1]
snake_case__ : Optional[Any] = np.array([-0.2_6_4_4, -0.4_6_2_6, -0.9_6_5_3, 1.5_1_7_6, 1.4_5_5_1, 0.7_6_8_6, 0.9_8_0_5, 0.8_4_5_2, 0.8_1_1_5] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
snake_case__ : str = False
snake_case__ : List[Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=__UpperCamelCase , torch_dtype=torch.floataa )
snake_case__ : int = pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
pipe.enable_attention_slicing()
snake_case__ : int = self.get_inputs()
pipe(**__UpperCamelCase , callback=__UpperCamelCase , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def __a ( self ) -> Any:
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
snake_case__ : str = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=__UpperCamelCase , torch_dtype=torch.floataa )
snake_case__ : Dict = pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
snake_case__ : str = self.get_inputs()
snake_case__ : Tuple = pipe(**__UpperCamelCase )
snake_case__ : List[Any] = torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 10**9
def __a ( self ) -> int:
'''simple docstring'''
snake_case__ : int = self.get_inputs()
# resize to resolution that is divisible by 8 but not 16 or 32
snake_case__ : Tuple = inputs['image'].resize((504, 504) )
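# (504 = 8 * 63, so the VAE's 8x downsampling still yields integer latent sizes,
# while 504 / 16 and 504 / 32 do not divide evenly, which is exactly the case
# this test exercises.)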
snake_case__ : str = 'timbrooks/instruct-pix2pix'
snake_case__ : Tuple = StableDiffusionInstructPixaPixPipeline.from_pretrained(
__UpperCamelCase , safety_checker=__UpperCamelCase , )
pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
pipe.enable_attention_slicing()
snake_case__ : str = pipe(**__UpperCamelCase )
snake_case__ : List[Any] = output.images[0]
snake_case__ : List[Any] = image[255:258, 383:386, -1]
assert image.shape == (504, 504, 3)
snake_case__ : List[str] = np.array([0.2_7_2_6, 0.2_5_2_9, 0.2_6_6_4, 0.2_6_5_5, 0.2_6_4_1, 0.2_6_4_2, 0.2_5_9_1, 0.2_6_4_9, 0.2_5_9_0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
| 708 | import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase__ : Tuple = logging.get_logger(__name__)
lowerCAmelCase__ : Union[str, Any] = '''▁'''
lowerCAmelCase__ : List[Any] = {'''vocab_file''': '''sentencepiece.bpe.model'''}
lowerCAmelCase__ : Optional[Any] = {
'''vocab_file''': {
'''facebook/xglm-564M''': '''https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model''',
}
}
lowerCAmelCase__ : str = {
'''facebook/xglm-564M''': 20_48,
}
class __snake_case ( _lowerCamelCase ):
__lowerCamelCase = VOCAB_FILES_NAMES
__lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
__lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCamelCase = ["""input_ids""", """attention_mask"""]
def __init__( self , __UpperCamelCase , __UpperCamelCase="<s>" , __UpperCamelCase="</s>" , __UpperCamelCase="</s>" , __UpperCamelCase="<s>" , __UpperCamelCase="<unk>" , __UpperCamelCase="<pad>" , __UpperCamelCase = None , **__UpperCamelCase , ) -> None:
'''simple docstring'''
snake_case__ : Any = {} if sp_model_kwargs is None else sp_model_kwargs
# Compatibility with the original tokenizer
snake_case__ : Tuple = 7
snake_case__ : Dict = [F"""<madeupword{i}>""" for i in range(self.num_madeup_words )]
snake_case__ : Union[str, Any] = kwargs.get('additional_special_tokens' , [] )
kwargs["additional_special_tokens"] += [
word for word in madeup_words if word not in kwargs["additional_special_tokens"]
]
super().__init__(
bos_token=__UpperCamelCase , eos_token=__UpperCamelCase , unk_token=__UpperCamelCase , sep_token=__UpperCamelCase , cls_token=__UpperCamelCase , pad_token=__UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCamelCase , )
snake_case__ : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__UpperCamelCase ) )
snake_case__ : Optional[Any] = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
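# Concrete example from the table: ',' has spm id 3, and 3 + fairseq_offset (1) == 4,
# its fairseq id; the four special tokens are instead hard-mapped through
# fairseq_tokens_to_ids below.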
snake_case__ : Tuple = 1
# Mimic fairseq token-to-id alignment for the first 4 token
snake_case__ : Tuple = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
snake_case__ : List[Any] = len(self.sp_model )
snake_case__ : Optional[Any] = {F"""<madeupword{i}>""": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
self.fairseq_tokens_to_ids.update(__UpperCamelCase )
snake_case__ : Union[str, Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self ) -> List[Any]:
'''simple docstring'''
snake_case__ : Union[str, Any] = self.__dict__.copy()
snake_case__ : Optional[Any] = None
snake_case__ : Tuple = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , __UpperCamelCase ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : Union[str, Any] = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
snake_case__ : Any = {}
snake_case__ : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def __a ( self , __UpperCamelCase , __UpperCamelCase = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.sep_token_id] + token_ids_a
snake_case__ : str = [self.sep_token_id]
return sep + token_ids_a + sep + sep + token_ids_a
def __a ( self , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__UpperCamelCase , token_ids_a=__UpperCamelCase , already_has_special_tokens=__UpperCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(__UpperCamelCase ))
return [1] + ([0] * len(__UpperCamelCase )) + [1, 1] + ([0] * len(__UpperCamelCase ))
def __a ( self , __UpperCamelCase , __UpperCamelCase = None ) -> List[int]:
'''simple docstring'''
snake_case__ : int = [self.sep_token_id]
if token_ids_a is None:
return len(sep + token_ids_a ) * [0]
return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0]
@property
def __a ( self ) -> Tuple:
'''simple docstring'''
return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
def __a ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : int = {self.convert_ids_to_tokens(__UpperCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __a ( self , __UpperCamelCase ) -> List[str]:
'''simple docstring'''
return self.sp_model.encode(__UpperCamelCase , out_type=__UpperCamelCase )
def __a ( self , __UpperCamelCase ) -> Union[str, Any]:
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
snake_case__ : Optional[Any] = self.sp_model.PieceToId(__UpperCamelCase )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def __a ( self , __UpperCamelCase ) -> Dict:
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def __a ( self , __UpperCamelCase ) -> int:
'''simple docstring'''
snake_case__ : int = ''.join(__UpperCamelCase ).replace(SPIECE_UNDERLINE , ' ' ).strip()
return out_string
def __a ( self , __UpperCamelCase , __UpperCamelCase = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(__UpperCamelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
snake_case__ : List[str] = os.path.join(
__UpperCamelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __UpperCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__UpperCamelCase , 'wb' ) as fi:
snake_case__ : Any = self.sp_model.serialized_model_proto()
fi.write(__UpperCamelCase )
return (out_vocab_file,)
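# Usage sketch (assuming the public transformers entry point for this tokenizer):
#   tok = XGLMTokenizer.from_pretrained('facebook/xglm-564M')
#   ids = tok('Hello world')['input_ids']  # spm ids shifted by fairseq_offset == 1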
| 699 | 0 |
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def UpperCamelCase__ ( ) -> Tuple:
snake_case__ : Tuple = ArgumentParser('Transformers CLI tool' , usage='transformers-cli <command> [<args>]' )
snake_case__ : Dict = parser.add_subparsers(help='transformers-cli command helpers' )
# Register commands
ConvertCommand.register_subcommand(A__ )
DownloadCommand.register_subcommand(A__ )
EnvironmentCommand.register_subcommand(A__ )
RunCommand.register_subcommand(A__ )
ServeCommand.register_subcommand(A__ )
UserCommands.register_subcommand(A__ )
AddNewModelCommand.register_subcommand(A__ )
AddNewModelLikeCommand.register_subcommand(A__ )
LfsCommands.register_subcommand(A__ )
PTtoTFCommand.register_subcommand(A__ )
# Let's go
snake_case__ : Any = parser.parse_args()
if not hasattr(A__ , 'func' ):
parser.print_help()
exit(1 )
# Run
snake_case__ : List[str] = args.func(A__ )
service.run()
if __name__ == "__main__":
main()
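# Example invocations once installed as the `transformers-cli` entry point
# (illustrative arguments):
#   transformers-cli env
#   transformers-cli download bert-base-uncased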
| 709 | import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
lowerCAmelCase__ : Any = logging.get_logger(__name__)
lowerCAmelCase__ : List[Any] = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
lowerCAmelCase__ : Any = {
'''vocab_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
lowerCAmelCase__ : Any = {
'''vocab_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
lowerCAmelCase__ : Tuple = {
'''vocab_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'''
),
},
}
lowerCAmelCase__ : Dict = {
'''facebook/dpr-ctx_encoder-single-nq-base''': 5_12,
'''facebook/dpr-ctx_encoder-multiset-base''': 5_12,
}
lowerCAmelCase__ : Union[str, Any] = {
'''facebook/dpr-question_encoder-single-nq-base''': 5_12,
'''facebook/dpr-question_encoder-multiset-base''': 5_12,
}
lowerCAmelCase__ : Optional[Any] = {
'''facebook/dpr-reader-single-nq-base''': 5_12,
'''facebook/dpr-reader-multiset-base''': 5_12,
}
lowerCAmelCase__ : Tuple = {
'''facebook/dpr-ctx_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-ctx_encoder-multiset-base''': {'''do_lower_case''': True},
}
lowerCAmelCase__ : Any = {
'''facebook/dpr-question_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-question_encoder-multiset-base''': {'''do_lower_case''': True},
}
lowerCAmelCase__ : List[str] = {
'''facebook/dpr-reader-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-reader-multiset-base''': {'''do_lower_case''': True},
}
class __snake_case ( _lowerCamelCase ):
__lowerCamelCase = VOCAB_FILES_NAMES
__lowerCamelCase = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
__lowerCamelCase = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCamelCase = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
__lowerCamelCase = DPRContextEncoderTokenizer
class __snake_case ( _lowerCamelCase ):
__lowerCamelCase = VOCAB_FILES_NAMES
__lowerCamelCase = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
__lowerCamelCase = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCamelCase = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
__lowerCamelCase = DPRQuestionEncoderTokenizer
lowerCAmelCase__ : Tuple = collections.namedtuple(
'''DPRSpanPrediction''', ['''span_score''', '''relevance_score''', '''doc_id''', '''start_index''', '''end_index''', '''text''']
)
lowerCAmelCase__ : List[Any] = collections.namedtuple('''DPRReaderOutput''', ['''start_logits''', '''end_logits''', '''relevance_logits'''])
lowerCAmelCase__ : int = r'''
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) into a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
is provided).
- `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output a batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `\'tf\'`: Return TensorFlow `tf.constant` objects.
- `\'pt\'`: Return PyTorch `torch.Tensor` objects.
- `\'np\'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
            Whether or not to return the attention mask. If not set, will return the attention mask according to the
            specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Return:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
'''
@add_start_docstrings(_lowerCamelCase )
class __snake_case :
def __call__( self , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = False , __UpperCamelCase = False , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , **__UpperCamelCase , ) -> BatchEncoding:
'''simple docstring'''
if titles is None and texts is None:
return super().__call__(
__UpperCamelCase , padding=__UpperCamelCase , truncation=__UpperCamelCase , max_length=__UpperCamelCase , return_tensors=__UpperCamelCase , return_attention_mask=__UpperCamelCase , **__UpperCamelCase , )
elif titles is None or texts is None:
snake_case__ : Optional[Any] = titles if texts is None else texts
return super().__call__(
__UpperCamelCase , __UpperCamelCase , padding=__UpperCamelCase , truncation=__UpperCamelCase , max_length=__UpperCamelCase , return_tensors=__UpperCamelCase , return_attention_mask=__UpperCamelCase , **__UpperCamelCase , )
snake_case__ : int = titles if not isinstance(__UpperCamelCase , __UpperCamelCase ) else [titles]
snake_case__ : Optional[int] = texts if not isinstance(__UpperCamelCase , __UpperCamelCase ) else [texts]
snake_case__ : List[Any] = len(__UpperCamelCase )
snake_case__ : str = questions if not isinstance(__UpperCamelCase , __UpperCamelCase ) else [questions] * n_passages
assert len(__UpperCamelCase ) == len(
            __UpperCamelCase ), F"""There should be as many titles as texts, but got {len(__UpperCamelCase )} titles and {len(__UpperCamelCase )} texts."""
snake_case__ : Optional[int] = super().__call__(__UpperCamelCase , __UpperCamelCase , padding=__UpperCamelCase , truncation=__UpperCamelCase )['input_ids']
snake_case__ : Optional[Any] = super().__call__(__UpperCamelCase , add_special_tokens=__UpperCamelCase , padding=__UpperCamelCase , truncation=__UpperCamelCase )['input_ids']
snake_case__ : Union[str, Any] = {
'input_ids': [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(__UpperCamelCase , __UpperCamelCase )
]
}
if return_attention_mask is not False:
snake_case__ : List[Any] = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
snake_case__ : Union[str, Any] = attention_mask
return self.pad(__UpperCamelCase , padding=__UpperCamelCase , max_length=__UpperCamelCase , return_tensors=__UpperCamelCase )
def __a ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = 16 , __UpperCamelCase = 64 , __UpperCamelCase = 4 , ) -> List[DPRSpanPrediction]:
'''simple docstring'''
snake_case__ : Optional[Any] = reader_input['input_ids']
        snake_case__ , snake_case__ , snake_case__ = reader_output[:3]  # annotating a tuple-unpacking target is invalid syntax
snake_case__ : List[str] = len(__UpperCamelCase )
snake_case__ : Tuple = sorted(range(__UpperCamelCase ) , reverse=__UpperCamelCase , key=relevance_logits.__getitem__ )
snake_case__ : List[DPRReaderOutput] = []
for doc_id in sorted_docs:
snake_case__ : Tuple = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
snake_case__ : int = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
snake_case__ : Union[str, Any] = sequence_ids.index(self.pad_token_id )
else:
snake_case__ : str = len(__UpperCamelCase )
snake_case__ : Dict = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=__UpperCamelCase , top_spans=__UpperCamelCase , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=__UpperCamelCase , start_index=__UpperCamelCase , end_index=__UpperCamelCase , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(__UpperCamelCase ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def __a ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , ) -> List[DPRSpanPrediction]:
'''simple docstring'''
snake_case__ : Any = []
for start_index, start_score in enumerate(__UpperCamelCase ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
snake_case__ : str = sorted(__UpperCamelCase , key=lambda __UpperCamelCase : x[1] , reverse=__UpperCamelCase )
snake_case__ : Any = []
for (start_index, end_index), score in scores:
assert start_index <= end_index, F"""Wrong span indices: [{start_index}:{end_index}]"""
snake_case__ : str = end_index - start_index + 1
assert length <= max_answer_length, F"""Span is too long: {length} > {max_answer_length}"""
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(__UpperCamelCase ) == top_spans:
break
return chosen_span_intervals
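# Hedged, self-contained restatement of the span-selection logic above (an
# illustrative sketch, not part of the original file): every span of up to
# `max_answer_length` tokens is scored as start_logit + end_logit, candidates
# are sorted by score, and spans nested inside (or containing) an already
# chosen span are skipped until `top_spans` spans are collected. Plain-list
# logits are an assumption of the sketch.
def _best_spans_sketch(start_logits, end_logits, max_answer_length, top_spans):
    scores = []
    for start_index, start_score in enumerate(start_logits):
        for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
            scores.append(((start_index, start_index + answer_length), start_score + end_score))
    scores.sort(key=lambda item: item[1], reverse=True)
    chosen = []
    for (start_index, end_index), _score in scores:
        if any(
            start_index <= prev_start <= prev_end <= end_index
            or prev_start <= start_index <= end_index <= prev_end
            for prev_start, prev_end in chosen
        ):
            continue  # skip spans that contain / are contained in a chosen span
        chosen.append((start_index, end_index))
        if len(chosen) == top_spans:
            break
    return chosen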
@add_end_docstrings(_lowerCamelCase )
class __snake_case ( _lowerCamelCase ,_lowerCamelCase ):
__lowerCamelCase = VOCAB_FILES_NAMES
__lowerCamelCase = READER_PRETRAINED_VOCAB_FILES_MAP
__lowerCamelCase = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCamelCase = READER_PRETRAINED_INIT_CONFIGURATION
__lowerCamelCase = ["""input_ids""", """attention_mask"""]
__lowerCamelCase = DPRReaderTokenizer
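# Hedged usage sketch for the reader tokenizer defined above (commented out so
# this dump stays inert). The checkpoint name and sample passage are
# assumptions; the call/decode pattern follows the `__call__` and
# `decode_best_spans` methods documented above.
#
#   from transformers import DPRReader, DPRReaderTokenizer
#
#   tokenizer = DPRReaderTokenizer.from_pretrained("facebook/dpr-reader-single-nq-base")
#   model = DPRReader.from_pretrained("facebook/dpr-reader-single-nq-base")
#   encoded_inputs = tokenizer(
#       questions=["What is love?"],
#       titles=["Haddaway"],
#       texts=["'What Is Love' is a song recorded by Haddaway."],
#       return_tensors="pt",
#   )
#   outputs = model(**encoded_inputs)
#   best_spans = tokenizer.decode_best_spans(encoded_inputs, outputs, num_spans=1)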
| 699 | 0 |
'''simple docstring'''
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class __snake_case :
def __init__( self , __UpperCamelCase , __UpperCamelCase=13 , __UpperCamelCase=10 , __UpperCamelCase=3 , __UpperCamelCase=2 , __UpperCamelCase=2 , __UpperCamelCase=2 , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=32 , __UpperCamelCase=5 , __UpperCamelCase=4 , __UpperCamelCase=37 , __UpperCamelCase="gelu" , __UpperCamelCase=0.1 , __UpperCamelCase=0.1 , __UpperCamelCase=10 , __UpperCamelCase=0.0_2 , __UpperCamelCase=0.9 , __UpperCamelCase=None , ) -> int:
'''simple docstring'''
snake_case__ : List[Any] = parent
snake_case__ : Union[str, Any] = batch_size
snake_case__ : Any = image_size
snake_case__ : Union[str, Any] = num_channels
snake_case__ : Union[str, Any] = patch_size
snake_case__ : List[Any] = tubelet_size
snake_case__ : List[str] = num_frames
snake_case__ : Dict = is_training
snake_case__ : Optional[Any] = use_labels
snake_case__ : Dict = hidden_size
snake_case__ : Optional[Any] = num_hidden_layers
snake_case__ : int = num_attention_heads
snake_case__ : Tuple = intermediate_size
snake_case__ : Dict = hidden_act
snake_case__ : Dict = hidden_dropout_prob
snake_case__ : Dict = attention_probs_dropout_prob
snake_case__ : Optional[Any] = type_sequence_label_size
snake_case__ : Optional[int] = initializer_range
snake_case__ : List[str] = mask_ratio
snake_case__ : Tuple = scope
# in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
snake_case__ : str = (image_size // patch_size) ** 2
snake_case__ : Dict = (num_frames // tubelet_size) * self.num_patches_per_frame
# use this variable to define bool_masked_pos
snake_case__ : List[Any] = int(mask_ratio * self.seq_length )
def __a ( self ) -> Tuple:
'''simple docstring'''
snake_case__ : Union[str, Any] = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
snake_case__ : Union[str, Any] = None
if self.use_labels:
snake_case__ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case__ : List[Any] = self.get_config()
return config, pixel_values, labels
def __a ( self ) -> Optional[int]:
'''simple docstring'''
return VideoMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , tubelet_size=self.tubelet_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__UpperCamelCase , initializer_range=self.initializer_range , )
def __a ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> int:
'''simple docstring'''
snake_case__ : Any = VideoMAEModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
snake_case__ : Dict = model(__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __a ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> List[Any]:
'''simple docstring'''
snake_case__ : Union[str, Any] = VideoMAEForPreTraining(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
snake_case__ : Any = torch.ones((self.num_masks,) )
snake_case__ : str = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0 ) )] )
snake_case__ : Union[str, Any] = mask.expand(self.batch_size , -1 ).bool()
snake_case__ : str = model(__UpperCamelCase , __UpperCamelCase )
# model only returns predictions for masked patches
snake_case__ : str = mask.sum().item()
snake_case__ : Dict = 3 * self.tubelet_size * self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_masked_patches, decoder_num_labels) )
def __a ( self ) -> Any:
'''simple docstring'''
snake_case__ : List[Any] = self.prepare_config_and_inputs()
snake_case__ : Optional[int] = config_and_inputs
snake_case__ : List[str] = {'pixel_values': pixel_values}
return config, inputs_dict
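# Hedged illustration of the boolean-mask construction used by the
# pre-training checks above: a single mask with `num_masks` leading ones is
# broadcast over the batch, so every video masks the same positions. The
# sizes below are assumptions chosen for the sketch, not the tester defaults.
if is_torch_available():
    _sketch_seq_length, _sketch_num_masks, _sketch_batch_size = 8, 5, 2
    _sketch_mask = torch.cat(
        [torch.ones(_sketch_num_masks), torch.zeros(_sketch_seq_length - _sketch_num_masks)]
    )
    _sketch_bool_masked_pos = _sketch_mask.expand(_sketch_batch_size, -1).bool()  # (batch, seq_length)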
@require_torch
class __snake_case ( _lowerCamelCase ,_lowerCamelCase ,unittest.TestCase ):
__lowerCamelCase = (
(VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
)
__lowerCamelCase = (
{"""feature-extraction""": VideoMAEModel, """video-classification""": VideoMAEForVideoClassification}
if is_torch_available()
else {}
)
__lowerCamelCase = False
__lowerCamelCase = False
__lowerCamelCase = False
__lowerCamelCase = False
def __a ( self ) -> List[Any]:
'''simple docstring'''
snake_case__ : Optional[Any] = VideoMAEModelTester(self )
snake_case__ : Optional[Any] = ConfigTester(self , config_class=__UpperCamelCase , has_text_modality=__UpperCamelCase , hidden_size=37 )
def __a ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=False ) -> Optional[Any]:
'''simple docstring'''
snake_case__ : List[Any] = copy.deepcopy(__UpperCamelCase )
if model_class == VideoMAEForPreTraining:
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
snake_case__ : int = torch.ones((self.model_tester.num_masks,) )
snake_case__ : Optional[int] = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0 ) )] )
snake_case__ : List[Any] = mask.expand(self.model_tester.batch_size , -1 ).bool()
snake_case__ : List[str] = bool_masked_pos.to(__UpperCamelCase )
if return_labels:
if model_class in [
*get_values(__UpperCamelCase ),
]:
snake_case__ : int = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__UpperCamelCase )
return inputs_dict
def __a ( self ) -> List[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='VideoMAE does not use inputs_embeds' )
def __a ( self ) -> str:
'''simple docstring'''
pass
def __a ( self ) -> int:
'''simple docstring'''
snake_case__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : Dict = model_class(__UpperCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
snake_case__ : Dict = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__UpperCamelCase , nn.Linear ) )
def __a ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : str = model_class(__UpperCamelCase )
snake_case__ : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case__ : str = [*signature.parameters.keys()]
snake_case__ : Optional[Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] , __UpperCamelCase )
def __a ( self ) -> List[Any]:
'''simple docstring'''
snake_case__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def __a ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__UpperCamelCase )
@slow
def __a ( self ) -> List[str]:
'''simple docstring'''
for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case__ : Optional[int] = VideoMAEModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
def __a ( self ) -> int:
'''simple docstring'''
if not self.has_attentions:
pass
else:
snake_case__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : List[str] = True
for model_class in self.all_model_classes:
snake_case__ : Optional[int] = self.model_tester.seq_length - self.model_tester.num_masks
snake_case__ : int = (
num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
)
snake_case__ : List[Any] = True
snake_case__ : Dict = False
snake_case__ : Dict = True
snake_case__ : Tuple = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
with torch.no_grad():
snake_case__ : List[Any] = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
snake_case__ : List[Any] = outputs.attentions
self.assertEqual(len(__UpperCamelCase ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
snake_case__ : Union[str, Any] = True
snake_case__ : Union[str, Any] = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
with torch.no_grad():
snake_case__ : Optional[int] = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
snake_case__ : Optional[Any] = outputs.attentions
self.assertEqual(len(__UpperCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
snake_case__ : Dict = len(__UpperCamelCase )
# Check attention is always last and order is fine
snake_case__ : int = True
snake_case__ : Optional[Any] = True
snake_case__ : List[str] = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
with torch.no_grad():
snake_case__ : List[Any] = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
self.assertEqual(out_len + 1 , len(__UpperCamelCase ) )
snake_case__ : Union[str, Any] = outputs.attentions
self.assertEqual(len(__UpperCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def __a ( self ) -> Optional[int]:
'''simple docstring'''
def check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
snake_case__ : List[Any] = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
with torch.no_grad():
snake_case__ : List[Any] = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
snake_case__ : Optional[Any] = outputs.hidden_states
snake_case__ : Optional[int] = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(__UpperCamelCase ) , __UpperCamelCase )
snake_case__ : Tuple = self.model_tester.seq_length - self.model_tester.num_masks
snake_case__ : Optional[int] = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
snake_case__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : List[str] = True
check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case__ : str = True
check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def __a ( self ) -> List[str]:
'''simple docstring'''
pass
def UpperCamelCase__ ( ) -> int:
snake_case__ : List[str] = hf_hub_download(
repo_id='hf-internal-testing/spaghetti-video' , filename='eating_spaghetti.npy' , repo_type='dataset' )
snake_case__ : Optional[Any] = np.load(A__ )
return list(A__ )
@require_torch
@require_vision
class __snake_case ( unittest.TestCase ):
@cached_property
def __a ( self ) -> Optional[Any]:
'''simple docstring'''
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def __a ( self ) -> Optional[int]:
'''simple docstring'''
snake_case__ : Optional[Any] = VideoMAEForVideoClassification.from_pretrained('MCG-NJU/videomae-base-finetuned-kinetics' ).to(
__UpperCamelCase )
snake_case__ : str = self.default_image_processor
snake_case__ : Optional[int] = prepare_video()
snake_case__ : Optional[Any] = image_processor(__UpperCamelCase , return_tensors='pt' ).to(__UpperCamelCase )
# forward pass
with torch.no_grad():
snake_case__ : Optional[int] = model(**__UpperCamelCase )
# verify the logits
snake_case__ : List[Any] = torch.Size((1, 400) )
self.assertEqual(outputs.logits.shape , __UpperCamelCase )
snake_case__ : List[Any] = torch.tensor([0.3_6_6_9, -0.0_6_8_8, -0.2_4_2_1] ).to(__UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCamelCase , atol=1E-4 ) )
@slow
def __a ( self ) -> List[str]:
'''simple docstring'''
snake_case__ : Dict = VideoMAEForPreTraining.from_pretrained('MCG-NJU/videomae-base-short' ).to(__UpperCamelCase )
snake_case__ : str = self.default_image_processor
snake_case__ : Optional[int] = prepare_video()
snake_case__ : Tuple = image_processor(__UpperCamelCase , return_tensors='pt' ).to(__UpperCamelCase )
# add boolean mask, indicating which patches to mask
snake_case__ : Optional[Any] = hf_hub_download(repo_id='hf-internal-testing/bool-masked-pos' , filename='bool_masked_pos.pt' )
snake_case__ : Dict = torch.load(__UpperCamelCase )
# forward pass
with torch.no_grad():
snake_case__ : Tuple = model(**__UpperCamelCase )
# verify the logits
snake_case__ : List[str] = torch.Size([1, 1408, 1536] )
snake_case__ : List[Any] = torch.tensor(
[[0.7_9_9_4, 0.9_6_1_2, 0.8_5_0_8], [0.7_4_0_1, 0.8_9_5_8, 0.8_3_0_2], [0.5_8_6_2, 0.7_4_6_8, 0.7_3_2_5]] , device=__UpperCamelCase )
self.assertEqual(outputs.logits.shape , __UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , __UpperCamelCase , atol=1E-4 ) )
# verify the loss (`config.norm_pix_loss` = `True`)
snake_case__ : int = torch.tensor([0.5_1_4_2] , device=__UpperCamelCase )
self.assertTrue(torch.allclose(outputs.loss , __UpperCamelCase , atol=1E-4 ) )
# verify the loss (`config.norm_pix_loss` = `False`)
snake_case__ : Tuple = VideoMAEForPreTraining.from_pretrained('MCG-NJU/videomae-base-short' , norm_pix_loss=__UpperCamelCase ).to(
__UpperCamelCase )
with torch.no_grad():
snake_case__ : Dict = model(**__UpperCamelCase )
        snake_case__ : List[Any] = torch.tensor([0.6_4_6_9] , device=__UpperCamelCase )  # no need to wrap a tensor in torch.tensor again
self.assertTrue(torch.allclose(outputs.loss , __UpperCamelCase , atol=1E-4 ) )
| 710 | import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __snake_case ( _lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,unittest.TestCase ):
__lowerCamelCase = StableDiffusionInstructPixaPixPipeline
__lowerCamelCase = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width""", """cross_attention_kwargs"""}
__lowerCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
__lowerCamelCase = IMAGE_TO_IMAGE_IMAGE_PARAMS
__lowerCamelCase = IMAGE_TO_IMAGE_IMAGE_PARAMS
def __a ( self ) -> List[Any]:
'''simple docstring'''
torch.manual_seed(0 )
snake_case__ : Tuple = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=8 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
snake_case__ : Any = PNDMScheduler(skip_prk_steps=__UpperCamelCase )
torch.manual_seed(0 )
snake_case__ : Dict = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
snake_case__ : int = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
snake_case__ : Tuple = CLIPTextModel(__UpperCamelCase )
snake_case__ : Optional[int] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
snake_case__ : Optional[int] = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def __a ( self , __UpperCamelCase , __UpperCamelCase=0 ) -> Dict:
'''simple docstring'''
snake_case__ : Optional[Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(__UpperCamelCase ) ).to(__UpperCamelCase )
snake_case__ : int = image.cpu().permute(0 , 2 , 3 , 1 )[0]
snake_case__ : Union[str, Any] = Image.fromarray(np.uinta(__UpperCamelCase ) ).convert('RGB' )
if str(__UpperCamelCase ).startswith('mps' ):
snake_case__ : str = torch.manual_seed(__UpperCamelCase )
else:
snake_case__ : Dict = torch.Generator(device=__UpperCamelCase ).manual_seed(__UpperCamelCase )
snake_case__ : str = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'image_guidance_scale': 1,
'output_type': 'numpy',
}
return inputs
def __a ( self ) -> Tuple:
'''simple docstring'''
snake_case__ : Dict = 'cpu' # ensure determinism for the device-dependent torch.Generator
snake_case__ : Optional[int] = self.get_dummy_components()
snake_case__ : str = StableDiffusionInstructPixaPixPipeline(**__UpperCamelCase )
snake_case__ : Optional[int] = sd_pipe.to(__UpperCamelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCamelCase )
snake_case__ : Tuple = self.get_dummy_inputs(__UpperCamelCase )
snake_case__ : List[str] = sd_pipe(**__UpperCamelCase ).images
snake_case__ : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
snake_case__ : str = np.array([0.7_5_2_6, 0.3_7_5_0, 0.4_5_4_7, 0.6_1_1_7, 0.5_8_6_6, 0.5_0_1_6, 0.4_3_2_7, 0.5_6_4_2, 0.4_8_1_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __a ( self ) -> Any:
'''simple docstring'''
snake_case__ : Dict = 'cpu' # ensure determinism for the device-dependent torch.Generator
snake_case__ : Union[str, Any] = self.get_dummy_components()
snake_case__ : Optional[Any] = StableDiffusionInstructPixaPixPipeline(**__UpperCamelCase )
snake_case__ : List[Any] = sd_pipe.to(__UpperCamelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCamelCase )
snake_case__ : Union[str, Any] = self.get_dummy_inputs(__UpperCamelCase )
snake_case__ : List[str] = 'french fries'
snake_case__ : Optional[Any] = sd_pipe(**__UpperCamelCase , negative_prompt=__UpperCamelCase )
snake_case__ : Union[str, Any] = output.images
snake_case__ : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
snake_case__ : Any = np.array([0.7_5_1_1, 0.3_6_4_2, 0.4_5_5_3, 0.6_2_3_6, 0.5_7_9_7, 0.5_0_1_3, 0.4_3_4_3, 0.5_6_1_1, 0.4_8_3_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __a ( self ) -> int:
'''simple docstring'''
snake_case__ : Optional[int] = 'cpu' # ensure determinism for the device-dependent torch.Generator
snake_case__ : List[str] = self.get_dummy_components()
snake_case__ : str = StableDiffusionInstructPixaPixPipeline(**__UpperCamelCase )
snake_case__ : str = sd_pipe.to(__UpperCamelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCamelCase )
snake_case__ : Dict = self.get_dummy_inputs(__UpperCamelCase )
snake_case__ : Any = [inputs['prompt']] * 2
snake_case__ : Optional[int] = np.array(inputs['image'] ).astype(np.floataa ) / 2_5_5.0
snake_case__ : Optional[int] = torch.from_numpy(__UpperCamelCase ).unsqueeze(0 ).to(__UpperCamelCase )
snake_case__ : Any = image / 2 + 0.5
snake_case__ : Optional[Any] = image.permute(0 , 3 , 1 , 2 )
snake_case__ : List[Any] = image.repeat(2 , 1 , 1 , 1 )
snake_case__ : Optional[int] = sd_pipe(**__UpperCamelCase ).images
snake_case__ : Union[str, Any] = image[-1, -3:, -3:, -1]
assert image.shape == (2, 32, 32, 3)
snake_case__ : List[Any] = np.array([0.5_8_1_2, 0.5_7_4_8, 0.5_2_2_2, 0.5_9_0_8, 0.5_6_9_5, 0.7_1_7_4, 0.6_8_0_4, 0.5_5_2_3, 0.5_5_7_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __a ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : Optional[int] = 'cpu' # ensure determinism for the device-dependent torch.Generator
snake_case__ : Optional[int] = self.get_dummy_components()
snake_case__ : Tuple = EulerAncestralDiscreteScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='scaled_linear' )
snake_case__ : int = StableDiffusionInstructPixaPixPipeline(**__UpperCamelCase )
snake_case__ : List[str] = sd_pipe.to(__UpperCamelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCamelCase )
snake_case__ : str = self.get_dummy_inputs(__UpperCamelCase )
snake_case__ : Any = sd_pipe(**__UpperCamelCase ).images
snake_case__ : int = image[0, -3:, -3:, -1]
        snake_case__ : Tuple = [round(x , 4 ) for x in image_slice.flatten().tolist()]
        print(','.join([str(x ) for x in snake_case__] ) )
assert image.shape == (1, 32, 32, 3)
snake_case__ : List[Any] = np.array([0.7_4_1_7, 0.3_8_4_2, 0.4_7_3_2, 0.5_7_7_6, 0.5_8_9_1, 0.5_1_3_9, 0.4_0_5_2, 0.5_6_7_3, 0.4_9_8_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __a ( self ) -> int:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def __a ( self ) -> Any:
'''simple docstring'''
snake_case__ : Optional[int] = self.get_dummy_components()
snake_case__ : int = StableDiffusionInstructPixaPixPipeline(**__UpperCamelCase )
snake_case__ : Union[str, Any] = VaeImageProcessor(do_resize=__UpperCamelCase , do_normalize=__UpperCamelCase )
snake_case__ : Optional[int] = pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
snake_case__ : Optional[Any] = pipe(**self.get_dummy_inputs_by_type(__UpperCamelCase , input_image_type='pt' ) )[0]
snake_case__ : Union[str, Any] = components['vae']
snake_case__ : str = self.get_dummy_inputs_by_type(__UpperCamelCase , input_image_type='pt' )
for image_param in self.image_latents_params:
if image_param in inputs.keys():
snake_case__ : List[str] = vae.encode(inputs[image_param] ).latent_dist.mode()
snake_case__ : Dict = pipe(**__UpperCamelCase )[0]
snake_case__ : str = np.abs(out - out_latents_inputs ).max()
self.assertLess(__UpperCamelCase , 1E-4 , 'passing latents as image input generate different result from passing image' )
@slow
@require_torch_gpu
class __snake_case ( unittest.TestCase ):
def __a ( self ) -> List[str]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __a ( self , __UpperCamelCase=0 ) -> Dict:
'''simple docstring'''
snake_case__ : Optional[Any] = torch.manual_seed(__UpperCamelCase )
snake_case__ : List[str] = load_image(
'https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg' )
snake_case__ : int = {
'prompt': 'turn him into a cyborg',
'image': image,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 7.5,
'image_guidance_scale': 1.0,
'output_type': 'numpy',
}
return inputs
def __a ( self ) -> Dict:
'''simple docstring'''
snake_case__ : Any = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=__UpperCamelCase )
pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
pipe.enable_attention_slicing()
snake_case__ : Tuple = self.get_inputs()
snake_case__ : List[Any] = pipe(**__UpperCamelCase ).images
snake_case__ : Union[str, Any] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
snake_case__ : Dict = np.array([0.5_9_0_2, 0.6_0_1_5, 0.6_0_2_7, 0.5_9_8_3, 0.6_0_9_2, 0.6_0_6_1, 0.5_7_6_5, 0.5_7_8_5, 0.5_5_5_5] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def __a ( self ) -> str:
'''simple docstring'''
snake_case__ : Any = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=__UpperCamelCase )
snake_case__ : Tuple = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
pipe.enable_attention_slicing()
snake_case__ : Dict = self.get_inputs()
snake_case__ : Dict = pipe(**__UpperCamelCase ).images
snake_case__ : Union[str, Any] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
snake_case__ : List[Any] = np.array([0.6_5_7_8, 0.6_8_1_7, 0.6_9_7_2, 0.6_7_6_1, 0.6_8_5_6, 0.6_9_1_6, 0.6_4_2_8, 0.6_5_1_6, 0.6_3_0_1] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def __a ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case__ : Tuple = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=__UpperCamelCase )
snake_case__ : Tuple = DDIMScheduler.from_config(pipe.scheduler.config )
pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
pipe.enable_attention_slicing()
snake_case__ : Optional[int] = self.get_inputs()
snake_case__ : Optional[int] = pipe(**__UpperCamelCase ).images
snake_case__ : Tuple = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
snake_case__ : int = np.array([0.3_8_2_8, 0.3_8_3_4, 0.3_8_1_8, 0.3_7_9_2, 0.3_8_6_5, 0.3_7_5_2, 0.3_7_9_2, 0.3_8_4_7, 0.3_7_5_3] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def __a ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : int = 0
def callback_fn(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> None:
snake_case__ : List[Any] = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
snake_case__ : Any = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
snake_case__ : int = latents[0, -3:, -3:, -1]
snake_case__ : List[str] = np.array([-0.2_4_6_3, -0.4_6_4_4, -0.9_7_5_6, 1.5_1_7_6, 1.4_4_1_4, 0.7_8_6_6, 0.9_8_9_7, 0.8_5_2_1, 0.7_9_8_3] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
elif step == 2:
snake_case__ : Dict = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
snake_case__ : Dict = latents[0, -3:, -3:, -1]
snake_case__ : Optional[Any] = np.array([-0.2_6_4_4, -0.4_6_2_6, -0.9_6_5_3, 1.5_1_7_6, 1.4_5_5_1, 0.7_6_8_6, 0.9_8_0_5, 0.8_4_5_2, 0.8_1_1_5] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
snake_case__ : str = False
snake_case__ : List[Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=__UpperCamelCase , torch_dtype=torch.floataa )
snake_case__ : int = pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
pipe.enable_attention_slicing()
snake_case__ : int = self.get_inputs()
pipe(**__UpperCamelCase , callback=__UpperCamelCase , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def __a ( self ) -> Any:
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
snake_case__ : str = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=__UpperCamelCase , torch_dtype=torch.floataa )
snake_case__ : Dict = pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
snake_case__ : str = self.get_inputs()
snake_case__ : Tuple = pipe(**__UpperCamelCase )
snake_case__ : List[Any] = torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 10**9
def __a ( self ) -> int:
'''simple docstring'''
snake_case__ : int = self.get_inputs()
# resize to resolution that is divisible by 8 but not 16 or 32
snake_case__ : Tuple = inputs['image'].resize((504, 504) )
snake_case__ : str = 'timbrooks/instruct-pix2pix'
snake_case__ : Tuple = StableDiffusionInstructPixaPixPipeline.from_pretrained(
__UpperCamelCase , safety_checker=__UpperCamelCase , )
pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
pipe.enable_attention_slicing()
snake_case__ : str = pipe(**__UpperCamelCase )
snake_case__ : List[Any] = output.images[0]
snake_case__ : List[Any] = image[255:258, 383:386, -1]
assert image.shape == (504, 504, 3)
snake_case__ : List[str] = np.array([0.2_7_2_6, 0.2_5_2_9, 0.2_6_6_4, 0.2_6_5_5, 0.2_6_4_1, 0.2_6_4_2, 0.2_5_9_1, 0.2_6_4_9, 0.2_5_9_0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
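# Hedged usage sketch for the pipeline exercised by the slow tests above
# (commented out so this dump stays inert). Checkpoint, image URL and
# `image_guidance_scale` come from the tests; the class name is the
# de-obfuscated `StableDiffusionInstructPix2PixPipeline`, and the prompt and
# step count are illustrative assumptions.
#
#   import torch
#   from diffusers import StableDiffusionInstructPix2PixPipeline
#   from diffusers.utils import load_image
#
#   pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
#       "timbrooks/instruct-pix2pix", torch_dtype=torch.float16, safety_checker=None
#   ).to("cuda")
#   image = load_image(
#       "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg"
#   )
#   edited = pipe(
#       "turn him into a cyborg", image=image,
#       num_inference_steps=20, image_guidance_scale=1.0,
#   ).images[0]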
| 699 | 0 |
from typing import Dict, Optional
import numpy as np
import datasets
lowerCAmelCase__ : Optional[Any] = '''
IoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union
between the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,
the mean IoU of the image is calculated by taking the IoU of each class and averaging them.
'''
lowerCAmelCase__ : Optional[Any] = '''
Args:
predictions (`List[ndarray]`):
List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
references (`List[ndarray]`):
List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
num_labels (`int`):
Number of classes (categories).
ignore_index (`int`):
Index that will be ignored during evaluation.
nan_to_num (`int`, *optional*):
If specified, NaN values will be replaced by the number defined by the user.
label_map (`dict`, *optional*):
If specified, dictionary mapping old label indices to new label indices.
reduce_labels (`bool`, *optional*, defaults to `False`):
Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,
and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.
Returns:
`Dict[str, float | ndarray]` comprising various elements:
- *mean_iou* (`float`):
Mean Intersection-over-Union (IoU averaged over all categories).
- *mean_accuracy* (`float`):
Mean accuracy (averaged over all categories).
- *overall_accuracy* (`float`):
Overall accuracy on all images.
- *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):
Per category accuracy.
- *per_category_iou* (`ndarray` of shape `(num_labels,)`):
Per category IoU.
Examples:
>>> import numpy as np
>>> mean_iou = datasets.load_metric("mean_iou")
>>> # suppose one has 3 different segmentation maps predicted
>>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])
>>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])
>>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])
>>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])
>>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])
>>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])
>>> predicted = [predicted_1, predicted_2, predicted_3]
>>> ground_truth = [actual_1, actual_2, actual_3]
>>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{\'mean_iou\': 0.47750000000000004, \'mean_accuracy\': 0.5916666666666666, \'overall_accuracy\': 0.5263157894736842, \'per_category_iou\': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), \'per_category_accuracy\': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}
'''
lowerCAmelCase__ : List[Any] = '''\
@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,
author = {{MMSegmentation Contributors}},
license = {Apache-2.0},
month = {7},
title = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},
url = {https://github.com/open-mmlab/mmsegmentation},
year = {2020}
}'''
def UpperCamelCase__ ( A__ , A__ , A__ , A__ , A__ = None , A__ = False , ) -> Optional[int]:
if label_map is not None:
for old_id, new_id in label_map.items():
snake_case__ : str = new_id
# turn into Numpy arrays
snake_case__ : Union[str, Any] = np.array(A__ )
snake_case__ : Any = np.array(A__ )
if reduce_labels:
snake_case__ : Optional[Any] = 255
snake_case__ : Union[str, Any] = label - 1
snake_case__ : Union[str, Any] = 255
snake_case__ : Tuple = label != ignore_index
snake_case__ : Any = np.not_equal(A__ , A__ )
snake_case__ : str = pred_label[mask]
snake_case__ : Any = np.array(A__ )[mask]
snake_case__ : Tuple = pred_label[pred_label == label]
snake_case__ : Dict = np.histogram(A__ , bins=A__ , range=(0, num_labels - 1) )[0]
snake_case__ : str = np.histogram(A__ , bins=A__ , range=(0, num_labels - 1) )[0]
snake_case__ : str = np.histogram(A__ , bins=A__ , range=(0, num_labels - 1) )[0]
snake_case__ : List[str] = area_pred_label + area_label - area_intersect
return area_intersect, area_union, area_pred_label, area_label
def UpperCamelCase__ ( A__ , A__ , A__ , A__ , A__ = None , A__ = False , ) -> Dict:
snake_case__ : str = np.zeros((num_labels,) , dtype=np.floataa )
snake_case__ : Optional[Any] = np.zeros((num_labels,) , dtype=np.floataa )
snake_case__ : List[Any] = np.zeros((num_labels,) , dtype=np.floataa )
snake_case__ : Union[str, Any] = np.zeros((num_labels,) , dtype=np.floataa )
for result, gt_seg_map in zip(A__ , A__ ):
snake_case__ : Any = intersect_and_union(
A__ , A__ , A__ , A__ , A__ , A__ )
total_area_intersect += area_intersect
total_area_union += area_union
total_area_pred_label += area_pred_label
total_area_label += area_label
return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def UpperCamelCase__ ( A__ , A__ , A__ , A__ , A__ = None , A__ = None , A__ = False , ) -> List[Any]:
snake_case__ : Dict = total_intersect_and_union(
A__ , A__ , A__ , A__ , A__ , A__ )
# compute metrics
snake_case__ : Tuple = {}
snake_case__ : str = total_area_intersect.sum() / total_area_label.sum()
snake_case__ : Union[str, Any] = total_area_intersect / total_area_union
snake_case__ : Optional[int] = total_area_intersect / total_area_label
snake_case__ : Optional[Any] = np.nanmean(A__ )
snake_case__ : Tuple = np.nanmean(A__ )
snake_case__ : Tuple = all_acc
snake_case__ : str = iou
snake_case__ : List[str] = acc
if nan_to_num is not None:
snake_case__ : str = {metric: np.nan_to_num(A__ , nan=A__ ) for metric, metric_value in metrics.items()}
return metrics
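# Hedged sanity sketch of the per-class IoU computed above, on toy 2x2 maps
# (inputs are assumptions; ignore_index / label_map handling is omitted).
def _iou_sketch():
    pred = np.array([[0, 1], [1, 1]])
    gt = np.array([[0, 0], [1, 1]])
    for label in (0, 1):
        intersect = np.logical_and(pred == label, gt == label).sum()
        union = np.logical_or(pred == label, gt == label).sum()
        print(f"class {label}: IoU = {intersect / union:.3f}")  # 0.500 and 0.667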
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class __snake_case ( datasets.Metric ):
def __a ( self ) -> Union[str, Any]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
# 1st Seq - height dim, 2nd - width dim
{
'predictions': datasets.Sequence(datasets.Sequence(datasets.Value('uint16' ) ) ),
'references': datasets.Sequence(datasets.Sequence(datasets.Value('uint16' ) ) ),
} ) , reference_urls=[
'https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py'
] , )
def __a ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = False , ) -> int:
'''simple docstring'''
snake_case__ : Dict = mean_iou(
results=__UpperCamelCase , gt_seg_maps=__UpperCamelCase , num_labels=__UpperCamelCase , ignore_index=__UpperCamelCase , nan_to_num=__UpperCamelCase , label_map=__UpperCamelCase , reduce_labels=__UpperCamelCase , )
return iou_result
| 711 | from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForSeqaSeq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
    SquadV1Processor,  # the duplicated `SquadVaProcessor` entries stood for the V1/V2 processors
    SquadV2Processor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
| 699 | 0 |
import sys
import webbrowser
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print('''Googling.....''')
lowerCAmelCase__ : Tuple = '''https://www.google.com/search?q=''' + ''' '''.join(sys.argv[1:])
lowerCAmelCase__ : List[Any] = requests.get(url, headers={'''User-Agent''': UserAgent().random})
# res.raise_for_status()
with open('''project1a.html''', '''wb''') as out_file: # only for knowing the class
for data in res.iter_content(1_00_00):
out_file.write(data)
lowerCAmelCase__ : Optional[int] = BeautifulSoup(res.text, '''html.parser''')
lowerCAmelCase__ : int = list(soup.select('''.eZt8xd'''))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get('''href'''))
else:
webbrowser.open(F'''https://google.com{link.get('href')}''')
| 712 | from dataclasses import dataclass, field
from typing import Optional
from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser
@dataclass
class __snake_case :
__lowerCamelCase = field(
metadata={"""help""": """The output directory where the model will be written."""} ,)
__lowerCamelCase = field(
metadata={
"""help""": (
"""The encoder model checkpoint for weights initialization."""
"""Don't set if you want to train an encoder model from scratch."""
)
} ,)
__lowerCamelCase = field(
metadata={
"""help""": (
"""The decoder model checkpoint for weights initialization."""
"""Don't set if you want to train a decoder model from scratch."""
)
} ,)
__lowerCamelCase = field(
default=_lowerCamelCase ,metadata={"""help""": """Pretrained encoder config name or path if not the same as encoder_model_name"""} )
__lowerCamelCase = field(
default=_lowerCamelCase ,metadata={"""help""": """Pretrained decoder config name or path if not the same as decoder_model_name"""} )
def UpperCamelCase__ ( ) -> Union[str, Any]:
snake_case__ : str = HfArgumentParser((ModelArguments,) )
    (snake_case__ ,) = parser.parse_args_into_dataclasses()  # annotating a tuple-unpacking target is invalid syntax
# Load pretrained model and tokenizer
# Use explicit specified encoder config
if model_args.encoder_config_name:
snake_case__ : List[str] = AutoConfig.from_pretrained(model_args.encoder_config_name )
# Use pretrained encoder model's config
else:
snake_case__ : Optional[int] = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path )
# Use explicit specified decoder config
if model_args.decoder_config_name:
snake_case__ : Optional[Any] = AutoConfig.from_pretrained(model_args.decoder_config_name )
# Use pretrained decoder model's config
else:
snake_case__ : List[str] = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path )
# necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed
snake_case__ : Any = True
snake_case__ : Dict = True
snake_case__ : Tuple = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained(
encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path , decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path , encoder_config=A__ , decoder_config=A__ , )
# GPT2 only has bos/eos tokens but not decoder_start/pad tokens
snake_case__ : Optional[Any] = decoder_config.decoder_start_token_id
snake_case__ : Tuple = decoder_config.pad_token_id
if decoder_start_token_id is None:
snake_case__ : Optional[Any] = decoder_config.bos_token_id
if pad_token_id is None:
snake_case__ : int = decoder_config.eos_token_id
# This is necessary to make Flax's generate() work
snake_case__ : Union[str, Any] = decoder_config.eos_token_id
snake_case__ : Optional[int] = decoder_start_token_id
snake_case__ : int = pad_token_id
snake_case__ : Tuple = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path )
snake_case__ : int = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path )
snake_case__ : Tuple = tokenizer.convert_ids_to_tokens(model.config.pad_token_id )
model.save_pretrained(model_args.output_dir )
image_processor.save_pretrained(model_args.output_dir )
tokenizer.save_pretrained(model_args.output_dir )
if __name__ == "__main__":
main()
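# Hedged invocation sketch (script name and checkpoints are assumptions; the
# flag names mirror the `model_args` attributes used in `main` above):
#
#   python create_flax_vision_encoder_decoder.py \
#       --output_dir ./vit-gpt2 \
#       --encoder_model_name_or_path google/vit-base-patch16-224-in21k \
#       --decoder_model_name_or_path gpt2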
| 699 | 0 |
def UpperCamelCase__ ( A__ ) -> bool:
    snake_case__ : Optional[int] = round(n ** (1 / 3))  # round the float cube root: 27 ** (1 / 3) is 3.0000000000000004
return (val * val * val) == n
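# Hedged alternative (an illustrative addition, not part of the original
# snippet): an integer binary search that avoids floating point entirely,
# so it stays exact for arbitrarily large non-negative n.
def perfect_cube_exact(n: int) -> bool:
    lo, hi = 0, max(n, 1)
    while lo <= hi:
        mid = (lo + hi) // 2
        cube = mid ** 3
        if cube == n:
            return True
        if cube < n:
            lo = mid + 1
        else:
            hi = mid - 1
    return False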
if __name__ == "__main__":
print(perfect_cube(27))
print(perfect_cube(4))
| 713 | import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def UpperCamelCase__ ( A__ , A__ , A__ , A__ , A__ , A__ = None , ) -> Optional[int]:
snake_case__ : List[str] = {}
if train_file is not None:
snake_case__ : Tuple = [train_file]
if eval_file is not None:
snake_case__ : Dict = [eval_file]
if test_file is not None:
snake_case__ : str = [test_file]
snake_case__ : Optional[Any] = datasets.load_dataset('csv' , data_files=A__ )
snake_case__ : Any = list(ds[list(files.keys() )[0]].features.keys() )
snake_case__ : Optional[Any] = features_name.pop(A__ )
snake_case__ : Optional[Any] = list(set(ds[list(files.keys() )[0]][label_name] ) )
snake_case__ : str = {label: i for i, label in enumerate(A__ )}
snake_case__ : int = tokenizer.model_input_names
snake_case__ : int = {}
if len(A__ ) == 1:
for k in files.keys():
snake_case__ : str = ds[k].map(
lambda A__ : tokenizer.batch_encode_plus(
example[features_name[0]] , truncation=A__ , max_length=A__ , padding='max_length' ) , batched=A__ , )
elif len(A__ ) == 2:
for k in files.keys():
snake_case__ : Optional[int] = ds[k].map(
lambda A__ : tokenizer.batch_encode_plus(
(example[features_name[0]], example[features_name[1]]) , truncation=A__ , max_length=A__ , padding='max_length' , ) , batched=A__ , )
def gen_train():
for ex in transformed_ds[datasets.Split.TRAIN]:
snake_case__ : int = {k: v for k, v in ex.items() if k in input_names}
snake_case__ : Any = labelaid[ex[label_name]]
yield (d, label)
def gen_val():
for ex in transformed_ds[datasets.Split.VALIDATION]:
snake_case__ : int = {k: v for k, v in ex.items() if k in input_names}
snake_case__ : Union[str, Any] = labelaid[ex[label_name]]
yield (d, label)
def gen_test():
for ex in transformed_ds[datasets.Split.TEST]:
snake_case__ : Dict = {k: v for k, v in ex.items() if k in input_names}
snake_case__ : List[str] = labelaid[ex[label_name]]
yield (d, label)
snake_case__ : Any = (
tf.data.Dataset.from_generator(
A__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TRAIN in transformed_ds
else None
)
if train_ds is not None:
snake_case__ : str = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
snake_case__ : Optional[int] = (
tf.data.Dataset.from_generator(
A__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.VALIDATION in transformed_ds
else None
)
if val_ds is not None:
snake_case__ : Optional[int] = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
snake_case__ : List[str] = (
tf.data.Dataset.from_generator(
A__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TEST in transformed_ds
else None
)
if test_ds is not None:
snake_case__ : str = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
return train_ds, val_ds, test_ds, labelaid
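# Hedged mini-example of the `tf.data.Dataset.from_generator` pattern used in
# `get_tfds` above (the toy feature name, dtypes and shapes are assumptions):
def _toy_gen():
    yield ({"input_ids": [101, 102]}, 0)

_toy_ds = tf.data.Dataset.from_generator(
    _toy_gen,
    ({"input_ids": tf.int32}, tf.int32),
    ({"input_ids": tf.TensorShape([None])}, tf.TensorShape([])),
)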
lowerCAmelCase__ : List[str] = logging.getLogger(__name__)
@dataclass
class __snake_case :
__lowerCamelCase = field(metadata={"""help""": """Which column contains the label"""} )
__lowerCamelCase = field(default=_lowerCamelCase ,metadata={"""help""": """The path of the training file"""} )
__lowerCamelCase = field(default=_lowerCamelCase ,metadata={"""help""": """The path of the development file"""} )
__lowerCamelCase = field(default=_lowerCamelCase ,metadata={"""help""": """The path of the test file"""} )
__lowerCamelCase = field(
default=128 ,metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} ,)
__lowerCamelCase = field(
default=_lowerCamelCase ,metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
@dataclass
class __snake_case :
__lowerCamelCase = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
__lowerCamelCase = field(
default=_lowerCamelCase ,metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
__lowerCamelCase = field(
default=_lowerCamelCase ,metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
__lowerCamelCase = field(default=_lowerCamelCase ,metadata={"""help""": """Set this flag to use fast tokenization."""} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
__lowerCamelCase = field(
default=_lowerCamelCase ,metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} ,)
def UpperCamelCase__ ( ) -> Union[str, Any]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
snake_case__ : int = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
    snake_case__ , snake_case__ , snake_case__ = parser.parse_args_into_dataclasses()  # annotating a tuple-unpacking target is invalid syntax
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
' --overwrite_output_dir to overcome.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO , )
logger.info(
F"""n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, """
F"""16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
snake_case__ : Dict = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    snake_case__ , snake_case__ , snake_case__ , snake_case__ = get_tfds(
train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=A__ , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )
snake_case__ : Dict = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(A__ ) , labelaid=A__ , idalabel={id: label for label, id in labelaid.items()} , finetuning_task='text-classification' , cache_dir=model_args.cache_dir , )
with training_args.strategy.scope():
snake_case__ : Optional[Any] = TFAutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_pt=bool('.bin' in model_args.model_name_or_path ) , config=A__ , cache_dir=model_args.cache_dir , )
def compute_metrics(A__ ) -> Dict:
snake_case__ : Optional[Any] = np.argmax(p.predictions , axis=1 )
return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
snake_case__ : Any = TFTrainer(
model=A__ , args=A__ , train_dataset=A__ , eval_dataset=A__ , compute_metrics=A__ , )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
snake_case__ : Dict = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
snake_case__ : Tuple = trainer.evaluate()
snake_case__ : Any = os.path.join(training_args.output_dir , 'eval_results.txt' )
with open(A__ , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key, value in result.items():
logger.info(F""" {key} = {value}""" )
writer.write(F"""{key} = {value}\n""" )
results.update(A__ )
return results
if __name__ == "__main__":
main()
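# Hedged invocation sketch (file names and checkpoint are assumptions; the
# flag names mirror the dataclass fields referenced in `main` above):
#
#   python run_tf_text_classification.py \
#       --model_name_or_path bert-base-uncased \
#       --train_file train.csv --dev_file dev.csv --test_file test.csv \
#       --label_column_id 0 \
#       --output_dir ./out --do_train --do_eval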
| 699 | 0 |
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class TFDeiTModelTester:
    def __init__( self , parent , batch_size=13 , image_size=30 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.02 , num_labels=3 , scope=None , encoder_stride=2 , ):
        # `num_labels` is a reconstructed parameter name kept for signature
        # parity with the other model testers; it is not stored below.
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride
        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
        return DeiTConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
    def create_and_check_model( self , config , pixel_values , labels ):
        model = TFDeiTModel(config=config )
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_masked_image_modeling( self , config , pixel_values , labels ):
        model = TFDeiTForMaskedImageModeling(config=config )
        result = model(pixel_values )
        self.parent.assertEqual(
            result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForMaskedImageModeling(config )
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
        self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
    def create_and_check_for_image_classification( self , config , pixel_values , labels ):
        config.num_labels = self.type_sequence_label_size
        model = TFDeiTForImageClassification(config )
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForImageClassification(config )
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_tf
class __snake_case ( _lowerCamelCase ,_lowerCamelCase ,unittest.TestCase ):
__lowerCamelCase = (
(
TFDeiTModel,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
)
if is_tf_available()
else ()
)
__lowerCamelCase = (
{
"""feature-extraction""": TFDeiTModel,
"""image-classification""": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
}
if is_tf_available()
else {}
)
__lowerCamelCase = False
__lowerCamelCase = False
__lowerCamelCase = False
__lowerCamelCase = False
def __a ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : int = TFDeiTModelTester(self )
snake_case__ : Any = ConfigTester(self , config_class=__UpperCamelCase , has_text_modality=__UpperCamelCase , hidden_size=37 )
def __a ( self ) -> int:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='DeiT does not use inputs_embeds' )
def __a ( self ) -> Tuple:
'''simple docstring'''
pass
def __a ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : List[Any] = model_class(__UpperCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
snake_case__ : List[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__UpperCamelCase , tf.keras.layers.Dense ) )
def __a ( self ) -> str:
'''simple docstring'''
snake_case__ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : str = model_class(__UpperCamelCase )
snake_case__ : Tuple = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case__ : Optional[int] = [*signature.parameters.keys()]
snake_case__ : str = ['pixel_values']
self.assertListEqual(arg_names[:1] , __UpperCamelCase )
def __a ( self ) -> str:
'''simple docstring'''
snake_case__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def __a ( self ) -> str:
'''simple docstring'''
snake_case__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__UpperCamelCase )
def __a ( self ) -> List[Any]:
'''simple docstring'''
snake_case__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__UpperCamelCase )
def __a ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=False ) -> Tuple:
'''simple docstring'''
snake_case__ : Union[str, Any] = super()._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase )
if return_labels:
if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters:
del inputs_dict["labels"]
return inputs_dict
@slow
def __a ( self ) -> Tuple:
'''simple docstring'''
for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case__ : Dict = TFDeiTModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
def UpperCamelCase__ ( ) -> Any:
snake_case__ : Tuple = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class __snake_case ( unittest.TestCase ):
@cached_property
def __a ( self ) -> Optional[Any]:
'''simple docstring'''
return (
DeiTImageProcessor.from_pretrained('facebook/deit-base-distilled-patch16-224' )
if is_vision_available()
else None
)
@slow
def __a ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : Optional[Any] = TFDeiTForImageClassificationWithTeacher.from_pretrained('facebook/deit-base-distilled-patch16-224' )
snake_case__ : Tuple = self.default_image_processor
snake_case__ : List[Any] = prepare_img()
snake_case__ : Any = image_processor(images=__UpperCamelCase , return_tensors='tf' )
# forward pass
snake_case__ : Optional[Any] = model(**__UpperCamelCase )
# verify the logits
snake_case__ : List[Any] = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , __UpperCamelCase )
snake_case__ : Union[str, Any] = tf.constant([-1.0_2_6_6, 0.1_9_1_2, -1.2_8_6_1] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , __UpperCamelCase , atol=1E-4 ) )
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
logger = datasets.utils.logging.get_logger(__name__)
class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
    """BuilderConfig for AudioFolder."""
    drop_labels: bool = None
    drop_metadata: bool = None
class AudioFolder(folder_based_builder.FolderBasedBuilder):
    BASE_FEATURE = datasets.Audio()
    BASE_COLUMN_NAME = "audio"
    BUILDER_CONFIG_CLASS = AudioFolderConfig
    EXTENSIONS: List[str]  # definition at the bottom of the script
    CLASSIFICATION_TASK = AudioClassification(audio_column="audio" , label_column="label" )
AUDIO_EXTENSIONS = [
'''.aiff''',
'''.au''',
'''.avr''',
'''.caf''',
'''.flac''',
'''.htk''',
'''.svx''',
'''.mat4''',
'''.mat5''',
'''.mpc2k''',
'''.ogg''',
'''.paf''',
'''.pvf''',
'''.raw''',
'''.rf64''',
'''.sd2''',
'''.sds''',
'''.ircam''',
'''.voc''',
'''.w64''',
'''.wav''',
'''.nist''',
'''.wavex''',
'''.wve''',
'''.xi''',
'''.mp3''',
'''.opus''',
]
AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS
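# Usage sketch: this builder backs the packaged `audiofolder` loader in
# `datasets`; the directory layout is hypothetical (one sub-folder per class
# label containing files with the extensions listed above):
#
#   from datasets import load_dataset
#   ds = load_dataset('audiofolder', data_dir='./my_audio_dataset')
#   print(ds['train'][0]['audio'], ds['train'][0]['label'])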
def solution() -> int:
    """Return a * b * c for the Pythagorean triplet (a, b, c) with a + b + c == 1000."""
    return [
        a * b * (1000 - a - b)
        for a in range(1 , 999 )
        for b in range(a , 999 )
        if (a * a + b * b == (1000 - a - b) ** 2)
    ][0]
if __name__ == "__main__":
print(F'''{solution() = }''')
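# Sanity check (runs offline): the unique triplet behind the answer is
# (200, 375, 425), so `solution()` returns 200 * 375 * 425 == 31875000.
assert 200 + 375 + 425 == 1000 and 200**2 + 375**2 == 425**2
assert 200 * 375 * 425 == 31875000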
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __snake_case ( _lowerCamelCase ,_lowerCamelCase ,unittest.TestCase ):
__lowerCamelCase = IFInpaintingPipeline
__lowerCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"""width""", """height"""}
__lowerCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
__lowerCamelCase = PipelineTesterMixin.required_optional_params - {"""latents"""}
def __a ( self ) -> Optional[Any]:
'''simple docstring'''
return self._get_dummy_components()
def __a ( self , __UpperCamelCase , __UpperCamelCase=0 ) -> str:
'''simple docstring'''
if str(__UpperCamelCase ).startswith('mps' ):
snake_case__ : int = torch.manual_seed(__UpperCamelCase )
else:
snake_case__ : Union[str, Any] = torch.Generator(device=__UpperCamelCase ).manual_seed(__UpperCamelCase )
snake_case__ : Optional[int] = floats_tensor((1, 3, 32, 32) , rng=random.Random(__UpperCamelCase ) ).to(__UpperCamelCase )
snake_case__ : Dict = floats_tensor((1, 3, 32, 32) , rng=random.Random(__UpperCamelCase ) ).to(__UpperCamelCase )
snake_case__ : Optional[Any] = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'mask_image': mask_image,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def __a ( self ) -> List[Any]:
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def __a ( self ) -> Optional[int]:
'''simple docstring'''
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
def __a ( self ) -> List[str]:
'''simple docstring'''
super().test_save_load_floataa(expected_max_diff=1E-1 )
def __a ( self ) -> List[str]:
'''simple docstring'''
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def __a ( self ) -> int:
'''simple docstring'''
self._test_save_load_local()
def __a ( self ) -> List[str]:
'''simple docstring'''
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import DDIMScheduler, KandinskyVaaPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __snake_case ( _lowerCamelCase ,unittest.TestCase ):
__lowerCamelCase = KandinskyVaaPipeline
__lowerCamelCase = [
"""image_embeds""",
"""negative_image_embeds""",
]
__lowerCamelCase = ["""image_embeds""", """negative_image_embeds"""]
__lowerCamelCase = [
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
__lowerCamelCase = False
@property
def __a ( self ) -> Optional[Any]:
'''simple docstring'''
return 32
@property
def __a ( self ) -> Any:
'''simple docstring'''
return 32
@property
def __a ( self ) -> Optional[Any]:
'''simple docstring'''
return self.time_input_dim
@property
def __a ( self ) -> str:
'''simple docstring'''
return self.time_input_dim * 4
@property
def __a ( self ) -> int:
'''simple docstring'''
return 100
@property
def __a ( self ) -> Tuple:
'''simple docstring'''
torch.manual_seed(0 )
snake_case__ : Union[str, Any] = {
'in_channels': 4,
# Out channels is double in channels because predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
snake_case__ : Optional[int] = UNetaDConditionModel(**__UpperCamelCase )
return model
@property
def __a ( self ) -> str:
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def __a ( self ) -> List[str]:
'''simple docstring'''
torch.manual_seed(0 )
snake_case__ : Any = VQModel(**self.dummy_movq_kwargs )
return model
def __a ( self ) -> str:
'''simple docstring'''
snake_case__ : Optional[int] = self.dummy_unet
snake_case__ : int = self.dummy_movq
snake_case__ : Union[str, Any] = DDIMScheduler(
num_train_timesteps=1000 , beta_schedule='linear' , beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , clip_sample=__UpperCamelCase , set_alpha_to_one=__UpperCamelCase , steps_offset=1 , prediction_type='epsilon' , thresholding=__UpperCamelCase , )
snake_case__ : int = {
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def __a ( self , __UpperCamelCase , __UpperCamelCase=0 ) -> Dict:
'''simple docstring'''
snake_case__ : str = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__UpperCamelCase ) ).to(__UpperCamelCase )
snake_case__ : List[str] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
__UpperCamelCase )
if str(__UpperCamelCase ).startswith('mps' ):
snake_case__ : Dict = torch.manual_seed(__UpperCamelCase )
else:
snake_case__ : Union[str, Any] = torch.Generator(device=__UpperCamelCase ).manual_seed(__UpperCamelCase )
snake_case__ : Any = {
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'generator': generator,
'height': 64,
'width': 64,
'guidance_scale': 4.0,
'num_inference_steps': 2,
'output_type': 'np',
}
return inputs
def __a ( self ) -> Dict:
'''simple docstring'''
snake_case__ : Optional[int] = 'cpu'
snake_case__ : Any = self.get_dummy_components()
snake_case__ : Dict = self.pipeline_class(**__UpperCamelCase )
snake_case__ : Any = pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
snake_case__ : Union[str, Any] = pipe(**self.get_dummy_inputs(__UpperCamelCase ) )
snake_case__ : List[str] = output.images
snake_case__ : List[Any] = pipe(
**self.get_dummy_inputs(__UpperCamelCase ) , return_dict=__UpperCamelCase , )[0]
snake_case__ : Tuple = image[0, -3:, -3:, -1]
snake_case__ : List[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
snake_case__ : Optional[Any] = np.array(
[0.6_2_3_7_9_7_6, 1.0, 0.3_6_4_4_1_3_3_2, 1.0, 0.7_0_6_3_9_6_3_4, 0.2_9_8_7_7_1_8_6, 0.8_5_6_5_2_1_2_5, 0.5_2_1_6_8_4_3, 0.5_4_4_5_4_0_4_6] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class __snake_case ( unittest.TestCase ):
def __a ( self ) -> Any:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __a ( self ) -> List[Any]:
'''simple docstring'''
snake_case__ : str = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy' )
snake_case__ : int = KandinskyVaaPriorPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-prior' , torch_dtype=torch.floataa )
pipe_prior.to(__UpperCamelCase )
snake_case__ : List[str] = KandinskyVaaPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-decoder' , torch_dtype=torch.floataa )
snake_case__ : Optional[Any] = pipeline.to(__UpperCamelCase )
pipeline.set_progress_bar_config(disable=__UpperCamelCase )
snake_case__ : Any = 'red cat, 4k photo'
snake_case__ : Tuple = torch.Generator(device='cuda' ).manual_seed(0 )
snake_case__ : Union[str, Any] = pipe_prior(
__UpperCamelCase , generator=__UpperCamelCase , num_inference_steps=5 , negative_prompt='' , ).to_tuple()
snake_case__ : List[Any] = torch.Generator(device='cuda' ).manual_seed(0 )
snake_case__ : Dict = pipeline(
image_embeds=__UpperCamelCase , negative_image_embeds=__UpperCamelCase , generator=__UpperCamelCase , num_inference_steps=100 , output_type='np' , )
snake_case__ : List[str] = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(__UpperCamelCase , __UpperCamelCase )
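# Typical way to exercise the tests above (the file path is hypothetical):
# the fast checks run under plain pytest, while the @slow + @require_torch_gpu
# integration test is skipped unless RUN_SLOW=1 is set in the environment:
#
#   RUN_SLOW=1 python -m pytest tests/pipelines/kandinsky_v22/test_kandinsky.py -s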
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SPIECE_UNDERLINE = '''▁'''
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
class __snake_case ( _lowerCamelCase ,unittest.TestCase ):
__lowerCamelCase = BertGenerationTokenizer
__lowerCamelCase = False
__lowerCamelCase = True
    def setUp( self ) -> None:
        super().setUp()
        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB , keep_accents=True )
tokenizer.save_pretrained(self.tmpdirname )
def __a ( self ) -> Optional[int]:
'''simple docstring'''
        token = '<s>'
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
def __a ( self ) -> List[str]:
'''simple docstring'''
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , '<unk>' )
        self.assertEqual(vocab_keys[1] , '<s>' )
        self.assertEqual(vocab_keys[-1] , '<pad>' )
        self.assertEqual(len(vocab_keys ) , 1002 )
def __a ( self ) -> int:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def __a ( self ) -> Tuple:
'''simple docstring'''
        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokens = tokenizer.tokenize('This is a test' )
        self.assertListEqual(tokens , ['▁This', '▁is', '▁a', '▁t', 'est'] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [285, 46, 10, 170, 382] , )
        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
@cached_property
def __a ( self ) -> Dict:
'''simple docstring'''
return BertGenerationTokenizer.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' )
@slow
def __a ( self ) -> Any:
'''simple docstring'''
        symbols = 'Hello World!'
        original_tokenizer_encodings = [18536, 2260, 101]
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
@slow
def __a ( self ) -> Optional[int]:
'''simple docstring'''
        symbols = (
'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'
)
        original_tokenizer_encodings = [
871,
419,
358,
946,
991,
2521,
452,
358,
1357,
387,
7751,
3536,
112,
985,
456,
126,
865,
938,
5400,
5734,
458,
1368,
467,
786,
2462,
5246,
1159,
633,
865,
4519,
457,
582,
852,
2557,
427,
916,
508,
405,
34324,
497,
391,
408,
11342,
1244,
385,
100,
938,
985,
456,
574,
362,
12597,
3200,
3129,
1172,
]
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
@require_torch
@slow
def __a ( self ) -> List[str]:
'''simple docstring'''
import torch
from transformers import BertGenerationConfig, BertGenerationEncoder
# Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys() )[:10]
        sequence = ' '.join(first_ten_tokens )
        encoded_sequence = self.big_tokenizer.encode_plus(sequence , return_tensors='pt' , return_token_type_ids=False )
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + ' ' + sequence] , return_tensors='pt' , return_token_type_ids=False )
        config = BertGenerationConfig()
        model = BertGenerationEncoder(config )
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
        with torch.no_grad():
            model(**encoded_sequence )
            model(**batch_encoded_sequence )
@slow
def __a ( self ) -> Dict:
'''simple docstring'''
snake_case__ : Optional[int] = {'input_ids': [[39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114], [448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__UpperCamelCase , model_name='google/bert_for_seq_generation_L-24_bbc_encoder' , revision='c817d1fd1be2ffa69431227a1fe320544943d4db' , )
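# Minimal standalone sketch of the checkpoint exercised above (assumes network
# access and the `sentencepiece` dependency):
if __name__ == "__main__":
    demo_tokenizer = BertGenerationTokenizer.from_pretrained(
        'google/bert_for_seq_generation_L-24_bbc_encoder' )
    print(demo_tokenizer.tokenize('Hello World!' ) )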
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format='''%(message)s''')
def column_reshape(input_array: np.ndarray ) -> np.ndarray:
    """Reshape a row Numpy array into a column Numpy array."""
    return input_array.reshape((input_array.size, 1) )
def covariance_within_classes(features: np.ndarray , labels: np.ndarray , classes: int ) -> np.ndarray:
    """Compute the covariance matrix inside each class."""
    covariance_sum = np.nan
    for i in range(classes ):
        data = features[:, labels == i]
        data_mean = data.mean(1 )
        # Centralize the data of class i
        centered_data = data - column_reshape(data_mean )
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += np.dot(centered_data , centered_data.T )
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = np.dot(centered_data , centered_data.T )
    return covariance_sum / features.shape[1]
def covariance_between_classes(features: np.ndarray , labels: np.ndarray , classes: int ) -> np.ndarray:
    """Compute the covariance matrix between the class means."""
    general_data_mean = features.mean(1 )
    covariance_sum = np.nan
    for i in range(classes ):
        data = features[:, labels == i]
        device_data = data.shape[1]
        data_mean = data.mean(1 )
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += device_data * np.dot(
                column_reshape(data_mean ) - column_reshape(general_data_mean ) , (column_reshape(data_mean ) - column_reshape(general_data_mean )).T , )
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = device_data * np.dot(
                column_reshape(data_mean ) - column_reshape(general_data_mean ) , (column_reshape(data_mean ) - column_reshape(general_data_mean )).T , )
    return covariance_sum / features.shape[1]
def principal_component_analysis(features: np.ndarray , dimensions: int ) -> np.ndarray:
    """Project the dataset onto its `dimensions` principal components."""
    # Check if the features have been loaded
    if features.any():
        data_mean = features.mean(1 )
        # Center the dataset
        centered_data = features - np.reshape(data_mean , (data_mean.size, 1) )
        covariance_matrix = np.dot(centered_data , centered_data.T ) / features.shape[1]
        _, eigenvectors = np.linalg.eigh(covariance_matrix )
        # Take all the columns in the reverse order (-1), and then takes only the first
        # `dimensions` columns
        filtered_eigenvectors = eigenvectors[:, ::-1][:, 0:dimensions]
        # Project the database on the new space
        projected_data = np.dot(filtered_eigenvectors.T , centered_data )
        logging.info('Principal Component Analysis computed' )
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR , format='%(message)s' , force=True )
        logging.error('Dataset empty' )
        raise AssertionError
def linear_discriminant_analysis(features: np.ndarray , labels: np.ndarray , classes: int , dimensions: int ) -> np.ndarray:
    """Reduce the dataset to `dimensions` while maximizing class separation."""
    # The target dimensionality must be smaller than the number of classes
    assert classes > dimensions
    # Check if features have been already loaded
    if features.any():
        _, eigenvectors = eigh(
            covariance_between_classes(features , labels , classes ) , covariance_within_classes(features , labels , classes ) , )
        filtered_eigenvectors = eigenvectors[:, ::-1][:, :dimensions]
        svd_matrix, _, _ = np.linalg.svd(filtered_eigenvectors )
        filtered_svd_matrix = svd_matrix[:, 0:dimensions]
        projected_data = np.dot(filtered_svd_matrix.T , features )
        logging.info('Linear Discriminant Analysis computed' )
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR , format='%(message)s' , force=True )
        logging.error('Dataset empty' )
        raise AssertionError
def test_linear_discriminant_analysis() -> None:
    # Create dummy dataset with 2 classes and 3 features
    features = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] )
    labels = np.array([0, 0, 0, 1, 1] )
    classes = 2
    dimensions = 2
    # Assert that the function raises an AssertionError if dimensions > classes
    with pytest.raises(AssertionError ) as error_info:
        projected_data = linear_discriminant_analysis(
            features , labels , classes , dimensions )
        if isinstance(projected_data , np.ndarray ):
            raise AssertionError(
                'Did not raise AssertionError for dimensions > classes' )
    assert error_info.type is AssertionError
def test_principal_component_analysis() -> None:
    features = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] )
    dimensions = 2
    expected_output = np.array([[6.92820323, 8.66025404, 10.39230485], [3.0, 3.0, 3.0]] )
    with pytest.raises(AssertionError ) as error_info:
        output = principal_component_analysis(features , dimensions )
        if not np.allclose(expected_output , output ):
            raise AssertionError
    assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod()
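# Illustrative end-to-end run of the helpers above on the toy dataset from the
# tests (PCA only: the within-class covariance of this toy set is singular, so
# the generalized eigensolver inside `linear_discriminant_analysis` would fail):
if __name__ == "__main__":
    demo_features = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] , dtype=float )
    print(principal_component_analysis(demo_features , dimensions=2 ) )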
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNetaDModel
lowerCAmelCase__ : List[str] = HfApi()
lowerCAmelCase__ : str = {}
# fmt: off
lowerCAmelCase__ : int = torch.tensor([
-0.75_15, -1.68_83, 0.24_20, 0.03_00, 0.63_47, 1.34_33, -1.17_43, -3.74_67,
1.23_42, -2.24_85, 0.46_36, 0.80_76, -0.79_91, 0.39_69, 0.84_98, 0.91_89,
-1.88_87, -3.35_22, 0.76_39, 0.20_40, 0.62_71, -2.71_48, -1.63_16, 3.08_39,
0.31_86, 0.27_21, -0.97_59, -1.24_61, 2.62_57, 1.35_57
])
lowerCAmelCase__ : Dict = torch.tensor([
-2.36_39, -2.53_44, 0.00_54, -0.66_74, 1.59_90, 1.01_58, 0.31_24, -2.14_36,
1.87_95, -2.54_29, -0.15_66, -0.39_73, 1.24_90, 2.64_47, 1.22_83, -0.52_08,
-2.81_54, -3.51_19, 2.38_38, 1.20_33, 1.72_01, -2.12_56, -1.45_76, 2.79_48,
2.42_04, -0.97_52, -1.25_46, 0.80_27, 3.27_58, 3.13_65
])
lowerCAmelCase__ : Dict = torch.tensor([
-0.65_31, -0.68_91, -0.31_72, -0.53_75, -0.91_40, -0.53_67, -0.11_75, -0.78_69,
-0.38_08, -0.45_13, -0.20_98, -0.00_83, 0.31_83, 0.51_40, 0.22_47, -0.13_04,
-0.13_02, -0.28_02, -0.20_84, -0.20_25, -0.49_67, -0.48_73, -0.08_61, 0.69_25,
0.02_50, 0.12_90, -0.15_43, 0.63_16, 1.04_60, 1.49_43
])
lowerCAmelCase__ : List[str] = torch.tensor([
0.09_11, 0.11_07, 0.01_82, 0.04_35, -0.08_05, -0.06_08, 0.03_81, 0.21_72,
-0.02_80, 0.13_27, -0.02_99, -0.02_55, -0.00_50, -0.11_70, -0.10_46, 0.03_09,
0.13_67, 0.17_28, -0.05_33, -0.07_48, -0.05_34, 0.16_24, 0.03_84, -0.18_05,
-0.07_07, 0.06_42, 0.02_20, -0.01_34, -0.13_33, -0.15_05
])
lowerCAmelCase__ : Union[str, Any] = torch.tensor([
0.13_21, 0.13_37, 0.04_40, 0.06_22, -0.05_91, -0.03_70, 0.05_03, 0.21_33,
-0.01_77, 0.14_15, -0.01_16, -0.01_12, 0.00_44, -0.09_80, -0.07_89, 0.03_95,
0.15_02, 0.17_85, -0.04_88, -0.05_14, -0.04_04, 0.15_39, 0.04_54, -0.15_59,
-0.06_65, 0.06_59, 0.03_83, -0.00_05, -0.12_66, -0.13_86
])
lowerCAmelCase__ : List[Any] = torch.tensor([
0.11_54, 0.12_18, 0.03_07, 0.05_26, -0.07_11, -0.05_41, 0.03_66, 0.20_78,
-0.02_67, 0.13_17, -0.02_26, -0.01_93, -0.00_14, -0.10_55, -0.09_02, 0.03_30,
0.13_91, 0.17_09, -0.05_62, -0.06_93, -0.05_60, 0.14_82, 0.03_81, -0.16_83,
-0.06_81, 0.06_61, 0.03_31, -0.00_46, -0.12_68, -0.14_31
])
lowerCAmelCase__ : Optional[Any] = torch.tensor([
0.11_92, 0.12_40, 0.04_14, 0.06_06, -0.05_57, -0.04_12, 0.04_30, 0.20_42,
-0.02_00, 0.13_85, -0.01_15, -0.01_32, 0.00_17, -0.09_65, -0.08_02, 0.03_98,
0.14_33, 0.17_47, -0.04_58, -0.05_33, -0.04_07, 0.15_45, 0.04_19, -0.15_74,
-0.06_45, 0.06_26, 0.03_41, -0.00_10, -0.11_99, -0.13_90
])
lowerCAmelCase__ : List[str] = torch.tensor([
0.10_75, 0.10_74, 0.02_05, 0.04_31, -0.07_74, -0.06_07, 0.02_98, 0.20_42,
-0.03_20, 0.12_67, -0.02_81, -0.02_50, -0.00_64, -0.10_91, -0.09_46, 0.02_90,
0.13_28, 0.16_50, -0.05_80, -0.07_38, -0.05_86, 0.14_40, 0.03_37, -0.17_46,
-0.07_12, 0.06_05, 0.02_50, -0.00_99, -0.13_16, -0.14_73
])
lowerCAmelCase__ : List[str] = torch.tensor([
-1.45_72, -2.04_81, -0.04_14, -0.60_05, 1.41_36, 0.58_48, 0.40_28, -2.73_30,
1.22_12, -2.12_28, 0.21_55, 0.40_39, 0.76_62, 2.05_35, 0.74_77, -0.32_43,
-2.17_58, -2.76_48, 1.69_47, 0.70_26, 1.23_38, -1.60_78, -0.86_82, 2.28_10,
1.85_74, -0.57_18, -0.55_86, -0.01_86, 2.34_15, 2.12_51])
lowerCAmelCase__ : List[Any] = torch.tensor([
-1.36_90, -1.97_20, -0.40_90, -0.69_66, 1.46_60, 0.99_38, -0.13_85, -2.73_24,
0.77_36, -1.89_17, 0.29_23, 0.42_93, 0.16_93, 1.41_12, 1.18_87, -0.31_81,
-2.21_60, -2.63_81, 1.31_70, 0.81_63, 0.92_40, -1.65_44, -0.60_99, 2.52_59,
1.64_30, -0.90_90, -0.93_92, -0.01_26, 2.42_68, 2.32_66
])
lowerCAmelCase__ : Tuple = torch.tensor([
-1.35_25, -1.96_28, -0.39_56, -0.68_60, 1.46_64, 1.00_14, -0.12_59, -2.72_12,
0.77_72, -1.88_11, 0.29_96, 0.43_88, 0.17_04, 1.40_29, 1.17_01, -0.30_27,
-2.20_53, -2.62_87, 1.33_50, 0.81_31, 0.92_74, -1.62_92, -0.60_98, 2.51_31,
1.65_05, -0.89_58, -0.92_98, -0.01_51, 2.42_57, 2.33_55
])
lowerCAmelCase__ : List[str] = torch.tensor([
-2.05_85, -2.78_97, -0.28_50, -0.89_40, 1.90_52, 0.57_02, 0.63_45, -3.89_59,
1.59_32, -3.23_19, 0.19_74, 0.02_87, 1.75_66, 2.65_43, 0.83_87, -0.53_51,
-3.27_36, -4.33_75, 2.90_29, 1.63_90, 1.46_40, -2.17_01, -1.90_13, 2.93_41,
3.49_81, -0.62_55, -1.16_44, -0.15_91, 3.70_97, 3.20_66
])
lowerCAmelCase__ : Dict = torch.tensor([
-2.31_39, -2.55_94, -0.01_97, -0.67_85, 1.70_01, 1.16_06, 0.30_75, -2.17_40,
1.80_71, -2.56_30, -0.09_26, -0.38_11, 1.21_16, 2.62_46, 1.27_31, -0.53_98,
-2.81_53, -3.61_40, 2.38_93, 1.32_62, 1.62_58, -2.18_56, -1.32_67, 2.83_95,
2.37_79, -1.06_23, -1.24_68, 0.89_59, 3.33_67, 3.22_43
])
lowerCAmelCase__ : Dict = torch.tensor([
-2.06_28, -2.76_67, -0.20_89, -0.82_63, 2.05_39, 0.59_92, 0.64_95, -3.83_36,
1.60_25, -3.28_17, 0.17_21, -0.06_33, 1.75_16, 2.70_39, 0.81_00, -0.59_08,
-3.21_13, -4.43_43, 2.92_57, 1.36_32, 1.55_62, -2.14_89, -1.98_94, 3.05_60,
3.33_96, -0.73_28, -1.04_17, 0.03_83, 3.70_93, 3.23_43
])
lowerCAmelCase__ : Any = torch.tensor([
-1.45_74, -2.05_69, -0.04_73, -0.61_17, 1.40_18, 0.57_69, 0.41_29, -2.73_44,
1.22_41, -2.13_97, 0.20_00, 0.39_37, 0.76_16, 2.04_53, 0.73_24, -0.33_91,
-2.17_46, -2.77_44, 1.69_63, 0.69_21, 1.21_87, -1.61_72, -0.88_77, 2.24_39,
1.84_71, -0.58_39, -0.56_05, -0.04_64, 2.32_50, 2.12_19
])
# fmt: on
lowerCAmelCase__ : Any = api.list_models(filter='''diffusers''')
for mod in models:
if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
lowerCAmelCase__ : List[str] = '''/home/patrick/google_checkpoints/''' + mod.modelId.split('''/''')[-1]
print(F'''Started running {mod.modelId}!!!''')
if mod.modelId.startswith('''CompVis'''):
lowerCAmelCase__ : int = UNetaDModel.from_pretrained(local_checkpoint, subfolder='''unet''')
else:
lowerCAmelCase__ : str = UNetaDModel.from_pretrained(local_checkpoint)
torch.manual_seed(0)
random.seed(0)
lowerCAmelCase__ : Any = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
lowerCAmelCase__ : List[str] = torch.tensor([10] * noise.shape[0])
with torch.no_grad():
lowerCAmelCase__ : int = model(noise, time_step).sample
assert torch.allclose(
logits[0, 0, 0, :30], results['''_'''.join('''_'''.join(mod.modelId.split('''/''')).split('''-'''))], atol=1E-3
)
print(F'''{mod.modelId} has passed successfully!!!''')
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ : str = logging.get_logger(__name__)
lowerCAmelCase__ : List[str] = {
'''microsoft/cvt-13''': '''https://huggingface.co/microsoft/cvt-13/resolve/main/config.json''',
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class CvtConfig(PretrainedConfig):
    model_type = "cvt"
    def __init__( self , num_channels=3 , patch_sizes=[7, 3, 3] , patch_stride=[4, 2, 2] , patch_padding=[2, 1, 1] , embed_dim=[64, 192, 384] , num_heads=[1, 3, 6] , depth=[1, 2, 10] , mlp_ratio=[4.0, 4.0, 4.0] , attention_drop_rate=[0.0, 0.0, 0.0] , drop_rate=[0.0, 0.0, 0.0] , drop_path_rate=[0.0, 0.0, 0.1] , qkv_bias=[True, True, True] , cls_token=[False, False, True] , qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"] , kernel_qkv=[3, 3, 3] , padding_kv=[1, 1, 1] , stride_kv=[2, 2, 2] , padding_q=[1, 1, 1] , stride_q=[1, 1, 1] , initializer_range=0.02 , layer_norm_eps=1E-12 , **kwargs , ):
        super().__init__(**kwargs )
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
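# Usage sketch (the relative import above means this module lives inside the
# transformers package; `CvtModel` is the matching model class exported there):
#
#   from transformers import CvtConfig, CvtModel
#   config = CvtConfig(depth=[1, 2, 10], num_heads=[1, 3, 6])
#   model = CvtModel(config)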
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
lowerCAmelCase__ : Dict = logging.get_logger(__name__)
class PerceiverFeatureExtractor(PerceiverImageProcessor):
    def __init__( self , *args , **kwargs ) -> None:
        warnings.warn(
            'The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use PerceiverImageProcessor instead.' , FutureWarning , )
        super().__init__(*args , **kwargs )
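# Quick sanity check of the shim (sketch; must run inside the transformers
# package because of the relative imports above):
#
#   import warnings
#   with warnings.catch_warnings(record=True) as caught:
#       warnings.simplefilter('always')
#       PerceiverFeatureExtractor()
#   assert any('deprecated' in str(w.message) for w in caught)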
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_distilbert''': [
'''DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''DistilBertConfig''',
'''DistilBertOnnxConfig''',
],
'''tokenization_distilbert''': ['''DistilBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_distilbert_fast'] = ['DistilBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_distilbert'] = [
'''DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DistilBertForMaskedLM''',
'''DistilBertForMultipleChoice''',
'''DistilBertForQuestionAnswering''',
'''DistilBertForSequenceClassification''',
'''DistilBertForTokenClassification''',
'''DistilBertModel''',
'''DistilBertPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_distilbert'] = [
'''TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFDistilBertForMaskedLM''',
'''TFDistilBertForMultipleChoice''',
'''TFDistilBertForQuestionAnswering''',
'''TFDistilBertForSequenceClassification''',
'''TFDistilBertForTokenClassification''',
'''TFDistilBertMainLayer''',
'''TFDistilBertModel''',
'''TFDistilBertPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_distilbert'] = [
'''FlaxDistilBertForMaskedLM''',
'''FlaxDistilBertForMultipleChoice''',
'''FlaxDistilBertForQuestionAnswering''',
'''FlaxDistilBertForSequenceClassification''',
'''FlaxDistilBertForTokenClassification''',
'''FlaxDistilBertModel''',
'''FlaxDistilBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
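# What the _LazyModule wiring above buys you: heavyweight submodules are only
# imported on first attribute access, so e.g. the TF classes never load in a
# torch-only environment (sketch, assumes an installed transformers):
#
#   from transformers.models.distilbert import DistilBertConfig  # resolved lazily
#   config = DistilBertConfig()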
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class __snake_case ( datasets.BuilderConfig ):
__lowerCamelCase = None
__lowerCamelCase = "utf-8"
__lowerCamelCase = None
__lowerCamelCase = None
__lowerCamelCase = True # deprecated
__lowerCamelCase = None # deprecated
__lowerCamelCase = 10 << 20 # 10MB
__lowerCamelCase = None
class __snake_case ( datasets.ArrowBasedBuilder ):
__lowerCamelCase = JsonConfig
def __a ( self ) -> Optional[Any]:
'''simple docstring'''
if self.config.block_size is not None:
logger.warning('The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead' )
            self.config.chunksize = self.config.block_size
if self.config.use_threads is not True:
logger.warning(
'The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.' )
if self.config.newlines_in_values is not None:
raise ValueError('The JSON loader parameter `newlines_in_values` is no longer supported' )
return datasets.DatasetInfo(features=self.config.features )
def __a ( self , __UpperCamelCase ) -> Dict:
'''simple docstring'''
if not self.config.data_files:
raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
        data_files = dl_manager.download_and_extract(self.config.data_files )
        if isinstance(data_files , (str, list, tuple) ):
            files = data_files
            if isinstance(files , str ):
                files = [files]
            files = [dl_manager.iter_files(file ) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'files': files} )]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files , str ):
                files = [files]
            files = [dl_manager.iter_files(file ) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name , gen_kwargs={'files': files} ) )
return splits
def __a ( self , __UpperCamelCase ) -> pa.Table:
'''simple docstring'''
if self.config.features is not None:
# adding missing columns
for column_name in set(self.config.features ) - set(pa_table.column_names ):
                type = self.config.features.arrow_schema.field(column_name ).type
                pa_table = pa_table.append_column(column_name , pa.array([None] * len(pa_table ) , type=type ) )
# more expensive cast to support nested structures with keys in a different order
# allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table , self.config.features.arrow_schema )
return pa_table
def __a ( self , __UpperCamelCase ) -> int:
'''simple docstring'''
for file_idx, file in enumerate(itertools.chain.from_iterable(__UpperCamelCase ) ):
# If the file is one json object and if we need to look at the list of items in one specific field
if self.config.field is not None:
with open(__UpperCamelCase , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
snake_case__ : Union[str, Any] = json.load(__UpperCamelCase )
# We keep only the field we are interested in
snake_case__ : Tuple = dataset[self.config.field]
# We accept two format: a list of dicts or a dict of lists
if isinstance(__UpperCamelCase , (list, tuple) ):
snake_case__ : List[Any] = set().union(*[row.keys() for row in dataset] )
snake_case__ : List[Any] = {col: [row.get(__UpperCamelCase ) for row in dataset] for col in keys}
else:
snake_case__ : List[Any] = dataset
snake_case__ : Dict = pa.Table.from_pydict(__UpperCamelCase )
yield file_idx, self._cast_table(__UpperCamelCase )
# If the file has one json object per line
else:
with open(__UpperCamelCase , 'rb' ) as f:
snake_case__ : Optional[int] = 0
# Use block_size equal to the chunk size divided by 32 to leverage multithreading
# Set a default minimum value of 16kB if the chunk size is really small
snake_case__ : Tuple = max(self.config.chunksize // 32 , 16 << 10 )
snake_case__ : Optional[Any] = (
self.config.encoding_errors if self.config.encoding_errors is not None else 'strict'
)
while True:
snake_case__ : Optional[int] = f.read(self.config.chunksize )
if not batch:
break
# Finish current line
try:
batch += f.readline()
except (AttributeError, io.UnsupportedOperation):
batch += readline(__UpperCamelCase )
# PyArrow only accepts utf-8 encoded bytes
if self.config.encoding != "utf-8":
snake_case__ : int = batch.decode(self.config.encoding , errors=__UpperCamelCase ).encode('utf-8' )
try:
while True:
try:
snake_case__ : List[str] = paj.read_json(
io.BytesIO(__UpperCamelCase ) , read_options=paj.ReadOptions(block_size=__UpperCamelCase ) )
break
except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
if (
isinstance(__UpperCamelCase , pa.ArrowInvalid )
and "straddling" not in str(__UpperCamelCase )
or block_size > len(__UpperCamelCase )
):
raise
else:
# Increase the block size in case it was too small.
# The block size will be reset for the next file.
logger.debug(
F"""Batch of {len(__UpperCamelCase )} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.""" )
block_size *= 2
except pa.ArrowInvalid as e:
try:
with open(
__UpperCamelCase , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
snake_case__ : Tuple = json.load(__UpperCamelCase )
except json.JSONDecodeError:
logger.error(F"""Failed to read file '{file}' with error {type(__UpperCamelCase )}: {e}""" )
raise e
# If possible, parse the file as a list of json objects and exit the loop
if isinstance(__UpperCamelCase , __UpperCamelCase ): # list is the only sequence type supported in JSON
try:
snake_case__ : str = set().union(*[row.keys() for row in dataset] )
snake_case__ : Union[str, Any] = {col: [row.get(__UpperCamelCase ) for row in dataset] for col in keys}
snake_case__ : List[str] = pa.Table.from_pydict(__UpperCamelCase )
except (pa.ArrowInvalid, AttributeError) as e:
logger.error(F"""Failed to read file '{file}' with error {type(__UpperCamelCase )}: {e}""" )
raise ValueError(F"""Not able to read records in the JSON file at {file}.""" ) from None
yield file_idx, self._cast_table(__UpperCamelCase )
break
else:
logger.error(F"""Failed to read file '{file}' with error {type(__UpperCamelCase )}: {e}""" )
raise ValueError(
F"""Not able to read records in the JSON file at {file}. """
F"""You should probably indicate the field of the JSON file containing your records. """
F"""This JSON file contain the following fields: {str(list(dataset.keys() ) )}. """
F"""Select the correct one and provide it as `field='XXX'` to the dataset loading method. """ ) from None
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(__UpperCamelCase )
batch_idx += 1
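# Usage sketch: this builder backs the packaged `json` loader in `datasets`;
# `field` selects one key of a single JSON document (file names hypothetical):
#
#   from datasets import load_dataset
#   ds = load_dataset('json', data_files='records.jsonl')
#   nested = load_dataset('json', data_files='dump.json', field='data')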
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_mgp_str''': ['''MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MgpstrConfig'''],
'''processing_mgp_str''': ['''MgpstrProcessor'''],
'''tokenization_mgp_str''': ['''MgpstrTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mgp_str'] = [
'''MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MgpstrModel''',
'''MgpstrPreTrainedModel''',
'''MgpstrForSceneTextRecognition''',
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'configuration_xglm': ['XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XGLMConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_xglm'] = ['XGLMTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_xglm_fast'] = ['XGLMTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_xglm'] = [
'''XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XGLMForCausalLM''',
'''XGLMModel''',
'''XGLMPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_xglm'] = [
'''FlaxXGLMForCausalLM''',
'''FlaxXGLMModel''',
'''FlaxXGLMPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_xglm'] = [
'''TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXGLMForCausalLM''',
'''TFXGLMModel''',
'''TFXGLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
import os
from distutils.util import strtobool
def get_int_from_env(env_keys , default ):
    """Returns the first positive env value found in the `env_keys` list or the default."""
    for e in env_keys:
        val = int(os.environ.get(e , -1 ) )
        if val >= 0:
            return val
    return default
def parse_flag_from_env(key , default=False ):
    value = os.environ.get(key , str(default ) )
    return strtobool(value ) == 1  # As its name indicates `strtobool` actually returns an int...
def parse_choice_from_env(key , default="no" ):
    value = os.environ.get(key , str(default ) )
    return value
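# Small runnable demo of the helpers above (the environment variable names are
# made up for illustration):
if __name__ == "__main__":
    os.environ.setdefault("DEMO_WORKERS" , "4" )
    os.environ.setdefault("DEMO_VERBOSE" , "yes" )
    print(get_int_from_env(["DEMO_WORKERS", "WORKERS"] , 1 ) )
    print(parse_flag_from_env("DEMO_VERBOSE" ) )
    print(parse_choice_from_env("DEMO_MODE" ) )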
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
lowerCAmelCase__ : Dict = 2_00
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
lowerCAmelCase__ : List[str] = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
lowerCAmelCase__ : List[str] = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 10_00))
def UpperCamelCase__ ( A__ , A__ ) -> tuple[str, float]:
snake_case__ : Tuple = len([g for position, g in enumerate(A__ ) if g == main_target[position]] )
return (item, float(A__ ))
def UpperCamelCase__ ( A__ , A__ ) -> tuple[str, str]:
snake_case__ : str = random.randint(0 , len(A__ ) - 1 )
snake_case__ : int = parent_a[:random_slice] + parent_a[random_slice:]
snake_case__ : Any = parent_a[:random_slice] + parent_a[random_slice:]
return (child_a, child_a)
def UpperCamelCase__ ( A__ , A__ ) -> str:
snake_case__ : List[Any] = list(A__ )
if random.uniform(0 , 1 ) < MUTATION_PROBABILITY:
snake_case__ : Optional[Any] = random.choice(A__ )
return "".join(A__ )
def UpperCamelCase__ ( A__ , A__ , A__ , ) -> list[str]:
snake_case__ : Tuple = []
# Generate more children proportionally to the fitness score.
snake_case__ : Optional[Any] = int(parent_a[1] * 100 ) + 1
snake_case__ : str = 10 if child_n >= 10 else child_n
for _ in range(A__ ):
snake_case__ : Any = population_score[random.randint(0 , A__ )][0]
snake_case__ , snake_case__ : int = crossover(parent_a[0] , A__ )
# Append new string to the population list.
pop.append(mutate(A__ , A__ ) )
pop.append(mutate(A__ , A__ ) )
return pop
def UpperCamelCase__ ( A__ , A__ , A__ = True ) -> tuple[int, int, str]:
# Verify if N_POPULATION is bigger than N_SELECTED
if N_POPULATION < N_SELECTED:
snake_case__ : Union[str, Any] = F"""{N_POPULATION} must be bigger than {N_SELECTED}"""
raise ValueError(A__ )
# Verify that the target contains no genes besides the ones inside genes variable.
snake_case__ : Tuple = sorted({c for c in target if c not in genes} )
if not_in_genes_list:
snake_case__ : int = F"""{not_in_genes_list} is not in genes list, evolution cannot converge"""
raise ValueError(A__ )
# Generate random starting population.
snake_case__ : Union[str, Any] = []
for _ in range(A__ ):
population.append(''.join([random.choice(A__ ) for i in range(len(A__ ) )] ) )
# Just some logs to know what the algorithms is doing.
snake_case__ , snake_case__ : str = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
total_population += len(A__ )
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
snake_case__ : List[Any] = [evaluate(A__ , A__ ) for item in population]
# Check if there is a matching evolution.
snake_case__ : int = sorted(A__ , key=lambda A__ : x[1] , reverse=A__ )
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
# Print the best result every 10 generation.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
F"""\nGeneration: {generation}"""
F"""\nTotal Population:{total_population}"""
F"""\nBest score: {population_score[0][1]}"""
F"""\nBest string: {population_score[0][0]}""" )
# Flush the old population, keeping some of the best evolutions.
# Keeping this avoid regression of evolution.
snake_case__ : Optional[int] = population[: int(N_POPULATION / 3 )]
population.clear()
population.extend(A__ )
# Normalize population score to be between 0 and 1.
snake_case__ : str = [
(item, score / len(A__ )) for item, score in population_score
]
# This is selection
for i in range(A__ ):
population.extend(select(population_score[int(A__ )] , A__ , A__ ) )
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
# forever to compute large strings, but will also calculate small strings in
# a far fewer generations.
if len(A__ ) > N_POPULATION:
break
if __name__ == "__main__":
lowerCAmelCase__ : str = (
'''This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!'''
)
lowerCAmelCase__ : Optional[Any] = list(
''' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm'''
'''nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\'''
)
lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__ : List[str] = basic(target_str, genes_list)
print(
F'''\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'''
)
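# Illustrative smoke test of the operators above (uses a fixed seed; runs on
# import, unlike the full evolution guarded by __main__):
random.seed(0)
_c1, _c2 = crossover("abcd", "wxyz")
assert len(_c1) == len(_c2) == 4
assert evaluate("abc", "abd")[1] == 2.0  # two of three positions match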
| 699 | 0 |
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
lowerCAmelCase__ : List[str] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
lowerCAmelCase__ : Any = ''' def __init__(self, config):
super().__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
# Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
self.decoder.bias = self.bias
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states)
return hidden_states
'''
class __snake_case ( unittest.TestCase ):
def __a ( self ) -> Any:
'''simple docstring'''
snake_case__ : Optional[int] = tempfile.mkdtemp()
os.makedirs(os.path.join(self.transformer_dir , 'models/bert/' ) )
snake_case__ : Optional[int] = self.transformer_dir
shutil.copy(
os.path.join(__UpperCamelCase , 'src/transformers/models/bert/modeling_bert.py' ) , os.path.join(self.transformer_dir , 'models/bert/modeling_bert.py' ) , )
def __a ( self ) -> Tuple:
'''simple docstring'''
snake_case__ : List[Any] = 'src/transformers'
shutil.rmtree(self.transformer_dir )
def __a ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None ) -> str:
'''simple docstring'''
snake_case__ : Dict = comment + F"""\nclass {class_name}(nn.Module):\n""" + class_code
if overwrite_result is not None:
snake_case__ : Tuple = comment + F"""\nclass {class_name}(nn.Module):\n""" + overwrite_result
snake_case__ : Union[str, Any] = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 )
snake_case__ : Union[str, Any] = black.format_str(__UpperCamelCase , mode=__UpperCamelCase )
snake_case__ : Union[str, Any] = os.path.join(self.transformer_dir , 'new_code.py' )
with open(__UpperCamelCase , 'w' , newline='\n' ) as f:
f.write(__UpperCamelCase )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(__UpperCamelCase ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=__UpperCamelCase )
with open(__UpperCamelCase , 'r' ) as f:
self.assertTrue(f.read() , __UpperCamelCase )
def __a ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : List[str] = check_copies.find_code_in_transformers('models.bert.modeling_bert.BertLMPredictionHead' )
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
def __a ( self ) -> Union[str, Any]:
'''simple docstring'''
self.check_copy_consistency(
'# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead' , 'BertLMPredictionHead' , REFERENCE_CODE + '\n' , )
# With no empty line at the end
self.check_copy_consistency(
'# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead' , 'BertLMPredictionHead' , __UpperCamelCase , )
# Copy consistency with rename
self.check_copy_consistency(
'# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel' , 'TestModelLMPredictionHead' , re.sub('Bert' , 'TestModel' , __UpperCamelCase ) , )
# Copy consistency with a really long name
snake_case__ : str = 'TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'
self.check_copy_consistency(
F"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}""" , F"""{long_class_name}LMPredictionHead""" , re.sub('Bert' , __UpperCamelCase , __UpperCamelCase ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
'# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel' , 'TestModelLMPredictionHead' , __UpperCamelCase , overwrite_result=re.sub('Bert' , 'TestModel' , __UpperCamelCase ) , )
def __a ( self ) -> List[str]:
'''simple docstring'''
snake_case__ : Union[str, Any] = check_copies.LOCALIZED_READMES['README_zh-hans.md']
snake_case__ : Optional[int] = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'
' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'
' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'
' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1.'
' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),'
' released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'
' lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same'
' method has been applied to compress GPT2 into'
' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'
' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'
' Multilingual BERT into'
' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'
' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**'
' (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders'
' as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang'
' Luong, Quoc V. Le, Christopher D. Manning.'
)
snake_case__ : Any = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
)
snake_case__ : List[Any] = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1.'
' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文'
' [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'
' lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same'
' method has been applied to compress GPT2 into'
' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'
' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'
' Multilingual BERT into'
' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'
' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自'
' Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather'
' than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,'
' Christopher D. Manning 发布。\n'
)
snake_case__ : Tuple = check_copies.convert_to_localized_md(
__UpperCamelCase , __UpperCamelCase , localized_readme['format_model_list'] )
self.assertFalse(__UpperCamelCase )
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
snake_case__ : int = check_copies.convert_to_localized_md(
__UpperCamelCase , __UpperCamelCase , localized_readme['format_model_list'] )
# Check whether the number of models is equal to README.md after conversion.
self.assertTrue(__UpperCamelCase )
snake_case__ : int = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'
' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'
' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'
' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.'
)
snake_case__ : List[str] = (
'1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and'
' the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
)
snake_case__ : Dict = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
)
snake_case__ : Optional[int] = check_copies.convert_to_localized_md(
__UpperCamelCase , __UpperCamelCase , localized_readme['format_model_list'] )
# Check if the model link is synchronized.
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
| 700 |
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
T = TypeVar("T")
class Node(Generic[T]):
    def __init__(self, data: T) -> None:
        self.data = data
        self.next: Node[T] | None = None
    def __str__(self) -> str:
        return f"{self.data}"
class Stack(Generic[T]):
    def __init__(self) -> None:
        self.top: Node[T] | None = None
    def __iter__(self) -> Iterator[T]:
        node = self.top
        while node:
            yield node.data
            node = node.next
    def __str__(self) -> str:
        return "->".join([str(item) for item in self])
    def __len__(self) -> int:
        return len(tuple(iter(self)))
    def is_empty(self) -> bool:
        return self.top is None
    def push(self, item: T) -> None:
        node = Node(item)
        if not self.is_empty():
            node.next = self.top
        self.top = node
    def pop(self) -> T:
        if self.is_empty():
            raise IndexError("pop from empty stack")
        assert isinstance(self.top, Node)
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data
    def peek(self) -> T:
        if self.is_empty():
            raise IndexError("peek from empty stack")
        assert self.top is not None
        return self.top.data
    def clear(self) -> None:
        self.top = None
if __name__ == "__main__":
from doctest import testmod
testmod()
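# Example usage of the Stack above:
stack: Stack[int] = Stack()
for value in (1, 2, 3):
    stack.push(value)
assert str(stack) == "3->2->1"
assert stack.pop() == 3
assert stack.peek() == 2
assert len(stack) == 2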
| 699 | 0 |
import numpy as np
import qiskit
def bbaa(key_len: int = 8, seed=None) -> str:
    rng = np.random.default_rng(seed=seed)
    # Roughly 25% of the qubits will contribute to the key.
    # So we take more than we need.
    num_qubits = 6 * key_len
    # Measurement basis for Alice's qubits.
    alice_basis = rng.integers(2, size=num_qubits)
    # The set of states Alice will prepare.
    alice_state = rng.integers(2, size=num_qubits)
    # Measurement basis for Bob's qubits.
    bob_basis = rng.integers(2, size=num_qubits)
    # Quantum Circuit to simulate BB84
    bbaa_circ = qiskit.QuantumCircuit(num_qubits, name="BB84")
    # Alice prepares her qubits according to rules above.
    for index, _ in enumerate(alice_basis):
        if alice_state[index] == 1:
            bbaa_circ.x(index)
        if alice_basis[index] == 1:
            bbaa_circ.h(index)
    bbaa_circ.barrier()
    # Bob measures the received qubits according to rules above.
    for index, _ in enumerate(bob_basis):
        if bob_basis[index] == 1:
            bbaa_circ.h(index)
    bbaa_circ.barrier()
    bbaa_circ.measure_all()
    # Simulate the quantum circuit.
    sim = qiskit.Aer.get_backend("aer_simulator")
    # We only need to run one shot because the key is unique.
    # Multiple shots will produce the same key.
    job = qiskit.execute(bbaa_circ, sim, shots=1, seed_simulator=seed)
    # Returns the result of measurement.
    result = job.result().get_counts(bbaa_circ).most_frequent()
    # Extracting the generated key from the simulation results.
    # Only keep measurement results where Alice and Bob chose the same basis.
    gen_key = "".join(
        [
            result_bit
            for alice_basis_bit, bob_basis_bit, result_bit in zip(
                alice_basis, bob_basis, result
            )
            if alice_basis_bit == bob_basis_bit
        ]
    )
    # Get final key. Pad with 0 if too short, otherwise truncate.
    key = gen_key[:key_len] if len(gen_key) >= key_len else gen_key.ljust(key_len, "0")
    return key
if __name__ == "__main__":
    print(f"The generated key is : {bbaa(8, seed=0)}")
    from doctest import testmod
    testmod()
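# Sanity check of the sifting rule used above, done classically: bits survive
# only where the two (random, independent) bases agree, so roughly half of the
# positions are kept on average.
_rng_check = np.random.default_rng(1)
_a_basis = _rng_check.integers(2, size=1_000)
_b_basis = _rng_check.integers(2, size=1_000)
_kept = int((_a_basis == _b_basis).sum())
assert 400 < _kept < 600  # ~50% agreement for independent fair coin flips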
| 701 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sail/poolformer_s12": "https://huggingface.co/sail/poolformer_s12/resolve/main/config.json",
    # See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}
class PoolFormerConfig(PretrainedConfig):
    model_type = "poolformer"
    def __init__(
        self,
        num_channels=3,
        patch_size=16,
        stride=16,
        pool_size=3,
        mlp_ratio=4.0,
        depths=[2, 2, 6, 2],
        hidden_sizes=[64, 128, 320, 512],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        padding=[2, 1, 1, 1],
        num_encoder_blocks=4,
        drop_path_rate=0.0,
        hidden_act="gelu",
        use_layer_scale=True,
        layer_scale_init_value=1e-5,
        initializer_range=0.02,
        **kwargs,
    ):
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.stride = stride
        self.padding = padding
        self.pool_size = pool_size
        self.hidden_sizes = hidden_sizes
        self.mlp_ratio = mlp_ratio
        self.depths = depths
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.num_encoder_blocks = num_encoder_blocks
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.initializer_range = initializer_range
        super().__init__(**kwargs)
class PoolFormerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )
    @property
    def atol_for_validation(self) -> float:
        return 2e-3
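# Example (a sketch, runnable only inside the transformers package because of
# the relative imports above): build a config and inspect the ONNX settings.
# config = PoolFormerConfig(num_encoder_blocks=4, hidden_sizes=[64, 128, 320, 512])
# onnx_config = PoolFormerOnnxConfig(config)
# list(onnx_config.inputs)          # -> ['pixel_values']
# onnx_config.atol_for_validation   # -> 0.002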
| 699 | 0 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class ConditionalDetrImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        '''simple docstring'''
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
        '''simple docstring'''
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
@require_torch
@require_vision
class ConditionalDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ConditionalDetrImageProcessor if is_vision_available() else None
    def setUp(self):
        '''simple docstring'''
        self.image_processor_tester = ConditionalDetrImageProcessingTester(self)
@property
    def image_processor_dict(self):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def __a ( self ) -> Tuple:
'''simple docstring'''
snake_case__ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__UpperCamelCase , 'image_mean' ) )
self.assertTrue(hasattr(__UpperCamelCase , 'image_std' ) )
self.assertTrue(hasattr(__UpperCamelCase , 'do_normalize' ) )
self.assertTrue(hasattr(__UpperCamelCase , 'do_resize' ) )
self.assertTrue(hasattr(__UpperCamelCase , 'size' ) )
def __a ( self ) -> Any:
'''simple docstring'''
snake_case__ : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 18, 'longest_edge': 1333} )
self.assertEqual(image_processor.do_pad , __UpperCamelCase )
snake_case__ : int = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=__UpperCamelCase )
self.assertEqual(image_processor.size , {'shortest_edge': 42, 'longest_edge': 84} )
self.assertEqual(image_processor.do_pad , __UpperCamelCase )
def __a ( self ) -> str:
'''simple docstring'''
pass
def __a ( self ) -> Optional[int]:
'''simple docstring'''
snake_case__ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case__ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCamelCase , Image.Image )
# Test not batched input
snake_case__ : List[Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
snake_case__ : Optional[Any] = self.image_processor_tester.get_expected_values(__UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case__ : Optional[Any] = self.image_processor_tester.get_expected_values(__UpperCamelCase , batched=__UpperCamelCase )
snake_case__ : Union[str, Any] = image_processing(__UpperCamelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __a ( self ) -> str:
'''simple docstring'''
snake_case__ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case__ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase , numpify=__UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCamelCase , np.ndarray )
# Test not batched input
snake_case__ : Optional[Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
snake_case__ : Optional[int] = self.image_processor_tester.get_expected_values(__UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case__ : Any = image_processing(__UpperCamelCase , return_tensors='pt' ).pixel_values
snake_case__ : Any = self.image_processor_tester.get_expected_values(__UpperCamelCase , batched=__UpperCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __a ( self ) -> str:
'''simple docstring'''
snake_case__ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case__ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase , torchify=__UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCamelCase , torch.Tensor )
# Test not batched input
snake_case__ : Tuple = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
snake_case__ : Optional[Any] = self.image_processor_tester.get_expected_values(__UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case__ : Optional[Any] = image_processing(__UpperCamelCase , return_tensors='pt' ).pixel_values
snake_case__ : Union[str, Any] = self.image_processor_tester.get_expected_values(__UpperCamelCase , batched=__UpperCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def __a ( self ) -> Dict:
'''simple docstring'''
snake_case__ : Optional[int] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f:
snake_case__ : Dict = json.loads(f.read() )
snake_case__ : Union[str, Any] = {'image_id': 39769, 'annotations': target}
# encode them
snake_case__ : Any = ConditionalDetrImageProcessor.from_pretrained('microsoft/conditional-detr-resnet-50' )
snake_case__ : int = image_processing(images=__UpperCamelCase , annotations=__UpperCamelCase , return_tensors='pt' )
# verify pixel values
snake_case__ : Any = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['pixel_values'].shape , __UpperCamelCase )
snake_case__ : List[Any] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , __UpperCamelCase , atol=1E-4 ) )
# verify area
snake_case__ : List[str] = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , __UpperCamelCase ) )
# verify boxes
snake_case__ : Any = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , __UpperCamelCase )
snake_case__ : Optional[Any] = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , __UpperCamelCase , atol=1E-3 ) )
# verify image_id
snake_case__ : Dict = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , __UpperCamelCase ) )
# verify is_crowd
snake_case__ : Optional[Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , __UpperCamelCase ) )
# verify class_labels
snake_case__ : Optional[int] = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , __UpperCamelCase ) )
# verify orig_size
snake_case__ : str = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , __UpperCamelCase ) )
# verify size
snake_case__ : str = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , __UpperCamelCase ) )
@slow
def __a ( self ) -> List[str]:
'''simple docstring'''
snake_case__ : Any = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f:
snake_case__ : str = json.loads(f.read() )
snake_case__ : Any = {'file_name': '000000039769.png', 'image_id': 39769, 'segments_info': target}
snake_case__ : Tuple = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
snake_case__ : Optional[Any] = ConditionalDetrImageProcessor(format='coco_panoptic' )
snake_case__ : int = image_processing(images=__UpperCamelCase , annotations=__UpperCamelCase , masks_path=__UpperCamelCase , return_tensors='pt' )
# verify pixel values
snake_case__ : Union[str, Any] = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['pixel_values'].shape , __UpperCamelCase )
snake_case__ : Any = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , __UpperCamelCase , atol=1E-4 ) )
# verify area
snake_case__ : Optional[Any] = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , __UpperCamelCase ) )
# verify boxes
snake_case__ : Dict = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , __UpperCamelCase )
snake_case__ : Tuple = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , __UpperCamelCase , atol=1E-3 ) )
# verify image_id
snake_case__ : int = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , __UpperCamelCase ) )
# verify is_crowd
snake_case__ : str = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , __UpperCamelCase ) )
# verify class_labels
snake_case__ : Optional[int] = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , __UpperCamelCase ) )
# verify masks
snake_case__ : Optional[int] = 822873
self.assertEqual(encoding['labels'][0]['masks'].sum().item() , __UpperCamelCase )
# verify orig_size
snake_case__ : Dict = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , __UpperCamelCase ) )
# verify size
snake_case__ : Tuple = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , __UpperCamelCase ) )
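# For reference, a minimal COCO-style detection target like the ones the tests
# above load from disk (field names follow the COCO annotation format; the
# numeric values here are made up for illustration):
sample_target = {
    "image_id": 39769,
    "annotations": [
        {"bbox": [10.0, 20.0, 30.0, 40.0], "category_id": 75, "area": 1200.0, "iscrowd": 0},
    ],
}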
| 702 |
| 699 | 0 |
'''simple docstring'''
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class DeiTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        encoder_stride=2,
    ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride
        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2
    def prepare_config_and_inputs(self):
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        '''simple docstring'''
        return DeiTConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, )
    def create_and_check_model(self, config, pixel_values, labels):
        '''simple docstring'''
        model = DeiTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        '''simple docstring'''
        model = DeiTForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size))
        # test greyscale images
        config.num_channels = 1
        model = DeiTForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        '''simple docstring'''
        config.num_labels = self.type_sequence_label_size
        model = DeiTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
        # test greyscale images
        config.num_channels = 1
        model = DeiTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            pixel_values,
            labels,
        ) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class DeiTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DeiTModel,
            DeiTForImageClassification,
            DeiTForImageClassificationWithTeacher,
            DeiTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DeiTModel,
            "image-classification": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        '''simple docstring'''
        self.model_tester = DeiTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        '''simple docstring'''
        self.config_tester.run_common_tests()
@unittest.skip(reason='DeiT does not use inputs_embeds' )
    def test_inputs_embeds(self):
        '''simple docstring'''
        pass
    def test_model_common_attributes(self):
        '''simple docstring'''
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        '''simple docstring'''
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_masked_image_modeling(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)
    def test_for_image_classification(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        '''simple docstring'''
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                del inputs_dict["labels"]
        return inputs_dict
    def test_training(self):
        '''simple docstring'''
        if not self.model_tester.is_training:
            return
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        for model_class in self.all_model_classes:
            # DeiTForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(MODEL_MAPPING)
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
def __a ( self ) -> int:
'''simple docstring'''
snake_case__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
snake_case__ : List[Any] = False
snake_case__ : List[Any] = True
for model_class in self.all_model_classes:
if model_class in get_values(__UpperCamelCase ) or not model_class.supports_gradient_checkpointing:
continue
# DeiTForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
continue
snake_case__ : Any = model_class(__UpperCamelCase )
model.gradient_checkpointing_enable()
model.to(__UpperCamelCase )
model.train()
snake_case__ : Optional[Any] = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase )
snake_case__ : List[str] = model(**__UpperCamelCase ).loss
loss.backward()
def __a ( self ) -> Optional[int]:
'''simple docstring'''
snake_case__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : str = [
{'title': 'multi_label_classification', 'num_labels': 2, 'dtype': torch.float},
{'title': 'single_label_classification', 'num_labels': 1, 'dtype': torch.long},
{'title': 'regression', 'num_labels': 1, 'dtype': torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(__UpperCamelCase ),
*get_values(__UpperCamelCase ),
]
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F"""Testing {model_class} with {problem_type['title']}""" ):
snake_case__ : Dict = problem_type['title']
snake_case__ : Any = problem_type['num_labels']
snake_case__ : Tuple = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.train()
snake_case__ : Dict = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase )
if problem_type["num_labels"] > 1:
snake_case__ : List[Any] = inputs['labels'].unsqueeze(1 ).repeat(1 , problem_type['num_labels'] )
snake_case__ : Any = inputs['labels'].to(problem_type['dtype'] )
# This tests that we do not trigger the warning form PyTorch "Using a target size that is different
# to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
# they have the same size." which is a symptom something in wrong for the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=__UpperCamelCase ) as warning_list:
snake_case__ : List[Any] = model(**__UpperCamelCase ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
F"""Something is going wrong in the regression problem: intercepted {w.message}""" )
loss.backward()
@slow
def __a ( self ) -> Optional[Any]:
'''simple docstring'''
for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case__ : int = DeiTModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
@require_vision
class __snake_case ( unittest.TestCase ):
@cached_property
def __a ( self ) -> Dict:
'''simple docstring'''
return (
DeiTImageProcessor.from_pretrained('facebook/deit-base-distilled-patch16-224' )
if is_vision_available()
else None
)
@slow
def __a ( self ) -> Optional[int]:
'''simple docstring'''
snake_case__ : Dict = DeiTForImageClassificationWithTeacher.from_pretrained('facebook/deit-base-distilled-patch16-224' ).to(
__UpperCamelCase )
snake_case__ : int = self.default_image_processor
snake_case__ : Tuple = prepare_img()
snake_case__ : Dict = image_processor(images=__UpperCamelCase , return_tensors='pt' ).to(__UpperCamelCase )
# forward pass
with torch.no_grad():
snake_case__ : Any = model(**__UpperCamelCase )
# verify the logits
snake_case__ : str = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , __UpperCamelCase )
snake_case__ : List[Any] = torch.tensor([-1.0_2_6_6, 0.1_9_1_2, -1.2_8_6_1] ).to(__UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCamelCase , atol=1E-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def __a ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : List[Any] = DeiTModel.from_pretrained(
'facebook/deit-base-distilled-patch16-224' , torch_dtype=torch.floataa , device_map='auto' )
snake_case__ : str = self.default_image_processor
snake_case__ : Dict = prepare_img()
snake_case__ : Tuple = image_processor(images=__UpperCamelCase , return_tensors='pt' )
snake_case__ : List[str] = inputs.pixel_values.to(__UpperCamelCase )
# forward pass to make sure inference works in fp16
with torch.no_grad():
snake_case__ : List[Any] = model(__UpperCamelCase )
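# The three `problem_type` settings exercised in the tests above select
# different losses in transformers' classification heads; a minimal
# stand-alone illustration with plain torch (values are random):
import torch as _torch
import torch.nn.functional as _F
_logits = _torch.randn(2, 3)
_F.mse_loss(_logits, _torch.randn(2, 3))                          # regression
_F.cross_entropy(_logits, _torch.tensor([0, 2]))                  # single_label_classification
_F.binary_cross_entropy_with_logits(_logits, _torch.ones(2, 3))   # multi_label_classification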
| 703 |
def _modexpt(base: int, exponent: int, modulo_value: int) -> int:
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value
def solution(base: int = 1777, height: int = 1855, digits: int = 8) -> int:
    result = base
    for _ in range(1, height):
        result = _modexpt(base, result, 10**digits)
    return result
if __name__ == "__main__":
    print(f"{solution() = }")
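# Sanity check: the recursive helper agrees with Python's built-in
# three-argument pow for modular exponentiation.
assert _modexpt(1777, 1855, 10**8) == pow(1777, 1855, 10**8)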
| 699 | 0 |
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModelaXPU
from .scripts import test_script, test_sync, test_ops # isort: skip
| 704 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def pytest_addoption(parser):
    from diffusers.utils.testing_utils import pytest_addoption_shared
    pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
    from diffusers.utils.testing_utils import pytest_terminal_summary_main
    make_reports = terminalreporter.config.getoption('--make-reports')
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
| 699 | 0 |
import random
def _partition(data: list, pivot) -> tuple:
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater
def quick_select(items: list, index: int):
    # index = len(items) // 2 when trying to find the median
    # (value of index when items is sorted)
    # invalid input
    if index >= len(items) or index < 0:
        return None
    pivot = items[random.randint(0, len(items) - 1)]
    count = 0
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)
    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))
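# Example usage: order statistics, including the median of an odd-length list.
items = [7, 1, 5, 3, 9]
assert quick_select(items, 0) == 1                 # minimum
assert quick_select(items, len(items) // 2) == 5   # median
assert quick_select(items, 99) is None             # out-of-range index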
| 705 |
def hexagonal_numbers(length: int) -> list[int]:
    if not isinstance(length, int) or length <= 0:
        raise ValueError('Length must be a positive integer.')
    return [n * (2 * n - 1) for n in range(length)]
if __name__ == "__main__":
    print(hexagonal_numbers(length=5))
    print(hexagonal_numbers(length=10))
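# Quick check against the closed form h(n) = n * (2n - 1):
assert hexagonal_numbers(6) == [0, 1, 6, 15, 28, 45]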
| 699 | 0 |
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
KEYS_TO_MODIFY_MAPPING = {
    "text_branch": "text_model",
    "audio_branch": "audio_model.audio_encoder",
    "attn": "attention.self",
    "self.proj": "output.dense",
    "attention.self_mask": "attn_mask",
    "mlp.fc1": "intermediate.dense",
    "mlp.fc2": "output.dense",
    "norm1": "layernorm_before",
    "norm2": "layernorm_after",
    "bn0": "batch_norm",
}
processor = AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused", truncation="rand_trunc")
def init_clap(checkpoint_path, enable_fusion=False):
    model, model_cfg = create_model(
        "HTSAT-tiny",
        "roberta",
        checkpoint_path,
        precision="fp32",
        device="cuda:0" if torch.cuda.is_available() else "cpu",
        enable_fusion=enable_fusion,
        fusion_type="aff_2d" if enable_fusion else None,
    )
    return model, model_cfg
def rename_state_dict(state_dict):
    model_state_dict = {}
    sequential_layers_pattern = r".*sequential.(\d+).*"
    text_projection_pattern = r".*_projection.(\d+).*"
    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)
        if re.match(sequential_layers_pattern, key):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern, key).group(1)
            key = key.replace(f"sequential.{sequential_layer}.", f"layers.{int(sequential_layer)//3}.linear.")
        elif re.match(text_projection_pattern, key):
            projecton_layer = int(re.match(text_projection_pattern, key).group(1))
            # Because in CLAP they use `nn.Sequential`...
            transformers_projection_layer = 1 if projecton_layer == 0 else 2
            key = key.replace(f"_projection.{projecton_layer}.", f"_projection.linear{transformers_projection_layer}.")
        if "audio" in key and "qkv" in key:
            # split qkv into query, key and value
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0) // 3
            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]
            model_state_dict[key.replace("qkv", "query")] = query_layer
            model_state_dict[key.replace("qkv", "key")] = key_layer
            model_state_dict[key.replace("qkv", "value")] = value_layer
        else:
            model_state_dict[key] = value
    return model_state_dict
def convert_clap_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path, enable_fusion=False):
    clap_model, _ = init_clap(checkpoint_path, enable_fusion=enable_fusion)
    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict)
    transformers_config = ClapConfig()
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config)
    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict, strict=False)
    model.save_pretrained(pytorch_dump_folder_path)
    transformers_config.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
    parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
    parser.add_argument('--enable_fusion', action='store_true', help='Whether to enable fusion or not')
    args = parser.parse_args()
    convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
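# Quick stand-alone check of the fused-qkv split performed in
# `rename_state_dict` above, on a dummy weight tensor:
_fused = torch.arange(9 * 4, dtype=torch.float32).reshape(9, 4)  # rows = 3 * head_dim
_dim = _fused.size(0) // 3
_q, _k, _v = _fused[:_dim], _fused[_dim : 2 * _dim], _fused[2 * _dim :]
assert _q.shape == _k.shape == _v.shape == (3, 4)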
| 706 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', F'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', F'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''',
F'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''',
F'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias'''))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', F'''decoder.layers.{i}.sa_qcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', F'''decoder.layers.{i}.sa_kcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', F'''decoder.layers.{i}.sa_qpos_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', F'''decoder.layers.{i}.sa_kpos_proj.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.weight''', F'''decoder.layers.{i}.sa_v_proj.weight'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', F'''decoder.layers.{i}.ca_qcontent_proj.weight''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', F'''decoder.layers.{i}.ca_kcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', F'''decoder.layers.{i}.ca_kpos_proj.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.weight''', F'''decoder.layers.{i}.ca_v_proj.weight'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', F'''decoder.layers.{i}.ca_qpos_sine_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', F'''decoder.layers.{i}.sa_qcontent_proj.bias''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', F'''decoder.layers.{i}.sa_kcontent_proj.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', F'''decoder.layers.{i}.sa_qpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', F'''decoder.layers.{i}.sa_kpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.bias''', F'''decoder.layers.{i}.sa_v_proj.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', F'''decoder.layers.{i}.ca_qcontent_proj.bias''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', F'''decoder.layers.{i}.ca_kcontent_proj.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', F'''decoder.layers.{i}.ca_kpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.bias''', F'''decoder.layers.{i}.ca_v_proj.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', F'''decoder.layers.{i}.ca_qpos_sine_proj.bias''')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
('''transformer.decoder.ref_point_head.layers.0.weight''', '''decoder.ref_point_head.layers.0.weight'''),
('''transformer.decoder.ref_point_head.layers.0.bias''', '''decoder.ref_point_head.layers.0.bias'''),
('''transformer.decoder.ref_point_head.layers.1.weight''', '''decoder.ref_point_head.layers.1.weight'''),
('''transformer.decoder.ref_point_head.layers.1.bias''', '''decoder.ref_point_head.layers.1.bias'''),
('''transformer.decoder.query_scale.layers.0.weight''', '''decoder.query_scale.layers.0.weight'''),
('''transformer.decoder.query_scale.layers.0.bias''', '''decoder.query_scale.layers.0.bias'''),
('''transformer.decoder.query_scale.layers.1.weight''', '''decoder.query_scale.layers.1.weight'''),
('''transformer.decoder.query_scale.layers.1.bias''', '''decoder.query_scale.layers.1.bias'''),
('''transformer.decoder.layers.0.ca_qpos_proj.weight''', '''decoder.layers.0.ca_qpos_proj.weight'''),
('''transformer.decoder.layers.0.ca_qpos_proj.bias''', '''decoder.layers.0.ca_qpos_proj.bias'''),
]
)
def UpperCamelCase__ ( A__ , A__ , A__ ) -> List[str]:
snake_case__ : int = state_dict.pop(A__ )
snake_case__ : Union[str, Any] = val
def UpperCamelCase__ ( A__ ) -> int:
snake_case__ : List[Any] = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
snake_case__ : Any = key.replace('backbone.0.body' , 'backbone.conv_encoder.model' )
snake_case__ : Optional[int] = value
else:
snake_case__ : Optional[int] = value
return new_state_dict
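# (Added illustration) Hedged example of the backbone key rewrite above; the suffix
# `conv1.weight` is illustrative:
#   backbone.0.body.conv1.weight -> backbone.conv_encoder.model.conv1.weight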
def UpperCamelCase__ ( A__ , A__=False ) -> Optional[int]:
snake_case__ : Optional[int] = ''
if is_panoptic:
snake_case__ : Tuple = 'conditional_detr.'
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
snake_case__ : int = state_dict.pop(F"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""" )
snake_case__ : str = state_dict.pop(F"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
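# (added note) the fixed slice boundaries below assume hidden_size == 256, the
# Conditional DETR default: in_proj_weight stacks q, k and v as one (3 * 256, 256) matrix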
snake_case__ : Union[str, Any] = in_proj_weight[:256, :]
snake_case__ : Union[str, Any] = in_proj_bias[:256]
snake_case__ : Union[str, Any] = in_proj_weight[256:512, :]
snake_case__ : Optional[Any] = in_proj_bias[256:512]
snake_case__ : List[str] = in_proj_weight[-256:, :]
snake_case__ : Tuple = in_proj_bias[-256:]
def UpperCamelCase__ ( ) -> Tuple:
snake_case__ : int = 'http://images.cocodataset.org/val2017/000000039769.jpg'
snake_case__ : str = Image.open(requests.get(A__ , stream=A__ ).raw )
return im
@torch.no_grad()
def UpperCamelCase__ ( A__ , A__ ) -> str:
snake_case__ : List[Any] = ConditionalDetrConfig()
# set backbone and dilation attributes
if "resnet101" in model_name:
snake_case__ : Any = 'resnet101'
if "dc5" in model_name:
snake_case__ : Any = True
snake_case__ : int = 'panoptic' in model_name
if is_panoptic:
snake_case__ : str = 250
else:
snake_case__ : Union[str, Any] = 91
snake_case__ : Optional[int] = 'huggingface/label-files'
snake_case__ : Optional[Any] = 'coco-detection-id2label.json'
snake_case__ : str = json.load(open(hf_hub_download(A__ , A__ , repo_type='dataset' ) , 'r' ) )
snake_case__ : List[Any] = {int(A__ ): v for k, v in idalabel.items()}
snake_case__ : Any = idalabel
snake_case__ : int = {v: k for k, v in idalabel.items()}
# load image processor
snake_case__ : List[Any] = 'coco_panoptic' if is_panoptic else 'coco_detection'
snake_case__ : List[Any] = ConditionalDetrImageProcessor(format=A__ )
# prepare image
snake_case__ : List[str] = prepare_img()
snake_case__ : Any = image_processor(images=A__ , return_tensors='pt' )
snake_case__ : Dict = encoding['pixel_values']
logger.info(F"""Converting model {model_name}...""" )
# load original model from torch hub
snake_case__ : Any = torch.hub.load('DeppMeng/ConditionalDETR' , A__ , pretrained=A__ ).eval()
snake_case__ : Tuple = conditional_detr.state_dict()
# rename keys
for src, dest in rename_keys:
if is_panoptic:
snake_case__ : List[Any] = 'conditional_detr.' + src
rename_key(A__ , A__ , A__ )
snake_case__ : Dict = rename_backbone_keys(A__ )
# query, key and value matrices need special treatment
read_in_q_k_v(A__ , is_panoptic=A__ )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
snake_case__ : Optional[int] = 'conditional_detr.model.' if is_panoptic else 'model.'
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith('conditional_detr' )
and not key.startswith('class_labels_classifier' )
and not key.startswith('bbox_predictor' )
):
snake_case__ : List[Any] = state_dict.pop(A__ )
snake_case__ : Optional[int] = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
snake_case__ : str = state_dict.pop(A__ )
snake_case__ : List[Any] = val
elif key.startswith('bbox_attention' ) or key.startswith('mask_head' ):
continue
else:
snake_case__ : Union[str, Any] = state_dict.pop(A__ )
snake_case__ : Dict = val
else:
if not key.startswith('class_labels_classifier' ) and not key.startswith('bbox_predictor' ):
snake_case__ : List[Any] = state_dict.pop(A__ )
snake_case__ : Optional[int] = val
# finally, create HuggingFace model and load state dict
snake_case__ : Union[str, Any] = ConditionalDetrForSegmentation(A__ ) if is_panoptic else ConditionalDetrForObjectDetection(A__ )
model.load_state_dict(A__ )
model.eval()
model.push_to_hub(repo_id=A__ , organization='DepuMeng' , commit_message='Add model' )
# verify our conversion
snake_case__ : Tuple = conditional_detr(A__ )
snake_case__ : str = model(A__ )
assert torch.allclose(outputs.logits , original_outputs['pred_logits'] , atol=1e-4 )
assert torch.allclose(outputs.pred_boxes , original_outputs['pred_boxes'] , atol=1e-4 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs['pred_masks'] , atol=1e-4 )
# Save model and image processor
logger.info(F"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
Path(A__ ).mkdir(exist_ok=A__ )
model.save_pretrained(A__ )
image_processor.save_pretrained(A__ )
if __name__ == "__main__":
lowerCAmelCase__ : Any = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''conditional_detr_resnet50''',
type=str,
help='''Name of the CONDITIONAL_DETR model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
lowerCAmelCase__ : int = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 699 | 0 |
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
lowerCAmelCase__ : Any = logging.get_logger(__name__)
lowerCAmelCase__ : Optional[Any] = {
'''Salesforce/codegen-350M-nl''': '''https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json''',
'''Salesforce/codegen-350M-multi''': '''https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json''',
'''Salesforce/codegen-350M-mono''': '''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json''',
'''Salesforce/codegen-2B-nl''': '''https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json''',
'''Salesforce/codegen-2B-multi''': '''https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json''',
'''Salesforce/codegen-2B-mono''': '''https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json''',
'''Salesforce/codegen-6B-nl''': '''https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json''',
'''Salesforce/codegen-6B-multi''': '''https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json''',
'''Salesforce/codegen-6B-mono''': '''https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json''',
'''Salesforce/codegen-16B-nl''': '''https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json''',
'''Salesforce/codegen-16B-multi''': '''https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json''',
'''Salesforce/codegen-16B-mono''': '''https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json''',
}
class __snake_case ( _lowerCamelCase ):
__lowerCamelCase = """codegen"""
__lowerCamelCase = {
"""max_position_embeddings""": """n_positions""",
"""hidden_size""": """n_embd""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self , __UpperCamelCase=50400 , __UpperCamelCase=2048 , __UpperCamelCase=2048 , __UpperCamelCase=4096 , __UpperCamelCase=28 , __UpperCamelCase=16 , __UpperCamelCase=64 , __UpperCamelCase=None , __UpperCamelCase="gelu_new" , __UpperCamelCase=0.0 , __UpperCamelCase=0.0 , __UpperCamelCase=0.0 , __UpperCamelCase=1E-5 , __UpperCamelCase=0.0_2 , __UpperCamelCase=True , __UpperCamelCase=50256 , __UpperCamelCase=50256 , __UpperCamelCase=False , **__UpperCamelCase , ) -> List[str]:
'''simple docstring'''
snake_case__ : Optional[Any] = vocab_size
snake_case__ : List[str] = n_ctx
snake_case__ : List[str] = n_positions
snake_case__ : Union[str, Any] = n_embd
snake_case__ : Union[str, Any] = n_layer
snake_case__ : Optional[int] = n_head
snake_case__ : Optional[int] = n_inner
snake_case__ : List[str] = rotary_dim
snake_case__ : List[Any] = activation_function
snake_case__ : Union[str, Any] = resid_pdrop
snake_case__ : List[str] = embd_pdrop
snake_case__ : List[Any] = attn_pdrop
snake_case__ : List[str] = layer_norm_epsilon
snake_case__ : int = initializer_range
snake_case__ : List[str] = use_cache
snake_case__ : Tuple = bos_token_id
snake_case__ : List[str] = eos_token_id
super().__init__(
bos_token_id=__UpperCamelCase , eos_token_id=__UpperCamelCase , tie_word_embeddings=__UpperCamelCase , **__UpperCamelCase )
class __snake_case ( _lowerCamelCase ):
def __init__( self , __UpperCamelCase , __UpperCamelCase = "default" , __UpperCamelCase = None , __UpperCamelCase = False , ) -> Optional[Any]:
'''simple docstring'''
super().__init__(__UpperCamelCase , task=__UpperCamelCase , patching_specs=__UpperCamelCase , use_past=__UpperCamelCase )
if not getattr(self._config , 'pad_token_id' , __UpperCamelCase ):
# TODO: how to do that better?
snake_case__ : str = 0
@property
def __a ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
snake_case__ : Tuple = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}} )
if self.use_past:
self.fill_with_past_key_values_(__UpperCamelCase , direction='inputs' )
snake_case__ : List[str] = {0: 'batch', 1: 'past_sequence + sequence'}
else:
snake_case__ : List[Any] = {0: 'batch', 1: 'sequence'}
return common_inputs
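# (Added illustration) With `use_past=True`, the returned mapping is expected to look
# roughly like the sketch below; the `past_key_values.{i}.key/value` names come from
# `fill_with_past_key_values_` and are shown here as a hedged example, not verified output:
#   {"input_ids": {0: "batch", 1: "sequence"},
#    "past_key_values.0.key": {0: "batch", 2: "past_sequence + sequence"},
#    "past_key_values.0.value": {0: "batch", 2: "past_sequence + sequence"},
#    ...,
#    "attention_mask": {0: "batch", 1: "past_sequence + sequence"}}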
@property
def __a ( self ) -> int:
'''simple docstring'''
return self._config.n_layer
@property
def __a ( self ) -> int:
'''simple docstring'''
return self._config.n_head
def __a ( self , __UpperCamelCase , __UpperCamelCase = -1 , __UpperCamelCase = -1 , __UpperCamelCase = False , __UpperCamelCase = None , ) -> Mapping[str, Any]:
'''simple docstring'''
snake_case__ : int = super(__UpperCamelCase , self ).generate_dummy_inputs(
__UpperCamelCase , batch_size=__UpperCamelCase , seq_length=__UpperCamelCase , is_pair=__UpperCamelCase , framework=__UpperCamelCase )
# We need to order the inputs in the way they appear in the forward()
snake_case__ : Optional[Any] = OrderedDict({'input_ids': common_inputs['input_ids']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
snake_case__ : Optional[int] = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
snake_case__ : Dict = seqlen + 2
snake_case__ : List[str] = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
snake_case__ : Dict = [
(torch.zeros(__UpperCamelCase ), torch.zeros(__UpperCamelCase )) for _ in range(self.num_layers )
]
snake_case__ : Any = common_inputs['attention_mask']
if self.use_past:
snake_case__ : Any = ordered_inputs['attention_mask'].dtype
snake_case__ : List[str] = torch.cat(
[ordered_inputs['attention_mask'], torch.ones(__UpperCamelCase , __UpperCamelCase , dtype=__UpperCamelCase )] , dim=1 )
return ordered_inputs
@property
def __a ( self ) -> int:
'''simple docstring'''
return 13
| 707 | from collections import namedtuple
lowerCAmelCase__ : Union[str, Any] = namedtuple('''from_to''', '''from_ to''')
lowerCAmelCase__ : Tuple = {
'''cubicmeter''': from_to(1, 1),
'''litre''': from_to(0.0_01, 10_00),
'''kilolitre''': from_to(1, 1),
'''gallon''': from_to(0.0_04_54, 2_64.1_72),
'''cubicyard''': from_to(0.7_64_55, 1.3_07_95),
'''cubicfoot''': from_to(0.0_28, 35.31_47),
'''cup''': from_to(0.0_00_23_65_88, 42_26.75),
}
def UpperCamelCase__ ( A__ , A__ , A__ ) -> float:
if from_type not in METRIC_CONVERSION:
raise ValueError(
F"""Invalid 'from_type' value: {from_type!r} Supported values are:\n"""
+ ', '.join(A__ ) )
if to_type not in METRIC_CONVERSION:
raise ValueError(
F"""Invalid 'to_type' value: {to_type!r}. Supported values are:\n"""
+ ', '.join(A__ ) )
return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
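# (Added usage sketch) assuming the converter above is exported as `volume_conversion`
# -- the public name is an assumption, not visible in this file:
#   volume_conversion(4, "cubicmeter", "litre")  # -> 4000.0
#   volume_conversion(1, "litre", "gallon")      # -> 0.264172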
if __name__ == "__main__":
import doctest
doctest.testmod()
| 699 | 0 |
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def UpperCamelCase__ ( A__ , A__ , A__ = 10**-10 ) -> float:
snake_case__ : List[Any] = a
while True:
snake_case__ : Optional[Any] = Decimal(A__ ) - (
Decimal(eval(A__ ) ) / Decimal(eval(str(diff(A__ ) ) ) ) # noqa: S307
)
# This number dictates the accuracy of the answer
if abs(eval(A__ ) ) < precision: # noqa: S307
return float(A__ )
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F'''The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}''')
# Find root of polynomial
print(F'''The root of x**2 - 5*x + 2 = 0 is {newton_raphson('x**2 - 5*x + 2', 0.4)}''')
# Find value of e (the root of log(x) - 1 = 0)
print(F'''The root of log(x) - 1 = 0 is {newton_raphson('log(x) - 1', 2)}''')
# Exponential Roots
print(F'''The root of exp(x) - 1 = 0 is {newton_raphson('exp(x) - 1', 0)}''')
| 708 | import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase__ : Tuple = logging.get_logger(__name__)
lowerCAmelCase__ : Union[str, Any] = '''▁'''
lowerCAmelCase__ : List[Any] = {'''vocab_file''': '''sentencepiece.bpe.model'''}
lowerCAmelCase__ : Optional[Any] = {
'''vocab_file''': {
'''facebook/xglm-564M''': '''https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model''',
}
}
lowerCAmelCase__ : str = {
'''facebook/xglm-564M''': 20_48,
}
class __snake_case ( _lowerCamelCase ):
__lowerCamelCase = VOCAB_FILES_NAMES
__lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
__lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCamelCase = ["""input_ids""", """attention_mask"""]
def __init__( self , __UpperCamelCase , __UpperCamelCase="<s>" , __UpperCamelCase="</s>" , __UpperCamelCase="</s>" , __UpperCamelCase="<s>" , __UpperCamelCase="<unk>" , __UpperCamelCase="<pad>" , __UpperCamelCase = None , **__UpperCamelCase , ) -> None:
'''simple docstring'''
snake_case__ : Any = {} if sp_model_kwargs is None else sp_model_kwargs
# Compatibility with the original tokenizer
snake_case__ : Tuple = 7
snake_case__ : Dict = [F"""<madeupword{i}>""" for i in range(self.num_madeup_words )]
snake_case__ : Union[str, Any] = kwargs.get('additional_special_tokens' , [] )
kwargs["additional_special_tokens"] += [
word for word in madeup_words if word not in kwargs["additional_special_tokens"]
]
super().__init__(
bos_token=__UpperCamelCase , eos_token=__UpperCamelCase , unk_token=__UpperCamelCase , sep_token=__UpperCamelCase , cls_token=__UpperCamelCase , pad_token=__UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCamelCase , )
snake_case__ : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__UpperCamelCase ) )
snake_case__ : Optional[Any] = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
snake_case__ : Tuple = 1
# Mimic fairseq token-to-id alignment for the first 4 tokens
snake_case__ : Tuple = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
snake_case__ : List[Any] = len(self.sp_model )
snake_case__ : Optional[Any] = {F"""<madeupword{i}>""": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
self.fairseq_tokens_to_ids.update(__UpperCamelCase )
snake_case__ : Union[str, Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self ) -> List[Any]:
'''simple docstring'''
snake_case__ : Union[str, Any] = self.__dict__.copy()
snake_case__ : Optional[Any] = None
snake_case__ : Tuple = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , __UpperCamelCase ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : Union[str, Any] = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
snake_case__ : Any = {}
snake_case__ : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def __a ( self , __UpperCamelCase , __UpperCamelCase = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.sep_token_id] + token_ids_a
snake_case__ : str = [self.sep_token_id]
return sep + token_ids_a + sep + sep + token_ids_a
def __a ( self , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__UpperCamelCase , token_ids_a=__UpperCamelCase , already_has_special_tokens=__UpperCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(__UpperCamelCase ))
return [1] + ([0] * len(__UpperCamelCase )) + [1, 1] + ([0] * len(__UpperCamelCase ))
def __a ( self , __UpperCamelCase , __UpperCamelCase = None ) -> List[int]:
'''simple docstring'''
snake_case__ : int = [self.sep_token_id]
if token_ids_a is None:
return len(sep + token_ids_a ) * [0]
return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0]
@property
def __a ( self ) -> Tuple:
'''simple docstring'''
return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
def __a ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : int = {self.convert_ids_to_tokens(__UpperCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __a ( self , __UpperCamelCase ) -> List[str]:
'''simple docstring'''
return self.sp_model.encode(__UpperCamelCase , out_type=__UpperCamelCase )
def __a ( self , __UpperCamelCase ) -> Union[str, Any]:
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
snake_case__ : Optional[Any] = self.sp_model.PieceToId(__UpperCamelCase )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def __a ( self , __UpperCamelCase ) -> Dict:
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def __a ( self , __UpperCamelCase ) -> int:
'''simple docstring'''
snake_case__ : int = ''.join(__UpperCamelCase ).replace(__UpperCamelCase , ' ' ).strip()
return out_string
def __a ( self , __UpperCamelCase , __UpperCamelCase = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(__UpperCamelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
snake_case__ : List[str] = os.path.join(
__UpperCamelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __UpperCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__UpperCamelCase , 'wb' ) as fi:
snake_case__ : Any = self.sp_model.serialized_model_proto()
fi.write(__UpperCamelCase )
return (out_vocab_file,)
| 699 | 0 |
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def UpperCamelCase__ ( A__ , A__ , A__ ) -> List[str]:
# Initialise PyTorch model
snake_case__ : Optional[Any] = LxmertConfig.from_json_file(A__ )
print(F"""Building PyTorch model from configuration: {config}""" )
snake_case__ : Tuple = LxmertForPreTraining(A__ )
# Load weights from tf checkpoint
load_tf_weights_in_lxmert(A__ , A__ , A__ )
# Save pytorch-model
print(F"""Save PyTorch model to {pytorch_dump_path}""" )
torch.save(model.state_dict() , A__ )
if __name__ == "__main__":
lowerCAmelCase__ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
lowerCAmelCase__ : Any = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 709 | import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
lowerCAmelCase__ : Any = logging.get_logger(__name__)
lowerCAmelCase__ : List[Any] = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
lowerCAmelCase__ : Any = {
'''vocab_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
lowerCAmelCase__ : Any = {
'''vocab_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
lowerCAmelCase__ : Tuple = {
'''vocab_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'''
),
},
}
lowerCAmelCase__ : Dict = {
'''facebook/dpr-ctx_encoder-single-nq-base''': 5_12,
'''facebook/dpr-ctx_encoder-multiset-base''': 5_12,
}
lowerCAmelCase__ : Union[str, Any] = {
'''facebook/dpr-question_encoder-single-nq-base''': 5_12,
'''facebook/dpr-question_encoder-multiset-base''': 5_12,
}
lowerCAmelCase__ : Optional[Any] = {
'''facebook/dpr-reader-single-nq-base''': 5_12,
'''facebook/dpr-reader-multiset-base''': 5_12,
}
lowerCAmelCase__ : Tuple = {
'''facebook/dpr-ctx_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-ctx_encoder-multiset-base''': {'''do_lower_case''': True},
}
lowerCAmelCase__ : Any = {
'''facebook/dpr-question_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-question_encoder-multiset-base''': {'''do_lower_case''': True},
}
lowerCAmelCase__ : List[str] = {
'''facebook/dpr-reader-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-reader-multiset-base''': {'''do_lower_case''': True},
}
class __snake_case ( _lowerCamelCase ):
__lowerCamelCase = VOCAB_FILES_NAMES
__lowerCamelCase = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
__lowerCamelCase = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCamelCase = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
__lowerCamelCase = DPRContextEncoderTokenizer
class __snake_case ( _lowerCamelCase ):
__lowerCamelCase = VOCAB_FILES_NAMES
__lowerCamelCase = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
__lowerCamelCase = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCamelCase = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
__lowerCamelCase = DPRQuestionEncoderTokenizer
lowerCAmelCase__ : Tuple = collections.namedtuple(
'''DPRSpanPrediction''', ['''span_score''', '''relevance_score''', '''doc_id''', '''start_index''', '''end_index''', '''text''']
)
lowerCAmelCase__ : List[Any] = collections.namedtuple('''DPRReaderOutput''', ['''start_logits''', '''end_logits''', '''relevance_logits'''])
lowerCAmelCase__ : int = r'''
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
is provided).
- `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `\'tf\'`: Return TensorFlow `tf.constant` objects.
- `\'pt\'`: Return PyTorch `torch.Tensor` objects.
- `\'np\'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer\'s default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Return:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
'''
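# (Added usage sketch) a typical call to the reader tokenizer documented above; the
# model id and inputs are illustrative, and `DPRReaderTokenizerFast` is assumed to be
# the public name this class is exported under:
#   tokenizer = DPRReaderTokenizerFast.from_pretrained("facebook/dpr-reader-single-nq-base")
#   encoded = tokenizer(
#       questions="What is love?",
#       titles="Haddaway",
#       texts="'What Is Love' is a song recorded by Haddaway",
#       return_tensors="pt",
#   )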
@add_start_docstrings(_lowerCamelCase )
class __snake_case :
def __call__( self , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = False , __UpperCamelCase = False , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , **__UpperCamelCase , ) -> BatchEncoding:
'''simple docstring'''
if titles is None and texts is None:
return super().__call__(
__UpperCamelCase , padding=__UpperCamelCase , truncation=__UpperCamelCase , max_length=__UpperCamelCase , return_tensors=__UpperCamelCase , return_attention_mask=__UpperCamelCase , **__UpperCamelCase , )
elif titles is None or texts is None:
snake_case__ : Optional[Any] = titles if texts is None else texts
return super().__call__(
__UpperCamelCase , __UpperCamelCase , padding=__UpperCamelCase , truncation=__UpperCamelCase , max_length=__UpperCamelCase , return_tensors=__UpperCamelCase , return_attention_mask=__UpperCamelCase , **__UpperCamelCase , )
snake_case__ : int = titles if not isinstance(__UpperCamelCase , __UpperCamelCase ) else [titles]
snake_case__ : Optional[int] = texts if not isinstance(__UpperCamelCase , __UpperCamelCase ) else [texts]
snake_case__ : List[Any] = len(__UpperCamelCase )
snake_case__ : str = questions if not isinstance(__UpperCamelCase , __UpperCamelCase ) else [questions] * n_passages
assert len(__UpperCamelCase ) == len(
__UpperCamelCase ), F"""There should be as many titles as texts but got {len(__UpperCamelCase )} titles and {len(__UpperCamelCase )} texts."""
snake_case__ : Optional[int] = super().__call__(__UpperCamelCase , __UpperCamelCase , padding=__UpperCamelCase , truncation=__UpperCamelCase )['input_ids']
snake_case__ : Optional[Any] = super().__call__(__UpperCamelCase , add_special_tokens=__UpperCamelCase , padding=__UpperCamelCase , truncation=__UpperCamelCase )['input_ids']
snake_case__ : Union[str, Any] = {
'input_ids': [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(__UpperCamelCase , __UpperCamelCase )
]
}
if return_attention_mask is not False:
snake_case__ : List[Any] = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
snake_case__ : Union[str, Any] = attention_mask
return self.pad(__UpperCamelCase , padding=__UpperCamelCase , max_length=__UpperCamelCase , return_tensors=__UpperCamelCase )
def __a ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = 16 , __UpperCamelCase = 64 , __UpperCamelCase = 4 , ) -> List[DPRSpanPrediction]:
'''simple docstring'''
snake_case__ : Optional[Any] = reader_input['input_ids']
snake_case__ , snake_case__ , snake_case__ : Any = reader_output[:3]
snake_case__ : List[str] = len(__UpperCamelCase )
snake_case__ : Tuple = sorted(range(__UpperCamelCase ) , reverse=__UpperCamelCase , key=relevance_logits.__getitem__ )
snake_case__ : List[DPRSpanPrediction] = []
for doc_id in sorted_docs:
snake_case__ : Tuple = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
snake_case__ : int = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
snake_case__ : Union[str, Any] = sequence_ids.index(self.pad_token_id )
else:
snake_case__ : str = len(__UpperCamelCase )
snake_case__ : Dict = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=__UpperCamelCase , top_spans=__UpperCamelCase , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=__UpperCamelCase , start_index=__UpperCamelCase , end_index=__UpperCamelCase , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(__UpperCamelCase ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def __a ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , ) -> List[DPRSpanPrediction]:
'''simple docstring'''
snake_case__ : Any = []
for start_index, start_score in enumerate(__UpperCamelCase ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
snake_case__ : str = sorted(__UpperCamelCase , key=lambda __UpperCamelCase : x[1] , reverse=__UpperCamelCase )
snake_case__ : Any = []
for (start_index, end_index), score in scores:
assert start_index <= end_index, F"""Wrong span indices: [{start_index}:{end_index}]"""
snake_case__ : str = end_index - start_index + 1
assert length <= max_answer_length, F"""Span is too long: {length} > {max_answer_length}"""
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(__UpperCamelCase ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(_lowerCamelCase )
class __snake_case ( _lowerCamelCase ,_lowerCamelCase ):
__lowerCamelCase = VOCAB_FILES_NAMES
__lowerCamelCase = READER_PRETRAINED_VOCAB_FILES_MAP
__lowerCamelCase = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCamelCase = READER_PRETRAINED_INIT_CONFIGURATION
__lowerCamelCase = ["""input_ids""", """attention_mask"""]
__lowerCamelCase = DPRReaderTokenizer
| 699 | 0 |
'''simple docstring'''
def UpperCamelCase__ ( A__ , A__ ) -> str:
if not (isinstance(A__ , A__ ) and isinstance(A__ , A__ )):
raise ValueError('longest_common_substring() takes two strings for inputs' )
snake_case__ : Optional[int] = len(A__ )
snake_case__ : List[str] = len(A__ )
snake_case__ : Any = [[0] * (texta_length + 1) for _ in range(texta_length + 1 )]
snake_case__ : int = 0
snake_case__ : List[str] = 0
for i in range(1 , texta_length + 1 ):
for j in range(1 , texta_length + 1 ):
if texta[i - 1] == texta[j - 1]:
snake_case__ : str = 1 + dp[i - 1][j - 1]
if dp[i][j] > ans_length:
snake_case__ : int = i
snake_case__ : List[str] = dp[i][j]
return texta[ans_index - ans_length : ans_index]
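# (Added usage sketch) the error message above implies the public name
# `longest_common_substring`:
#   longest_common_substring("xabcz", "yabcw")  # -> "abc"
#   longest_common_substring("abc", "xyz")      # -> ""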
if __name__ == "__main__":
import doctest
doctest.testmod()
| 710 | import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __snake_case ( _lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,unittest.TestCase ):
__lowerCamelCase = StableDiffusionInstructPixaPixPipeline
__lowerCamelCase = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width""", """cross_attention_kwargs"""}
__lowerCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
__lowerCamelCase = IMAGE_TO_IMAGE_IMAGE_PARAMS
__lowerCamelCase = IMAGE_TO_IMAGE_IMAGE_PARAMS
def __a ( self ) -> List[Any]:
'''simple docstring'''
torch.manual_seed(0 )
snake_case__ : Tuple = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=8 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
snake_case__ : Any = PNDMScheduler(skip_prk_steps=__UpperCamelCase )
torch.manual_seed(0 )
snake_case__ : Dict = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
snake_case__ : int = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
snake_case__ : Tuple = CLIPTextModel(__UpperCamelCase )
snake_case__ : Optional[int] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
snake_case__ : Optional[int] = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def __a ( self , __UpperCamelCase , __UpperCamelCase=0 ) -> Dict:
'''simple docstring'''
snake_case__ : Optional[Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(__UpperCamelCase ) ).to(__UpperCamelCase )
snake_case__ : int = image.cpu().permute(0 , 2 , 3 , 1 )[0]
snake_case__ : Union[str, Any] = Image.fromarray(np.uinta(__UpperCamelCase ) ).convert('RGB' )
if str(__UpperCamelCase ).startswith('mps' ):
snake_case__ : str = torch.manual_seed(__UpperCamelCase )
else:
snake_case__ : Dict = torch.Generator(device=__UpperCamelCase ).manual_seed(__UpperCamelCase )
snake_case__ : str = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'image_guidance_scale': 1,
'output_type': 'numpy',
}
return inputs
def __a ( self ) -> Tuple:
'''simple docstring'''
snake_case__ : Dict = 'cpu' # ensure determinism for the device-dependent torch.Generator
snake_case__ : Optional[int] = self.get_dummy_components()
snake_case__ : str = StableDiffusionInstructPixaPixPipeline(**__UpperCamelCase )
snake_case__ : Optional[int] = sd_pipe.to(__UpperCamelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCamelCase )
snake_case__ : Tuple = self.get_dummy_inputs(__UpperCamelCase )
snake_case__ : List[str] = sd_pipe(**__UpperCamelCase ).images
snake_case__ : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
snake_case__ : str = np.array([0.7_5_2_6, 0.3_7_5_0, 0.4_5_4_7, 0.6_1_1_7, 0.5_8_6_6, 0.5_0_1_6, 0.4_3_2_7, 0.5_6_4_2, 0.4_8_1_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __a ( self ) -> Any:
'''simple docstring'''
snake_case__ : Dict = 'cpu' # ensure determinism for the device-dependent torch.Generator
snake_case__ : Union[str, Any] = self.get_dummy_components()
snake_case__ : Optional[Any] = StableDiffusionInstructPixaPixPipeline(**__UpperCamelCase )
snake_case__ : List[Any] = sd_pipe.to(__UpperCamelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCamelCase )
snake_case__ : Union[str, Any] = self.get_dummy_inputs(__UpperCamelCase )
snake_case__ : List[str] = 'french fries'
snake_case__ : Optional[Any] = sd_pipe(**__UpperCamelCase , negative_prompt=__UpperCamelCase )
snake_case__ : Union[str, Any] = output.images
snake_case__ : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
snake_case__ : Any = np.array([0.7_5_1_1, 0.3_6_4_2, 0.4_5_5_3, 0.6_2_3_6, 0.5_7_9_7, 0.5_0_1_3, 0.4_3_4_3, 0.5_6_1_1, 0.4_8_3_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __a ( self ) -> int:
'''simple docstring'''
snake_case__ : Optional[int] = 'cpu' # ensure determinism for the device-dependent torch.Generator
snake_case__ : List[str] = self.get_dummy_components()
snake_case__ : str = StableDiffusionInstructPixaPixPipeline(**__UpperCamelCase )
snake_case__ : str = sd_pipe.to(__UpperCamelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCamelCase )
snake_case__ : Dict = self.get_dummy_inputs(__UpperCamelCase )
snake_case__ : Any = [inputs['prompt']] * 2
snake_case__ : Optional[int] = np.array(inputs['image'] ).astype(np.floataa ) / 2_5_5.0
snake_case__ : Optional[int] = torch.from_numpy(__UpperCamelCase ).unsqueeze(0 ).to(__UpperCamelCase )
snake_case__ : Any = image / 2 + 0.5
snake_case__ : Optional[Any] = image.permute(0 , 3 , 1 , 2 )
snake_case__ : List[Any] = image.repeat(2 , 1 , 1 , 1 )
snake_case__ : Optional[int] = sd_pipe(**__UpperCamelCase ).images
snake_case__ : Union[str, Any] = image[-1, -3:, -3:, -1]
assert image.shape == (2, 32, 32, 3)
snake_case__ : List[Any] = np.array([0.5_8_1_2, 0.5_7_4_8, 0.5_2_2_2, 0.5_9_0_8, 0.5_6_9_5, 0.7_1_7_4, 0.6_8_0_4, 0.5_5_2_3, 0.5_5_7_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __a ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : Optional[int] = 'cpu' # ensure determinism for the device-dependent torch.Generator
snake_case__ : Optional[int] = self.get_dummy_components()
snake_case__ : Tuple = EulerAncestralDiscreteScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='scaled_linear' )
snake_case__ : int = StableDiffusionInstructPixaPixPipeline(**__UpperCamelCase )
snake_case__ : List[str] = sd_pipe.to(__UpperCamelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCamelCase )
snake_case__ : str = self.get_dummy_inputs(__UpperCamelCase )
snake_case__ : Any = sd_pipe(**__UpperCamelCase ).images
snake_case__ : int = image[0, -3:, -3:, -1]
snake_case__ : Tuple = [round(__UpperCamelCase , 4 ) for x in image_slice.flatten().tolist()]
print(','.join([str(__UpperCamelCase ) for x in slice] ) )
assert image.shape == (1, 32, 32, 3)
snake_case__ : List[Any] = np.array([0.7_4_1_7, 0.3_8_4_2, 0.4_7_3_2, 0.5_7_7_6, 0.5_8_9_1, 0.5_1_3_9, 0.4_0_5_2, 0.5_6_7_3, 0.4_9_8_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __a ( self ) -> int:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def __a ( self ) -> Any:
'''simple docstring'''
snake_case__ : Optional[int] = self.get_dummy_components()
snake_case__ : int = StableDiffusionInstructPixaPixPipeline(**__UpperCamelCase )
snake_case__ : Union[str, Any] = VaeImageProcessor(do_resize=__UpperCamelCase , do_normalize=__UpperCamelCase )
snake_case__ : Optional[int] = pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
snake_case__ : Optional[Any] = pipe(**self.get_dummy_inputs_by_type(__UpperCamelCase , input_image_type='pt' ) )[0]
snake_case__ : Union[str, Any] = components['vae']
snake_case__ : str = self.get_dummy_inputs_by_type(__UpperCamelCase , input_image_type='pt' )
for image_param in self.image_latents_params:
if image_param in inputs.keys():
snake_case__ : List[str] = vae.encode(inputs[image_param] ).latent_dist.mode()
snake_case__ : Dict = pipe(**__UpperCamelCase )[0]
snake_case__ : str = np.abs(out - out_latents_inputs ).max()
self.assertLess(__UpperCamelCase , 1E-4 , 'passing latents as image input generate different result from passing image' )
@slow
@require_torch_gpu
class __snake_case ( unittest.TestCase ):
def __a ( self ) -> List[str]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __a ( self , __UpperCamelCase=0 ) -> Dict:
'''simple docstring'''
snake_case__ : Optional[Any] = torch.manual_seed(__UpperCamelCase )
snake_case__ : List[str] = load_image(
'https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg' )
snake_case__ : int = {
'prompt': 'turn him into a cyborg',
'image': image,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 7.5,
'image_guidance_scale': 1.0,
'output_type': 'numpy',
}
return inputs
def __a ( self ) -> Dict:
'''simple docstring'''
snake_case__ : Any = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=__UpperCamelCase )
pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
pipe.enable_attention_slicing()
snake_case__ : Tuple = self.get_inputs()
snake_case__ : List[Any] = pipe(**__UpperCamelCase ).images
snake_case__ : Union[str, Any] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
snake_case__ : Dict = np.array([0.5_9_0_2, 0.6_0_1_5, 0.6_0_2_7, 0.5_9_8_3, 0.6_0_9_2, 0.6_0_6_1, 0.5_7_6_5, 0.5_7_8_5, 0.5_5_5_5] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def __a ( self ) -> str:
'''simple docstring'''
snake_case__ : Any = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=__UpperCamelCase )
snake_case__ : Tuple = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
pipe.enable_attention_slicing()
snake_case__ : Dict = self.get_inputs()
snake_case__ : Dict = pipe(**__UpperCamelCase ).images
snake_case__ : Union[str, Any] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
snake_case__ : List[Any] = np.array([0.6_5_7_8, 0.6_8_1_7, 0.6_9_7_2, 0.6_7_6_1, 0.6_8_5_6, 0.6_9_1_6, 0.6_4_2_8, 0.6_5_1_6, 0.6_3_0_1] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def __a ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case__ : Tuple = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=__UpperCamelCase )
snake_case__ : Tuple = DDIMScheduler.from_config(pipe.scheduler.config )
pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
pipe.enable_attention_slicing()
snake_case__ : Optional[int] = self.get_inputs()
snake_case__ : Optional[int] = pipe(**__UpperCamelCase ).images
snake_case__ : Tuple = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
snake_case__ : int = np.array([0.3_8_2_8, 0.3_8_3_4, 0.3_8_1_8, 0.3_7_9_2, 0.3_8_6_5, 0.3_7_5_2, 0.3_7_9_2, 0.3_8_4_7, 0.3_7_5_3] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def __a ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : int = 0
def callback_fn(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> None:
snake_case__ : List[Any] = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
snake_case__ : Any = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
snake_case__ : int = latents[0, -3:, -3:, -1]
snake_case__ : List[str] = np.array([-0.2_4_6_3, -0.4_6_4_4, -0.9_7_5_6, 1.5_1_7_6, 1.4_4_1_4, 0.7_8_6_6, 0.9_8_9_7, 0.8_5_2_1, 0.7_9_8_3] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
elif step == 2:
snake_case__ : Dict = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
snake_case__ : Dict = latents[0, -3:, -3:, -1]
snake_case__ : Optional[Any] = np.array([-0.2_6_4_4, -0.4_6_2_6, -0.9_6_5_3, 1.5_1_7_6, 1.4_5_5_1, 0.7_6_8_6, 0.9_8_0_5, 0.8_4_5_2, 0.8_1_1_5] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
snake_case__ : str = False
snake_case__ : List[Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=__UpperCamelCase , torch_dtype=torch.floataa )
snake_case__ : int = pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
pipe.enable_attention_slicing()
snake_case__ : int = self.get_inputs()
pipe(**__UpperCamelCase , callback=__UpperCamelCase , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def __a ( self ) -> Any:
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
snake_case__ : str = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=__UpperCamelCase , torch_dtype=torch.floataa )
snake_case__ : Dict = pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
snake_case__ : str = self.get_inputs()
snake_case__ : Tuple = pipe(**__UpperCamelCase )
snake_case__ : List[Any] = torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 10**9
def __a ( self ) -> int:
'''simple docstring'''
snake_case__ : int = self.get_inputs()
# resize to resolution that is divisible by 8 but not 16 or 32
snake_case__ : Tuple = inputs['image'].resize((504, 504) )
snake_case__ : str = 'timbrooks/instruct-pix2pix'
snake_case__ : Tuple = StableDiffusionInstructPixaPixPipeline.from_pretrained(
__UpperCamelCase , safety_checker=__UpperCamelCase , )
pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
pipe.enable_attention_slicing()
snake_case__ : str = pipe(**__UpperCamelCase )
snake_case__ : List[Any] = output.images[0]
snake_case__ : List[Any] = image[255:258, 383:386, -1]
assert image.shape == (504, 504, 3)
snake_case__ : List[str] = np.array([0.2_7_2_6, 0.2_5_2_9, 0.2_6_6_4, 0.2_6_5_5, 0.2_6_4_1, 0.2_6_4_2, 0.2_5_9_1, 0.2_6_4_9, 0.2_5_9_0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
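# A standalone sketch of the same pipeline outside the test harness; the model
# id and call arguments are taken from get_inputs() above, while the device and
# the input image are assumed:
#
#   pipe = StableDiffusionInstructPixaPixPipeline.from_pretrained(
#       'timbrooks/instruct-pix2pix', safety_checker=None).to('cuda')
#   out = pipe(prompt='turn him into a cyborg', image=image,
#              num_inference_steps=3, guidance_scale=7.5, image_guidance_scale=1.0)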
| 699 | 0 |
from __future__ import annotations
lowerCAmelCase__ : str = 1.6_021E-19 # units = C
def UpperCamelCase__ ( A__ , A__ , A__ , ) -> tuple[str, float]:
if (conductivity, electron_conc, mobility).count(0 ) != 1:
raise ValueError('You cannot supply more or less than 2 values' )
elif conductivity < 0:
raise ValueError('Conductivity cannot be negative' )
elif electron_conc < 0:
raise ValueError('Electron concentration cannot be negative' )
elif mobility < 0:
        raise ValueError('Mobility cannot be negative' )
elif conductivity == 0:
return (
"conductivity",
mobility * electron_conc * ELECTRON_CHARGE,
)
elif electron_conc == 0:
return (
"electron_conc",
conductivity / (mobility * ELECTRON_CHARGE),
)
else:
return (
"mobility",
conductivity / (electron_conc * ELECTRON_CHARGE),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
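    # A hedged usage sketch (hypothetical values; the intended signature is
    # (conductivity, electron_conc, mobility)): pass 0 for the unknown quantity
    # and the function returns its name together with the solved value.
    print(UpperCamelCase__(25.0, 1e20, 0))  # -> ('mobility', ~1.56)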
| 711 | from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForSeqaSeq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
    SquadVaProcessor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
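# Quick sketch of the default collator imported above (a real transformers
# API): it stacks per-example feature dicts into batched tensors and renames
# "label" to "labels", the key the models expect.
#
#   features = [{"input_ids": [1, 2], "label": 0}, {"input_ids": [3, 4], "label": 1}]
#   batch = default_data_collator(features)  # {"input_ids": 2x2 tensor, "labels": tensor([0, 1])}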
| 699 | 0 |
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
lowerCAmelCase__ : Tuple = 16
lowerCAmelCase__ : Optional[Any] = 32
def UpperCamelCase__ ( A__ , A__ , A__ , A__ , A__ = 16 ) -> List[str]:
snake_case__ : Tuple = AutoTokenizer.from_pretrained('bert-base-cased' )
snake_case__ : int = DatasetDict(
{
'train': dataset['train'].select(A__ ),
'validation': dataset['train'].select(A__ ),
'test': dataset['validation'],
} )
def tokenize_function(A__ ):
# max_length=None => use the model max length (it's actually the default)
snake_case__ : Any = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=A__ , max_length=A__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
snake_case__ : Optional[Any] = datasets.map(
A__ , batched=A__ , remove_columns=['idx', 'sentence1', 'sentence2'] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
snake_case__ : str = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(A__ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
snake_case__ : Optional[int] = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
snake_case__ : str = 16
elif accelerator.mixed_precision != "no":
snake_case__ : int = 8
else:
snake_case__ : str = None
return tokenizer.pad(
A__ , padding='longest' , max_length=A__ , pad_to_multiple_of=A__ , return_tensors='pt' , )
# Instantiate dataloaders.
snake_case__ : Optional[int] = DataLoader(
tokenized_datasets['train'] , shuffle=A__ , collate_fn=A__ , batch_size=A__ )
snake_case__ : str = DataLoader(
tokenized_datasets['validation'] , shuffle=A__ , collate_fn=A__ , batch_size=A__ )
snake_case__ : List[str] = DataLoader(
tokenized_datasets['test'] , shuffle=A__ , collate_fn=A__ , batch_size=A__ )
return train_dataloader, eval_dataloader, test_dataloader
def UpperCamelCase__ ( A__ , A__ ) -> int:
# New Code #
snake_case__ : Tuple = []
# Download the dataset
snake_case__ : Optional[int] = load_dataset('glue' , 'mrpc' )
# Create our splits
snake_case__ : Any = StratifiedKFold(n_splits=int(args.num_folds ) )
# Initialize accelerator
snake_case__ : Optional[int] = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
snake_case__ : List[str] = config['lr']
snake_case__ : Union[str, Any] = int(config['num_epochs'] )
snake_case__ : Tuple = int(config['seed'] )
snake_case__ : str = int(config['batch_size'] )
snake_case__ : Any = evaluate.load('glue' , 'mrpc' )
# If the batch size is too big we use gradient accumulation
snake_case__ : List[Any] = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
snake_case__ : Union[str, Any] = batch_size // MAX_GPU_BATCH_SIZE
snake_case__ : Tuple = MAX_GPU_BATCH_SIZE
set_seed(A__ )
# New Code #
# Create our folds:
snake_case__ : Any = kfold.split(np.zeros(datasets['train'].num_rows ) , datasets['train']['label'] )
snake_case__ : Union[str, Any] = []
# Iterate over them
for i, (train_idxs, valid_idxs) in enumerate(A__ ):
snake_case__ : int = get_fold_dataloaders(
A__ , A__ , A__ , A__ , )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
snake_case__ : Optional[Any] = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=A__ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
snake_case__ : str = model.to(accelerator.device )
# Instantiate optimizer
snake_case__ : Optional[int] = AdamW(params=model.parameters() , lr=A__ )
# Instantiate scheduler
snake_case__ : List[Any] = get_linear_schedule_with_warmup(
optimizer=A__ , num_warmup_steps=100 , num_training_steps=(len(A__ ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
snake_case__ : int = accelerator.prepare(
A__ , A__ , A__ , A__ , A__ )
# Now we train the model
for epoch in range(A__ ):
model.train()
for step, batch in enumerate(A__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
snake_case__ : List[Any] = model(**A__ )
snake_case__ : Any = outputs.loss
snake_case__ : Optional[int] = loss / gradient_accumulation_steps
accelerator.backward(A__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(A__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
snake_case__ : Any = model(**A__ )
snake_case__ : str = outputs.logits.argmax(dim=-1 )
snake_case__ : List[Any] = accelerator.gather_for_metrics((predictions, batch['labels']) )
metric.add_batch(
predictions=A__ , references=A__ , )
snake_case__ : Optional[int] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F"""epoch {epoch}:""" , A__ )
# New Code #
# We also run predictions on the test set at the very end
snake_case__ : List[str] = []
for step, batch in enumerate(A__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
snake_case__ : int = model(**A__ )
snake_case__ : Dict = outputs.logits
snake_case__ : List[Any] = accelerator.gather_for_metrics((predictions, batch['labels']) )
fold_predictions.append(predictions.cpu() )
if i == 0:
# We need all of the test predictions
test_references.append(references.cpu() )
# Use accelerator.print to print only on the main process.
test_predictions.append(torch.cat(A__ , dim=0 ) )
# We now need to release all our memory and get rid of the current model, optimizer, etc
accelerator.free_memory()
# New Code #
    # Finally we check the accuracy of our folded results by soft voting:
    # the per-fold logits are stacked, averaged across folds, and argmaxed.
snake_case__ : str = torch.cat(A__ , dim=0 )
snake_case__ : Tuple = torch.stack(A__ , dim=0 ).sum(dim=0 ).div(int(args.num_folds ) ).argmax(dim=-1 )
snake_case__ : Union[str, Any] = metric.compute(predictions=A__ , references=A__ )
accelerator.print('Average test metrics from all folds:' , A__ )
def UpperCamelCase__ ( ) -> Optional[int]:
snake_case__ : Tuple = argparse.ArgumentParser(description='Simple example of training script.' )
parser.add_argument(
        '--mixed_precision' , type=A__ , default=A__ , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose'
        ' between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
        ' and an Nvidia Ampere GPU.' , )
parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' )
# New Code #
parser.add_argument('--num_folds' , type=A__ , default=3 , help='The number of splits to perform across the dataset' )
snake_case__ : Any = parser.parse_args()
snake_case__ : Optional[Any] = {'lr': 2e-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
training_function(A__ , A__ )
if __name__ == "__main__":
main()
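# Example invocation (the script filename is a placeholder; both flags are
# defined in main() above, and `accelerate launch` is the standard entry point):
#
#   accelerate launch cross_validation.py --num_folds 5 --mixed_precision fp16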
| 712 | from dataclasses import dataclass, field
from typing import Optional
from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser
@dataclass
class __snake_case :
__lowerCamelCase = field(
metadata={"""help""": """The output directory where the model will be written."""} ,)
__lowerCamelCase = field(
metadata={
"""help""": (
"""The encoder model checkpoint for weights initialization."""
"""Don't set if you want to train an encoder model from scratch."""
)
} ,)
__lowerCamelCase = field(
metadata={
"""help""": (
"""The decoder model checkpoint for weights initialization."""
"""Don't set if you want to train a decoder model from scratch."""
)
} ,)
__lowerCamelCase = field(
default=_lowerCamelCase ,metadata={"""help""": """Pretrained encoder config name or path if not the same as encoder_model_name"""} )
__lowerCamelCase = field(
default=_lowerCamelCase ,metadata={"""help""": """Pretrained decoder config name or path if not the same as decoder_model_name"""} )
def UpperCamelCase__ ( ) -> Union[str, Any]:
snake_case__ : str = HfArgumentParser((ModelArguments,) )
((snake_case__) , ) : Dict = parser.parse_args_into_dataclasses()
# Load pretrained model and tokenizer
# Use explicit specified encoder config
if model_args.encoder_config_name:
snake_case__ : List[str] = AutoConfig.from_pretrained(model_args.encoder_config_name )
# Use pretrained encoder model's config
else:
snake_case__ : Optional[int] = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path )
# Use explicit specified decoder config
if model_args.decoder_config_name:
snake_case__ : Optional[Any] = AutoConfig.from_pretrained(model_args.decoder_config_name )
# Use pretrained decoder model's config
else:
snake_case__ : List[str] = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path )
# necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed
snake_case__ : Any = True
snake_case__ : Dict = True
snake_case__ : Tuple = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained(
encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path , decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path , encoder_config=A__ , decoder_config=A__ , )
# GPT2 only has bos/eos tokens but not decoder_start/pad tokens
snake_case__ : Optional[Any] = decoder_config.decoder_start_token_id
snake_case__ : Tuple = decoder_config.pad_token_id
if decoder_start_token_id is None:
snake_case__ : Optional[Any] = decoder_config.bos_token_id
if pad_token_id is None:
snake_case__ : int = decoder_config.eos_token_id
# This is necessary to make Flax's generate() work
snake_case__ : Union[str, Any] = decoder_config.eos_token_id
snake_case__ : Optional[int] = decoder_start_token_id
snake_case__ : int = pad_token_id
snake_case__ : Tuple = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path )
snake_case__ : int = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path )
snake_case__ : Tuple = tokenizer.convert_ids_to_tokens(model.config.pad_token_id )
model.save_pretrained(model_args.output_dir )
image_processor.save_pretrained(model_args.output_dir )
tokenizer.save_pretrained(model_args.output_dir )
if __name__ == "__main__":
main()
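# Example invocation (script filename and model ids are placeholders; the flag
# names are inferred from how model_args is used in main()):
#
#   python create_encoder_decoder.py \
#       --output_dir ./vit-gpt2 \
#       --encoder_model_name_or_path google/vit-base-patch16-224-in21k \
#       --decoder_model_name_or_path gpt2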
| 699 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaInpaintPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __snake_case ( _lowerCamelCase ,unittest.TestCase ):
__lowerCamelCase = KandinskyVaaInpaintPipeline
__lowerCamelCase = ["""image_embeds""", """negative_image_embeds""", """image""", """mask_image"""]
__lowerCamelCase = [
"""image_embeds""",
"""negative_image_embeds""",
"""image""",
"""mask_image""",
]
__lowerCamelCase = [
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
__lowerCamelCase = False
@property
def __a ( self ) -> Dict:
'''simple docstring'''
return 32
@property
def __a ( self ) -> Optional[Any]:
'''simple docstring'''
return 32
@property
def __a ( self ) -> Union[str, Any]:
'''simple docstring'''
return self.time_input_dim
@property
def __a ( self ) -> Optional[Any]:
'''simple docstring'''
return self.time_input_dim * 4
@property
def __a ( self ) -> List[Any]:
'''simple docstring'''
return 100
@property
def __a ( self ) -> Any:
'''simple docstring'''
torch.manual_seed(0 )
snake_case__ : Dict = {
'in_channels': 9,
            # Out channels is double the in channels because the model predicts both mean and variance
'out_channels': 8,
'addition_embed_type': 'image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
snake_case__ : Dict = UNetaDConditionModel(**__UpperCamelCase )
return model
@property
def __a ( self ) -> Tuple:
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def __a ( self ) -> Any:
'''simple docstring'''
torch.manual_seed(0 )
snake_case__ : Optional[Any] = VQModel(**self.dummy_movq_kwargs )
return model
def __a ( self ) -> List[Any]:
'''simple docstring'''
snake_case__ : Dict = self.dummy_unet
snake_case__ : Dict = self.dummy_movq
snake_case__ : Union[str, Any] = DDIMScheduler(
num_train_timesteps=1000 , beta_schedule='linear' , beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , clip_sample=__UpperCamelCase , set_alpha_to_one=__UpperCamelCase , steps_offset=1 , prediction_type='epsilon' , thresholding=__UpperCamelCase , )
snake_case__ : List[str] = {
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def __a ( self , __UpperCamelCase , __UpperCamelCase=0 ) -> Optional[Any]:
'''simple docstring'''
snake_case__ : str = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__UpperCamelCase ) ).to(__UpperCamelCase )
snake_case__ : Union[str, Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
__UpperCamelCase )
# create init_image
snake_case__ : Tuple = floats_tensor((1, 3, 64, 64) , rng=random.Random(__UpperCamelCase ) ).to(__UpperCamelCase )
snake_case__ : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
snake_case__ : List[str] = Image.fromarray(np.uinta(__UpperCamelCase ) ).convert('RGB' ).resize((256, 256) )
# create mask
snake_case__ : str = np.ones((64, 64) , dtype=np.floataa )
snake_case__ : str = 0
if str(__UpperCamelCase ).startswith('mps' ):
snake_case__ : Any = torch.manual_seed(__UpperCamelCase )
else:
snake_case__ : List[Any] = torch.Generator(device=__UpperCamelCase ).manual_seed(__UpperCamelCase )
snake_case__ : Any = {
'image': init_image,
'mask_image': mask,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'generator': generator,
'height': 64,
'width': 64,
'num_inference_steps': 2,
'guidance_scale': 4.0,
'output_type': 'np',
}
return inputs
def __a ( self ) -> Tuple:
'''simple docstring'''
snake_case__ : Optional[int] = 'cpu'
snake_case__ : Optional[int] = self.get_dummy_components()
snake_case__ : int = self.pipeline_class(**__UpperCamelCase )
snake_case__ : Optional[int] = pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
snake_case__ : List[str] = pipe(**self.get_dummy_inputs(__UpperCamelCase ) )
snake_case__ : int = output.images
snake_case__ : List[Any] = pipe(
**self.get_dummy_inputs(__UpperCamelCase ) , return_dict=__UpperCamelCase , )[0]
snake_case__ : Dict = image[0, -3:, -3:, -1]
snake_case__ : Union[str, Any] = image_from_tuple[0, -3:, -3:, -1]
print(F"""image.shape {image.shape}""" )
assert image.shape == (1, 64, 64, 3)
snake_case__ : Tuple = np.array(
[0.5_0_7_7_5_9_0_3, 0.4_9_5_2_7_1_9_5, 0.4_8_8_2_4_5_4_3, 0.5_0_1_9_2_2_3_7, 0.4_8_6_4_4_9_0_6, 0.4_9_3_7_3_8_1_4, 0.4_7_8_0_5_9_8, 0.4_7_2_3_4_8_2_7, 0.4_8_3_2_7_8_4_8] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
def __a ( self ) -> int:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class __snake_case ( unittest.TestCase ):
def __a ( self ) -> str:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __a ( self ) -> List[Any]:
'''simple docstring'''
snake_case__ : int = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy' )
snake_case__ : Optional[int] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
snake_case__ : Dict = np.ones((768, 768) , dtype=np.floataa )
snake_case__ : List[Any] = 0
snake_case__ : Optional[int] = 'a hat'
snake_case__ : Any = KandinskyVaaPriorPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-prior' , torch_dtype=torch.floataa )
pipe_prior.to(__UpperCamelCase )
snake_case__ : Tuple = KandinskyVaaInpaintPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-decoder-inpaint' , torch_dtype=torch.floataa )
snake_case__ : List[str] = pipeline.to(__UpperCamelCase )
pipeline.set_progress_bar_config(disable=__UpperCamelCase )
snake_case__ : Union[str, Any] = torch.Generator(device='cpu' ).manual_seed(0 )
snake_case__ : Any = pipe_prior(
__UpperCamelCase , generator=__UpperCamelCase , num_inference_steps=5 , negative_prompt='' , ).to_tuple()
snake_case__ : List[str] = pipeline(
image=__UpperCamelCase , mask_image=__UpperCamelCase , image_embeds=__UpperCamelCase , negative_image_embeds=__UpperCamelCase , generator=__UpperCamelCase , num_inference_steps=100 , height=768 , width=768 , output_type='np' , )
snake_case__ : Optional[int] = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(__UpperCamelCase , __UpperCamelCase )
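# The slow test above exercises the full two-stage Kandinsky 2.2 flow;
# distilled into a sketch (model ids taken from the test, inputs assumed):
#
#   prior = KandinskyVaaPriorPipeline.from_pretrained('kandinsky-community/kandinsky-2-2-prior')
#   image_emb, neg_emb = prior('a hat', negative_prompt='').to_tuple()
#   decoder = KandinskyVaaInpaintPipeline.from_pretrained('kandinsky-community/kandinsky-2-2-decoder-inpaint')
#   out = decoder(image=init_image, mask_image=mask, image_embeds=image_emb,
#                 negative_image_embeds=neg_emb, height=768, width=768)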
| 713 | import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def UpperCamelCase__ ( A__ , A__ , A__ , A__ , A__ , A__ = None , ) -> Optional[int]:
snake_case__ : List[str] = {}
if train_file is not None:
snake_case__ : Tuple = [train_file]
if eval_file is not None:
snake_case__ : Dict = [eval_file]
if test_file is not None:
snake_case__ : str = [test_file]
snake_case__ : Optional[Any] = datasets.load_dataset('csv' , data_files=A__ )
snake_case__ : Any = list(ds[list(files.keys() )[0]].features.keys() )
snake_case__ : Optional[Any] = features_name.pop(A__ )
snake_case__ : Optional[Any] = list(set(ds[list(files.keys() )[0]][label_name] ) )
snake_case__ : str = {label: i for i, label in enumerate(A__ )}
snake_case__ : int = tokenizer.model_input_names
snake_case__ : int = {}
if len(A__ ) == 1:
for k in files.keys():
snake_case__ : str = ds[k].map(
lambda A__ : tokenizer.batch_encode_plus(
example[features_name[0]] , truncation=A__ , max_length=A__ , padding='max_length' ) , batched=A__ , )
elif len(A__ ) == 2:
for k in files.keys():
snake_case__ : Optional[int] = ds[k].map(
lambda A__ : tokenizer.batch_encode_plus(
(example[features_name[0]], example[features_name[1]]) , truncation=A__ , max_length=A__ , padding='max_length' , ) , batched=A__ , )
def gen_train():
for ex in transformed_ds[datasets.Split.TRAIN]:
snake_case__ : int = {k: v for k, v in ex.items() if k in input_names}
snake_case__ : Any = labelaid[ex[label_name]]
yield (d, label)
def gen_val():
for ex in transformed_ds[datasets.Split.VALIDATION]:
snake_case__ : int = {k: v for k, v in ex.items() if k in input_names}
snake_case__ : Union[str, Any] = labelaid[ex[label_name]]
yield (d, label)
def gen_test():
for ex in transformed_ds[datasets.Split.TEST]:
snake_case__ : Dict = {k: v for k, v in ex.items() if k in input_names}
snake_case__ : List[str] = labelaid[ex[label_name]]
yield (d, label)
snake_case__ : Any = (
tf.data.Dataset.from_generator(
A__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TRAIN in transformed_ds
else None
)
if train_ds is not None:
snake_case__ : str = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
snake_case__ : Optional[int] = (
tf.data.Dataset.from_generator(
A__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.VALIDATION in transformed_ds
else None
)
if val_ds is not None:
snake_case__ : Optional[int] = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
snake_case__ : List[str] = (
tf.data.Dataset.from_generator(
A__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TEST in transformed_ds
else None
)
if test_ds is not None:
snake_case__ : str = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
return train_ds, val_ds, test_ds, labelaid
lowerCAmelCase__ : List[str] = logging.getLogger(__name__)
@dataclass
class __snake_case :
__lowerCamelCase = field(metadata={"""help""": """Which column contains the label"""} )
__lowerCamelCase = field(default=_lowerCamelCase ,metadata={"""help""": """The path of the training file"""} )
__lowerCamelCase = field(default=_lowerCamelCase ,metadata={"""help""": """The path of the development file"""} )
__lowerCamelCase = field(default=_lowerCamelCase ,metadata={"""help""": """The path of the test file"""} )
__lowerCamelCase = field(
default=128 ,metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} ,)
__lowerCamelCase = field(
default=_lowerCamelCase ,metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
@dataclass
class __snake_case :
__lowerCamelCase = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
__lowerCamelCase = field(
default=_lowerCamelCase ,metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
__lowerCamelCase = field(
default=_lowerCamelCase ,metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
__lowerCamelCase = field(default=_lowerCamelCase ,metadata={"""help""": """Set this flag to use fast tokenization."""} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
__lowerCamelCase = field(
default=_lowerCamelCase ,metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} ,)
def UpperCamelCase__ ( ) -> Union[str, Any]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
snake_case__ : int = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
snake_case__ , snake_case__ , snake_case__ : Dict = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
' --overwrite_output_dir to overcome.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO , )
logger.info(
F"""n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, """
F"""16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
snake_case__ : Dict = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
snake_case__ , snake_case__ , snake_case__ , snake_case__ : Dict = get_tfds(
train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=A__ , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )
snake_case__ : Dict = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(A__ ) , labelaid=A__ , idalabel={id: label for label, id in labelaid.items()} , finetuning_task='text-classification' , cache_dir=model_args.cache_dir , )
with training_args.strategy.scope():
snake_case__ : Optional[Any] = TFAutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_pt=bool('.bin' in model_args.model_name_or_path ) , config=A__ , cache_dir=model_args.cache_dir , )
def compute_metrics(A__ ) -> Dict:
snake_case__ : Optional[Any] = np.argmax(p.predictions , axis=1 )
return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
snake_case__ : Any = TFTrainer(
model=A__ , args=A__ , train_dataset=A__ , eval_dataset=A__ , compute_metrics=A__ , )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
snake_case__ : Dict = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
snake_case__ : Tuple = trainer.evaluate()
snake_case__ : Any = os.path.join(training_args.output_dir , 'eval_results.txt' )
with open(A__ , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key, value in result.items():
logger.info(F""" {key} = {value}""" )
writer.write(F"""{key} = {value}\n""" )
results.update(A__ )
return results
if __name__ == "__main__":
main()
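# Example invocation (script filename and file paths are placeholders; the
# flags come from the dataclasses above plus the standard TFTrainingArguments):
#
#   python run_tf_text_classification.py \
#       --model_name_or_path bert-base-cased \
#       --train_file train.csv --dev_file dev.csv --test_file test.csv \
#       --label_column_id 0 --output_dir ./out --do_train --do_eval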
| 699 | 0 |
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase__ : List[Any] = get_tests_dir('''fixtures/spiece.model''')
@require_sentencepiece
@require_tokenizers
class __snake_case ( _lowerCamelCase ,unittest.TestCase ):
__lowerCamelCase = DebertaVaTokenizer
__lowerCamelCase = DebertaVaTokenizerFast
__lowerCamelCase = True
__lowerCamelCase = True
def __a ( self ) -> Dict:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
snake_case__ : Union[str, Any] = DebertaVaTokenizer(__UpperCamelCase , unk_token='<unk>' )
tokenizer.save_pretrained(self.tmpdirname )
def __a ( self , __UpperCamelCase ) -> Dict:
'''simple docstring'''
snake_case__ : Tuple = 'this is a test'
snake_case__ : Optional[int] = 'this is a test'
return input_text, output_text
def __a ( self ) -> str:
'''simple docstring'''
snake_case__ : Tuple = '<pad>'
snake_case__ : List[Any] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__UpperCamelCase ) , __UpperCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__UpperCamelCase ) , __UpperCamelCase )
def __a ( self ) -> List[str]:
'''simple docstring'''
snake_case__ : Tuple = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<pad>' )
self.assertEqual(vocab_keys[1] , '<unk>' )
self.assertEqual(vocab_keys[-1] , '[PAD]' )
self.assertEqual(len(__UpperCamelCase ) , 30001 )
def __a ( self ) -> Union[str, Any]:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 30000 )
def __a ( self ) -> Optional[int]:
'''simple docstring'''
snake_case__ : str = ' \tHeLLo!how \n Are yoU? '
snake_case__ : List[str] = ['▁hello', '!', 'how', '▁are', '▁you', '?']
# fmt: on
snake_case__ : Dict = DebertaVaTokenizer(__UpperCamelCase , do_lower_case=__UpperCamelCase )
snake_case__ : Tuple = tokenizer.convert_ids_to_tokens(tokenizer.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase ) )
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
snake_case__ : Optional[Any] = DebertaVaTokenizerFast(__UpperCamelCase , do_lower_case=__UpperCamelCase )
snake_case__ : Tuple = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase ) )
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
@unittest.skip('There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.' )
def __a ( self ) -> List[Any]:
'''simple docstring'''
pass
@unittest.skip('There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.' )
def __a ( self ) -> Tuple:
'''simple docstring'''
pass
def __a ( self ) -> List[str]:
'''simple docstring'''
snake_case__ : Optional[int] = 'I was born in 92000, and this is falsé.'
snake_case__ : List[Any] = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ]
# fmt: on
snake_case__ : Optional[int] = DebertaVaTokenizer(__UpperCamelCase , split_by_punct=__UpperCamelCase )
snake_case__ : Tuple = tokenizer.convert_ids_to_tokens(tokenizer.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase ) )
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
snake_case__ : Tuple = DebertaVaTokenizerFast(__UpperCamelCase , split_by_punct=__UpperCamelCase )
snake_case__ : Optional[Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase ) )
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
def __a ( self ) -> Any:
'''simple docstring'''
snake_case__ : Optional[Any] = 'I was born in 92000, and this is falsé.'
snake_case__ : str = ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ]
# fmt: on
snake_case__ : List[Any] = DebertaVaTokenizer(__UpperCamelCase , do_lower_case=__UpperCamelCase , split_by_punct=__UpperCamelCase )
snake_case__ : Optional[int] = tokenizer.convert_ids_to_tokens(tokenizer.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase ) )
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
snake_case__ : Dict = DebertaVaTokenizerFast(__UpperCamelCase , do_lower_case=__UpperCamelCase , split_by_punct=__UpperCamelCase )
snake_case__ : int = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase ) )
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
def __a ( self ) -> Any:
'''simple docstring'''
snake_case__ : Dict = 'I was born in 92000, and this is falsé.'
snake_case__ : str = ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.', ]
# fmt: on
snake_case__ : Any = DebertaVaTokenizer(__UpperCamelCase , do_lower_case=__UpperCamelCase , split_by_punct=__UpperCamelCase )
snake_case__ : str = tokenizer.convert_ids_to_tokens(tokenizer.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase ) )
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
snake_case__ : List[Any] = DebertaVaTokenizerFast(__UpperCamelCase , do_lower_case=__UpperCamelCase , split_by_punct=__UpperCamelCase )
snake_case__ : List[Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase ) )
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
def __a ( self ) -> Optional[int]:
'''simple docstring'''
snake_case__ : Dict = 'I was born in 92000, and this is falsé.'
snake_case__ : int = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ]
# fmt: on
snake_case__ : int = DebertaVaTokenizer(__UpperCamelCase , do_lower_case=__UpperCamelCase , split_by_punct=__UpperCamelCase )
snake_case__ : str = tokenizer.convert_ids_to_tokens(tokenizer.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase ) )
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
snake_case__ : List[str] = DebertaVaTokenizerFast(__UpperCamelCase , do_lower_case=__UpperCamelCase , split_by_punct=__UpperCamelCase )
snake_case__ : Any = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase ) )
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
def __a ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case__ : Optional[int] = ' \tHeLLo!how \n Are yoU? '
snake_case__ : str = ['▁', '<unk>', 'e', '<unk>', 'o', '!', 'how', '▁', '<unk>', 're', '▁yo', '<unk>', '?']
# fmt: on
snake_case__ : int = DebertaVaTokenizer(__UpperCamelCase , do_lower_case=__UpperCamelCase , split_by_punct=__UpperCamelCase )
snake_case__ : Optional[int] = tokenizer.convert_ids_to_tokens(tokenizer.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase ) )
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
snake_case__ : Union[str, Any] = DebertaVaTokenizerFast(__UpperCamelCase , do_lower_case=__UpperCamelCase , split_by_punct=__UpperCamelCase )
snake_case__ : Dict = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase ) )
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
def __a ( self ) -> Dict:
'''simple docstring'''
snake_case__ : Optional[int] = self.get_tokenizer()
snake_case__ : Any = self.get_rust_tokenizer()
snake_case__ : Union[str, Any] = 'I was born in 92000, and this is falsé.'
snake_case__ : List[str] = tokenizer.convert_ids_to_tokens(tokenizer.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase ) )
snake_case__ : Union[str, Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase ) )
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
snake_case__ : Dict = tokenizer.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase )
snake_case__ : Optional[Any] = rust_tokenizer.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase )
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
snake_case__ : List[Any] = self.get_rust_tokenizer()
snake_case__ : Optional[Any] = tokenizer.encode(__UpperCamelCase )
snake_case__ : Optional[Any] = rust_tokenizer.encode(__UpperCamelCase )
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
def __a ( self ) -> int:
'''simple docstring'''
snake_case__ : List[Any] = 'This is a test'
snake_case__ : Any = [13, 1, 4398, 25, 21, 1289]
snake_case__ : int = ['▁', 'T', 'his', '▁is', '▁a', '▁test']
snake_case__ : List[Any] = ['▁', '<unk>', 'his', '▁is', '▁a', '▁test']
snake_case__ : Optional[int] = DebertaVaTokenizer(__UpperCamelCase , keep_accents=__UpperCamelCase )
snake_case__ : List[Any] = DebertaVaTokenizerFast(__UpperCamelCase , keep_accents=__UpperCamelCase )
snake_case__ : str = tokenizer.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase )
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
snake_case__ : Optional[Any] = tokenizer.tokenize(__UpperCamelCase )
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
snake_case__ : Dict = tokenizer.convert_ids_to_tokens(__UpperCamelCase )
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
snake_case__ : List[Any] = rust_tokenizer.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase )
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
snake_case__ : Optional[Any] = rust_tokenizer.tokenize(__UpperCamelCase )
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
snake_case__ : Any = rust_tokenizer.convert_ids_to_tokens(__UpperCamelCase )
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
# fmt: off
snake_case__ : Dict = 'I was born in 92000, and this is falsé.'
snake_case__ : Dict = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9]
snake_case__ : Dict = ['▁', 'I', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', 'é', '.', ]
snake_case__ : List[str] = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.', ]
# fmt: on
snake_case__ : List[Any] = tokenizer.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase )
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
snake_case__ : str = tokenizer.tokenize(__UpperCamelCase )
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
snake_case__ : Any = tokenizer.convert_ids_to_tokens(__UpperCamelCase )
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
snake_case__ : Optional[Any] = rust_tokenizer.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase )
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
snake_case__ : Tuple = rust_tokenizer.tokenize(__UpperCamelCase )
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
snake_case__ : Dict = rust_tokenizer.convert_ids_to_tokens(__UpperCamelCase )
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
def __a ( self ) -> int:
'''simple docstring'''
snake_case__ : Any = DebertaVaTokenizer(__UpperCamelCase )
snake_case__ : List[Any] = tokenizer.encode('sequence builders' )
snake_case__ : Union[str, Any] = tokenizer.encode('multi-sequence build' )
snake_case__ : int = tokenizer.build_inputs_with_special_tokens(__UpperCamelCase )
snake_case__ : int = tokenizer.build_inputs_with_special_tokens(__UpperCamelCase , __UpperCamelCase )
self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , __UpperCamelCase )
self.assertEqual(
[tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , __UpperCamelCase , )
@slow
def __a ( self ) -> Tuple:
'''simple docstring'''
snake_case__ : Union[str, Any] = {'input_ids': [[1, 39867, 36, 19390, 486, 27, 35052, 81436, 18, 60685, 1225, 7, 35052, 81436, 18, 9367, 16899, 18, 15937, 53, 594, 773, 18, 16287, 30465, 36, 15937, 6, 41139, 38, 36979, 60763, 191, 6, 34132, 99, 6, 50538, 390, 43230, 6, 34132, 2779, 20850, 14, 699, 1072, 1194, 36, 382, 10901, 53, 7, 699, 1072, 2084, 36, 20422, 630, 53, 19, 105, 3049, 1896, 1053, 16899, 1506, 11, 37978, 4243, 7, 1237, 31869, 200, 16566, 654, 6, 35052, 81436, 7, 55630, 13593, 4, 2], [1, 26, 15011, 13, 667, 8, 1053, 18, 23611, 1237, 72356, 12820, 34, 104134, 1209, 35, 13313, 6627, 21, 202, 347, 7, 164, 2399, 11, 46, 4485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1232, 2864, 15785, 14951, 105, 5, 8581, 1250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__UpperCamelCase , model_name='microsoft/deberta-v2-xlarge' , revision='ad6e42c1532ddf3a15c39246b63f5559d558b670' , )
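# Quick usage sketch outside the test harness (checkpoint id taken from the
# integration test above; everything else assumed):
#
#   tok = DebertaVaTokenizer.from_pretrained('microsoft/deberta-v2-xlarge')
#   tok.tokenize('I was born in 92000, and this is falsé.')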
| 714 | from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
lowerCAmelCase__ : List[Any] = datasets.utils.logging.get_logger(__name__)
class __snake_case ( folder_based_builder.FolderBasedBuilderConfig ):
__lowerCamelCase = None
__lowerCamelCase = None
class __snake_case ( folder_based_builder.FolderBasedBuilder ):
__lowerCamelCase = datasets.Audio()
__lowerCamelCase = """audio"""
__lowerCamelCase = AudioFolderConfig
__lowerCamelCase = 42 # definition at the bottom of the script
__lowerCamelCase = AudioClassification(audio_column="""audio""" ,label_column="""label""" )
lowerCAmelCase__ : Tuple = [
'''.aiff''',
'''.au''',
'''.avr''',
'''.caf''',
'''.flac''',
'''.htk''',
'''.svx''',
'''.mat4''',
'''.mat5''',
'''.mpc2k''',
'''.ogg''',
'''.paf''',
'''.pvf''',
'''.raw''',
'''.rf64''',
'''.sd2''',
'''.sds''',
'''.ircam''',
'''.voc''',
'''.w64''',
'''.wav''',
'''.nist''',
'''.wavex''',
'''.wve''',
'''.xi''',
'''.mp3''',
'''.opus''',
]
lowerCAmelCase__ : List[Any] = AUDIO_EXTENSIONS
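# Typical entry point for this builder: point datasets.load_dataset at a
# directory of audio files; class-named subfolders become the generated
# "label" column. The path below is a placeholder:
#
#   from datasets import load_dataset
#   ds = load_dataset('audiofolder', data_dir='/path/to/audio_folder')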
| 699 | 0 |
class __snake_case :
def __init__( self , __UpperCamelCase ) -> List[Any]:
'''simple docstring'''
snake_case__ : Optional[int] = val
snake_case__ : Dict = None
snake_case__ : Tuple = None
def __a ( self , __UpperCamelCase ) -> Optional[Any]:
'''simple docstring'''
        if self.val is not None:  # explicit None check so a falsy value such as 0 still inserts
if val < self.val:
if self.left is None:
snake_case__ : int = Node(__UpperCamelCase )
else:
self.left.insert(__UpperCamelCase )
elif val > self.val:
if self.right is None:
snake_case__ : List[str] = Node(__UpperCamelCase )
else:
self.right.insert(__UpperCamelCase )
else:
snake_case__ : Union[str, Any] = val
def UpperCamelCase__ ( A__ , A__ ) -> Any:
    # Recursive in-order traversal: append node values to res in ascending order
if root:
inorder(root.left , A__ )
res.append(root.val )
inorder(root.right , A__ )
def UpperCamelCase__ ( A__ ) -> Optional[Any]:
    # Build a BST from arr, then read it back in order to obtain a sorted list
if len(A__ ) == 0:
return arr
snake_case__ : Optional[Any] = Node(arr[0] )
for i in range(1 , len(A__ ) ):
root.insert(arr[i] )
# Traverse BST in order.
snake_case__ : Dict = []
inorder(A__ , A__ )
return res
if __name__ == "__main__":
print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
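    # Note (properties of this implementation, per insert() above): duplicates
    # are collapsed by the `val == self.val` branch, and an already-sorted
    # input degenerates the BST into a linked list, so the worst case is
    # O(n^2) time versus O(n log n) on average.
    print(tree_sort([3, 1, 2, 2]))  # -> [1, 2, 3] (the duplicate 2 is dropped)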
| 715 | import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __snake_case ( _lowerCamelCase ,_lowerCamelCase ,unittest.TestCase ):
__lowerCamelCase = IFInpaintingPipeline
__lowerCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"""width""", """height"""}
__lowerCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
__lowerCamelCase = PipelineTesterMixin.required_optional_params - {"""latents"""}
def __a ( self ) -> Optional[Any]:
'''simple docstring'''
return self._get_dummy_components()
def __a ( self , __UpperCamelCase , __UpperCamelCase=0 ) -> str:
'''simple docstring'''
if str(__UpperCamelCase ).startswith('mps' ):
snake_case__ : int = torch.manual_seed(__UpperCamelCase )
else:
snake_case__ : Union[str, Any] = torch.Generator(device=__UpperCamelCase ).manual_seed(__UpperCamelCase )
snake_case__ : Optional[int] = floats_tensor((1, 3, 32, 32) , rng=random.Random(__UpperCamelCase ) ).to(__UpperCamelCase )
snake_case__ : Dict = floats_tensor((1, 3, 32, 32) , rng=random.Random(__UpperCamelCase ) ).to(__UpperCamelCase )
snake_case__ : Optional[Any] = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'mask_image': mask_image,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def __a ( self ) -> List[Any]:
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def __a ( self ) -> Optional[int]:
'''simple docstring'''
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
def __a ( self ) -> List[str]:
'''simple docstring'''
super().test_save_load_floataa(expected_max_diff=1E-1 )
def __a ( self ) -> List[str]:
'''simple docstring'''
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def __a ( self ) -> int:
'''simple docstring'''
self._test_save_load_local()
def __a ( self ) -> List[str]:
'''simple docstring'''
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 699 | 0 |
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
lowerCAmelCase__ : int = logging.get_logger(__name__)
lowerCAmelCase__ : Any = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''encoder.layer_norm_for_extract''': '''layer_norm_for_extract''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''label_embs_concat''': '''label_embeddings_concat''',
'''mask_emb''': '''masked_spec_embed''',
'''spk_proj''': '''speaker_proj''',
}
lowerCAmelCase__ : int = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
'''label_embeddings_concat''',
'''speaker_proj''',
'''layer_norm_for_extract''',
]
def UpperCamelCase__ ( A__ , A__ , A__ , A__ , A__ ) -> Optional[Any]:
for attribute in key.split('.' ):
snake_case__ : Tuple = getattr(A__ , A__ )
if weight_type is not None:
snake_case__ : Any = getattr(A__ , A__ ).shape
else:
snake_case__ : Any = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}""" )
if weight_type == "weight":
snake_case__ : int = value
elif weight_type == "weight_g":
snake_case__ : Optional[Any] = value
elif weight_type == "weight_v":
snake_case__ : Any = value
elif weight_type == "bias":
snake_case__ : List[str] = value
else:
snake_case__ : Optional[int] = value
logger.info(F"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def UpperCamelCase__ ( A__ , A__ ) -> Dict:
snake_case__ : str = []
snake_case__ : str = fairseq_model.state_dict()
snake_case__ : Dict = hf_model.unispeech_sat.feature_extractor
for name, value in fairseq_dict.items():
snake_case__ : Union[str, Any] = False
if "conv_layers" in name:
load_conv_layer(
A__ , A__ , A__ , A__ , hf_model.config.feat_extract_norm == 'group' , )
snake_case__ : Optional[int] = True
else:
for key, mapped_key in MAPPING.items():
snake_case__ : int = 'unispeech_sat.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
if "layer_norm_for_extract" in name and (".".join(name.split('.' )[:-1] ) != key):
# special case since naming is very similar
continue
snake_case__ : Tuple = True
if "*" in mapped_key:
snake_case__ : Any = name.split(A__ )[0].split('.' )[-2]
snake_case__ : List[Any] = mapped_key.replace('*' , A__ )
if "weight_g" in name:
snake_case__ : Optional[int] = 'weight_g'
elif "weight_v" in name:
snake_case__ : Optional[int] = 'weight_v'
elif "bias" in name:
snake_case__ : int = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
snake_case__ : Union[str, Any] = 'weight'
else:
snake_case__ : List[Any] = None
set_recursively(A__ , A__ , A__ , A__ , A__ )
continue
if not is_used:
unused_weights.append(A__ )
logger.warning(F"""Unused weights: {unused_weights}""" )
def UpperCamelCase__ ( A__ , A__ , A__ , A__ , A__ ) -> int:
snake_case__ : List[str] = full_name.split('conv_layers.' )[-1]
snake_case__ : Any = name.split('.' )
snake_case__ : List[str] = int(items[0] )
snake_case__ : int = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
snake_case__ : int = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
snake_case__ : Optional[Any] = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.""" )
snake_case__ : List[Any] = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" )
snake_case__ : List[str] = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(A__ )
@torch.no_grad()
def UpperCamelCase__ ( A__ , A__ , A__=None , A__=None , A__=True ) -> Any:
if config_path is not None:
snake_case__ : int = UniSpeechSatConfig.from_pretrained(A__ )
else:
snake_case__ : Any = UniSpeechSatConfig()
snake_case__ : Tuple = ''
if is_finetuned:
snake_case__ : Tuple = UniSpeechSatForCTC(A__ )
else:
snake_case__ : Optional[int] = UniSpeechSatForPreTraining(A__ )
snake_case__ : int = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
snake_case__ : List[str] = model[0].eval()
recursively_load_weights(A__ , A__ )
hf_wavavec.save_pretrained(A__ )
if __name__ == "__main__":
lowerCAmelCase__ : Optional[Any] = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
lowerCAmelCase__ : Dict = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 716 | import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase__ : List[Any] = '''▁'''
lowerCAmelCase__ : int = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
class __snake_case ( _lowerCamelCase ,unittest.TestCase ):
__lowerCamelCase = BertGenerationTokenizer
__lowerCamelCase = False
__lowerCamelCase = True
def __a ( self ) -> Optional[int]:
'''simple docstring'''
super().setUp()
snake_case__ : str = BertGenerationTokenizer(__UpperCamelCase , keep_accents=__UpperCamelCase )
tokenizer.save_pretrained(self.tmpdirname )
def __a ( self ) -> Optional[int]:
'''simple docstring'''
snake_case__ : List[str] = '<s>'
snake_case__ : Dict = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__UpperCamelCase ) , __UpperCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__UpperCamelCase ) , __UpperCamelCase )
def __a ( self ) -> List[str]:
'''simple docstring'''
snake_case__ : Tuple = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<unk>' )
self.assertEqual(vocab_keys[1] , '<s>' )
self.assertEqual(vocab_keys[-1] , '<pad>' )
self.assertEqual(len(__UpperCamelCase ) , 1002 )
def __a ( self ) -> int:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def __a ( self ) -> Tuple:
'''simple docstring'''
snake_case__ : Optional[Any] = BertGenerationTokenizer(__UpperCamelCase , keep_accents=__UpperCamelCase )
snake_case__ : int = tokenizer.tokenize('This is a test' )
self.assertListEqual(__UpperCamelCase , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__UpperCamelCase ) , [285, 46, 10, 170, 382] , )
snake_case__ : Any = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
__UpperCamelCase , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
snake_case__ : Optional[Any] = tokenizer.convert_tokens_to_ids(__UpperCamelCase )
self.assertListEqual(
__UpperCamelCase , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
snake_case__ : int = tokenizer.convert_ids_to_tokens(__UpperCamelCase )
self.assertListEqual(
__UpperCamelCase , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
@cached_property
def __a ( self ) -> Dict:
'''simple docstring'''
return BertGenerationTokenizer.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' )
@slow
def __a ( self ) -> Any:
'''simple docstring'''
snake_case__ : int = 'Hello World!'
snake_case__ : Union[str, Any] = [18536, 2260, 101]
self.assertListEqual(__UpperCamelCase , self.big_tokenizer.encode(__UpperCamelCase ) )
@slow
def __a ( self ) -> Optional[int]:
'''simple docstring'''
snake_case__ : str = (
'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'
)
snake_case__ : List[Any] = [
871,
419,
358,
946,
991,
2521,
452,
358,
1357,
387,
7751,
3536,
112,
985,
456,
126,
865,
938,
5400,
5734,
458,
1368,
467,
786,
2462,
5246,
1159,
633,
865,
4519,
457,
582,
852,
2557,
427,
916,
508,
405,
34324,
497,
391,
408,
11342,
1244,
385,
100,
938,
985,
456,
574,
362,
12597,
3200,
3129,
1172,
]
self.assertListEqual(__UpperCamelCase , self.big_tokenizer.encode(__UpperCamelCase ) )
@require_torch
@slow
def __a ( self ) -> List[str]:
'''simple docstring'''
import torch
from transformers import BertGenerationConfig, BertGenerationEncoder
# Build sequence
snake_case__ : Optional[int] = list(self.big_tokenizer.get_vocab().keys() )[:10]
snake_case__ : Optional[int] = ' '.join(__UpperCamelCase )
snake_case__ : int = self.big_tokenizer.encode_plus(__UpperCamelCase , return_tensors='pt' , return_token_type_ids=__UpperCamelCase )
snake_case__ : Tuple = self.big_tokenizer.batch_encode_plus(
[sequence + ' ' + sequence] , return_tensors='pt' , return_token_type_ids=__UpperCamelCase )
snake_case__ : Dict = BertGenerationConfig()
snake_case__ : List[str] = BertGenerationEncoder(__UpperCamelCase )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**__UpperCamelCase )
model(**__UpperCamelCase )
@slow
def __a ( self ) -> Dict:
'''simple docstring'''
snake_case__ : Optional[int] = {'input_ids': [[39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114], [448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
self.tokenizer_integration_test_util(
expected_encoding=__UpperCamelCase , model_name='google/bert_for_seq_generation_L-24_bbc_encoder' , revision='c817d1fd1be2ffa69431227a1fe320544943d4db' , )
| 699 | 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase__ : Tuple = logging.get_logger(__name__)
lowerCAmelCase__ : Union[str, Any] = '''▁'''
lowerCAmelCase__ : List[Any] = {'''vocab_file''': '''sentencepiece.bpe.model'''}
lowerCAmelCase__ : Optional[Any] = {
'''vocab_file''': {
'''facebook/xglm-564M''': '''https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model''',
}
}
lowerCAmelCase__ : str = {
'''facebook/xglm-564M''': 20_48,
}
class __snake_case ( _lowerCamelCase ):
__lowerCamelCase = VOCAB_FILES_NAMES
__lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
__lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCamelCase = ["""input_ids""", """attention_mask"""]
def __init__( self , __UpperCamelCase , __UpperCamelCase="<s>" , __UpperCamelCase="</s>" , __UpperCamelCase="</s>" , __UpperCamelCase="<s>" , __UpperCamelCase="<unk>" , __UpperCamelCase="<pad>" , __UpperCamelCase = None , **__UpperCamelCase , ) -> None:
'''simple docstring'''
snake_case__ : Any = {} if sp_model_kwargs is None else sp_model_kwargs
# Compatibility with the original tokenizer
snake_case__ : Tuple = 7
snake_case__ : Dict = [F"""<madeupword{i}>""" for i in range(self.num_madeup_words )]
snake_case__ : Union[str, Any] = kwargs.get('additional_special_tokens' , [] )
kwargs["additional_special_tokens"] += [
word for word in madeup_words if word not in kwargs["additional_special_tokens"]
]
super().__init__(
bos_token=__UpperCamelCase , eos_token=__UpperCamelCase , unk_token=__UpperCamelCase , sep_token=__UpperCamelCase , cls_token=__UpperCamelCase , pad_token=__UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCamelCase , )
snake_case__ : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__UpperCamelCase ) )
snake_case__ : Optional[Any] = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
snake_case__ : Tuple = 1
        # Mimic fairseq token-to-id alignment for the first 4 tokens
snake_case__ : Tuple = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
snake_case__ : List[Any] = len(self.sp_model )
snake_case__ : Optional[Any] = {F"""<madeupword{i}>""": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
self.fairseq_tokens_to_ids.update(__UpperCamelCase )
snake_case__ : Union[str, Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
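    # A quick numeric sketch of the alignment above (spm piece ids assumed):
    # with fairseq_offset == 1, spm piece ',' (id 3) maps to fairseq id 4,
    # '.' (id 4) to 5, and '▁' (id 5) to 6, while the four control tokens
    # are pinned explicitly in fairseq_tokens_to_ids.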
def __getstate__( self ) -> List[Any]:
'''simple docstring'''
snake_case__ : Union[str, Any] = self.__dict__.copy()
snake_case__ : Optional[Any] = None
snake_case__ : Tuple = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , __UpperCamelCase ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : Union[str, Any] = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
snake_case__ : Any = {}
snake_case__ : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def __a ( self , __UpperCamelCase , __UpperCamelCase = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.sep_token_id] + token_ids_a
snake_case__ : str = [self.sep_token_id]
return sep + token_ids_a + sep + sep + token_ids_a
def __a ( self , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__UpperCamelCase , token_ids_a=__UpperCamelCase , already_has_special_tokens=__UpperCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(__UpperCamelCase ))
return [1] + ([0] * len(__UpperCamelCase )) + [1, 1] + ([0] * len(__UpperCamelCase ))
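    # e.g. for a 3-token first sequence and a 2-token second one, the mask is
    # [1, 0, 0, 0, 1, 1, 0, 0]: the 1s mark the special tokens added above.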
def __a ( self , __UpperCamelCase , __UpperCamelCase = None ) -> List[int]:
'''simple docstring'''
snake_case__ : int = [self.sep_token_id]
if token_ids_a is None:
return len(sep + token_ids_a ) * [0]
return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0]
@property
def __a ( self ) -> Tuple:
'''simple docstring'''
return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
def __a ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : int = {self.convert_ids_to_tokens(__UpperCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __a ( self , __UpperCamelCase ) -> List[str]:
'''simple docstring'''
return self.sp_model.encode(__UpperCamelCase , out_type=__UpperCamelCase )
def __a ( self , __UpperCamelCase ) -> Union[str, Any]:
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
snake_case__ : Optional[Any] = self.sp_model.PieceToId(__UpperCamelCase )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def __a ( self , __UpperCamelCase ) -> Dict:
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def __a ( self , __UpperCamelCase ) -> int:
'''simple docstring'''
        snake_case__ : int = ''.join(__UpperCamelCase ).replace('▁' , ' ' ).strip()
return out_string
def __a ( self , __UpperCamelCase , __UpperCamelCase = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(__UpperCamelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
snake_case__ : List[str] = os.path.join(
__UpperCamelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __UpperCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__UpperCamelCase , 'wb' ) as fi:
snake_case__ : Any = self.sp_model.serialized_model_proto()
fi.write(__UpperCamelCase )
return (out_vocab_file,)
| 717 | import random
import torch
from huggingface_hub import HfApi
from diffusers import UNetaDModel
lowerCAmelCase__ : List[str] = HfApi()
lowerCAmelCase__ : str = {}
# fmt: off
lowerCAmelCase__ : int = torch.tensor([
-0.75_15, -1.68_83, 0.24_20, 0.03_00, 0.63_47, 1.34_33, -1.17_43, -3.74_67,
1.23_42, -2.24_85, 0.46_36, 0.80_76, -0.79_91, 0.39_69, 0.84_98, 0.91_89,
-1.88_87, -3.35_22, 0.76_39, 0.20_40, 0.62_71, -2.71_48, -1.63_16, 3.08_39,
0.31_86, 0.27_21, -0.97_59, -1.24_61, 2.62_57, 1.35_57
])
lowerCAmelCase__ : Dict = torch.tensor([
-2.36_39, -2.53_44, 0.00_54, -0.66_74, 1.59_90, 1.01_58, 0.31_24, -2.14_36,
1.87_95, -2.54_29, -0.15_66, -0.39_73, 1.24_90, 2.64_47, 1.22_83, -0.52_08,
-2.81_54, -3.51_19, 2.38_38, 1.20_33, 1.72_01, -2.12_56, -1.45_76, 2.79_48,
2.42_04, -0.97_52, -1.25_46, 0.80_27, 3.27_58, 3.13_65
])
lowerCAmelCase__ : Dict = torch.tensor([
-0.65_31, -0.68_91, -0.31_72, -0.53_75, -0.91_40, -0.53_67, -0.11_75, -0.78_69,
-0.38_08, -0.45_13, -0.20_98, -0.00_83, 0.31_83, 0.51_40, 0.22_47, -0.13_04,
-0.13_02, -0.28_02, -0.20_84, -0.20_25, -0.49_67, -0.48_73, -0.08_61, 0.69_25,
0.02_50, 0.12_90, -0.15_43, 0.63_16, 1.04_60, 1.49_43
])
lowerCAmelCase__ : List[str] = torch.tensor([
0.09_11, 0.11_07, 0.01_82, 0.04_35, -0.08_05, -0.06_08, 0.03_81, 0.21_72,
-0.02_80, 0.13_27, -0.02_99, -0.02_55, -0.00_50, -0.11_70, -0.10_46, 0.03_09,
0.13_67, 0.17_28, -0.05_33, -0.07_48, -0.05_34, 0.16_24, 0.03_84, -0.18_05,
-0.07_07, 0.06_42, 0.02_20, -0.01_34, -0.13_33, -0.15_05
])
lowerCAmelCase__ : Union[str, Any] = torch.tensor([
0.13_21, 0.13_37, 0.04_40, 0.06_22, -0.05_91, -0.03_70, 0.05_03, 0.21_33,
-0.01_77, 0.14_15, -0.01_16, -0.01_12, 0.00_44, -0.09_80, -0.07_89, 0.03_95,
0.15_02, 0.17_85, -0.04_88, -0.05_14, -0.04_04, 0.15_39, 0.04_54, -0.15_59,
-0.06_65, 0.06_59, 0.03_83, -0.00_05, -0.12_66, -0.13_86
])
lowerCAmelCase__ : List[Any] = torch.tensor([
0.11_54, 0.12_18, 0.03_07, 0.05_26, -0.07_11, -0.05_41, 0.03_66, 0.20_78,
-0.02_67, 0.13_17, -0.02_26, -0.01_93, -0.00_14, -0.10_55, -0.09_02, 0.03_30,
0.13_91, 0.17_09, -0.05_62, -0.06_93, -0.05_60, 0.14_82, 0.03_81, -0.16_83,
-0.06_81, 0.06_61, 0.03_31, -0.00_46, -0.12_68, -0.14_31
])
lowerCAmelCase__ : Optional[Any] = torch.tensor([
0.11_92, 0.12_40, 0.04_14, 0.06_06, -0.05_57, -0.04_12, 0.04_30, 0.20_42,
-0.02_00, 0.13_85, -0.01_15, -0.01_32, 0.00_17, -0.09_65, -0.08_02, 0.03_98,
0.14_33, 0.17_47, -0.04_58, -0.05_33, -0.04_07, 0.15_45, 0.04_19, -0.15_74,
-0.06_45, 0.06_26, 0.03_41, -0.00_10, -0.11_99, -0.13_90
])
lowerCAmelCase__ : List[str] = torch.tensor([
0.10_75, 0.10_74, 0.02_05, 0.04_31, -0.07_74, -0.06_07, 0.02_98, 0.20_42,
-0.03_20, 0.12_67, -0.02_81, -0.02_50, -0.00_64, -0.10_91, -0.09_46, 0.02_90,
0.13_28, 0.16_50, -0.05_80, -0.07_38, -0.05_86, 0.14_40, 0.03_37, -0.17_46,
-0.07_12, 0.06_05, 0.02_50, -0.00_99, -0.13_16, -0.14_73
])
lowerCAmelCase__ : List[str] = torch.tensor([
-1.45_72, -2.04_81, -0.04_14, -0.60_05, 1.41_36, 0.58_48, 0.40_28, -2.73_30,
1.22_12, -2.12_28, 0.21_55, 0.40_39, 0.76_62, 2.05_35, 0.74_77, -0.32_43,
-2.17_58, -2.76_48, 1.69_47, 0.70_26, 1.23_38, -1.60_78, -0.86_82, 2.28_10,
1.85_74, -0.57_18, -0.55_86, -0.01_86, 2.34_15, 2.12_51])
lowerCAmelCase__ : List[Any] = torch.tensor([
-1.36_90, -1.97_20, -0.40_90, -0.69_66, 1.46_60, 0.99_38, -0.13_85, -2.73_24,
0.77_36, -1.89_17, 0.29_23, 0.42_93, 0.16_93, 1.41_12, 1.18_87, -0.31_81,
-2.21_60, -2.63_81, 1.31_70, 0.81_63, 0.92_40, -1.65_44, -0.60_99, 2.52_59,
1.64_30, -0.90_90, -0.93_92, -0.01_26, 2.42_68, 2.32_66
])
lowerCAmelCase__ : Tuple = torch.tensor([
-1.35_25, -1.96_28, -0.39_56, -0.68_60, 1.46_64, 1.00_14, -0.12_59, -2.72_12,
0.77_72, -1.88_11, 0.29_96, 0.43_88, 0.17_04, 1.40_29, 1.17_01, -0.30_27,
-2.20_53, -2.62_87, 1.33_50, 0.81_31, 0.92_74, -1.62_92, -0.60_98, 2.51_31,
1.65_05, -0.89_58, -0.92_98, -0.01_51, 2.42_57, 2.33_55
])
lowerCAmelCase__ : List[str] = torch.tensor([
-2.05_85, -2.78_97, -0.28_50, -0.89_40, 1.90_52, 0.57_02, 0.63_45, -3.89_59,
1.59_32, -3.23_19, 0.19_74, 0.02_87, 1.75_66, 2.65_43, 0.83_87, -0.53_51,
-3.27_36, -4.33_75, 2.90_29, 1.63_90, 1.46_40, -2.17_01, -1.90_13, 2.93_41,
3.49_81, -0.62_55, -1.16_44, -0.15_91, 3.70_97, 3.20_66
])
lowerCAmelCase__ : Dict = torch.tensor([
-2.31_39, -2.55_94, -0.01_97, -0.67_85, 1.70_01, 1.16_06, 0.30_75, -2.17_40,
1.80_71, -2.56_30, -0.09_26, -0.38_11, 1.21_16, 2.62_46, 1.27_31, -0.53_98,
-2.81_53, -3.61_40, 2.38_93, 1.32_62, 1.62_58, -2.18_56, -1.32_67, 2.83_95,
2.37_79, -1.06_23, -1.24_68, 0.89_59, 3.33_67, 3.22_43
])
lowerCAmelCase__ : Dict = torch.tensor([
-2.06_28, -2.76_67, -0.20_89, -0.82_63, 2.05_39, 0.59_92, 0.64_95, -3.83_36,
1.60_25, -3.28_17, 0.17_21, -0.06_33, 1.75_16, 2.70_39, 0.81_00, -0.59_08,
-3.21_13, -4.43_43, 2.92_57, 1.36_32, 1.55_62, -2.14_89, -1.98_94, 3.05_60,
3.33_96, -0.73_28, -1.04_17, 0.03_83, 3.70_93, 3.23_43
])
lowerCAmelCase__ : Any = torch.tensor([
-1.45_74, -2.05_69, -0.04_73, -0.61_17, 1.40_18, 0.57_69, 0.41_29, -2.73_44,
1.22_41, -2.13_97, 0.20_00, 0.39_37, 0.76_16, 2.04_53, 0.73_24, -0.33_91,
-2.17_46, -2.77_44, 1.69_63, 0.69_21, 1.21_87, -1.61_72, -0.88_77, 2.24_39,
1.84_71, -0.58_39, -0.56_05, -0.04_64, 2.32_50, 2.12_19
])
# fmt: on
lowerCAmelCase__ : Any = api.list_models(filter='''diffusers''')
for mod in models:
if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
lowerCAmelCase__ : List[str] = '''/home/patrick/google_checkpoints/''' + mod.modelId.split('''/''')[-1]
print(F'''Started running {mod.modelId}!!!''')
if mod.modelId.startswith('''CompVis'''):
lowerCAmelCase__ : int = UNetaDModel.from_pretrained(local_checkpoint, subfolder='''unet''')
else:
lowerCAmelCase__ : str = UNetaDModel.from_pretrained(local_checkpoint)
torch.manual_seed(0)
random.seed(0)
lowerCAmelCase__ : Any = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
lowerCAmelCase__ : List[str] = torch.tensor([10] * noise.shape[0])
with torch.no_grad():
lowerCAmelCase__ : int = model(noise, time_step).sample
assert torch.allclose(
logits[0, 0, 0, :30], results['''_'''.join('''_'''.join(mod.modelId.split('''/''')).split('''-'''))], atol=1E-3
)
print(F'''{mod.modelId} has passed successfully!!!''')
| 699 | 0 |
import numpy as np
lowerCAmelCase__ : Any = [
['''a''', '''b''', '''c''', '''d''', '''e'''],
['''f''', '''g''', '''h''', '''i''', '''k'''],
['''l''', '''m''', '''n''', '''o''', '''p'''],
['''q''', '''r''', '''s''', '''t''', '''u'''],
['''v''', '''w''', '''x''', '''y''', '''z'''],
]
class __snake_case :
def __init__( self ) -> None:
'''simple docstring'''
snake_case__ : List[str] = np.array(__UpperCamelCase )
def __a ( self , __UpperCamelCase ) -> np.ndarray:
'''simple docstring'''
snake_case__ : Union[str, Any] = np.where(letter == self.SQUARE )
snake_case__ : int = np.concatenate([indexa + 1, indexa + 1] )
return indexes
def __a ( self , __UpperCamelCase , __UpperCamelCase ) -> str:
'''simple docstring'''
snake_case__ : int = self.SQUARE[indexa - 1, indexa - 1]
return letter
def __a ( self , __UpperCamelCase ) -> str:
'''simple docstring'''
snake_case__ : Any = message.lower()
snake_case__ : Optional[Any] = message.replace(' ' , '' )
snake_case__ : int = message.replace('j' , 'i' )
snake_case__ : Optional[int] = np.empty((2, len(__UpperCamelCase )) )
for letter_index in range(len(__UpperCamelCase ) ):
snake_case__ : Optional[int] = self.letter_to_numbers(message[letter_index] )
snake_case__ : List[str] = numbers[0]
snake_case__ : Union[str, Any] = numbers[1]
snake_case__ : Optional[Any] = first_step.reshape(2 * len(__UpperCamelCase ) )
snake_case__ : Dict = ''
for numbers_index in range(len(__UpperCamelCase ) ):
snake_case__ : Dict = int(second_step[numbers_index * 2] )
snake_case__ : str = int(second_step[(numbers_index * 2) + 1] )
snake_case__ : Dict = self.numbers_to_letter(__UpperCamelCase , __UpperCamelCase )
snake_case__ : List[str] = encoded_message + letter
return encoded_message
def __a ( self , __UpperCamelCase ) -> str:
'''simple docstring'''
snake_case__ : Dict = message.lower()
        snake_case__ : str = message.replace(' ' , '' )
snake_case__ : str = np.empty(2 * len(__UpperCamelCase ) )
for letter_index in range(len(__UpperCamelCase ) ):
snake_case__ : Tuple = self.letter_to_numbers(message[letter_index] )
snake_case__ : Any = numbers[0]
snake_case__ : List[Any] = numbers[1]
snake_case__ : Any = first_step.reshape((2, len(__UpperCamelCase )) )
snake_case__ : Tuple = ''
for numbers_index in range(len(__UpperCamelCase ) ):
snake_case__ : Union[str, Any] = int(second_step[0, numbers_index] )
snake_case__ : Any = int(second_step[1, numbers_index] )
snake_case__ : Optional[int] = self.numbers_to_letter(__UpperCamelCase , __UpperCamelCase )
snake_case__ : Tuple = decoded_message + letter
return decoded_message
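# Round-trip sketch (method names per the upstream Polybius implementation;
# purely illustrative here):
#
#   cipher = PolybiusCipher()
#   assert cipher.encode('test message') == cipher.encode('testmessage')  # spaces are dropped
#   assert cipher.decode(cipher.encode('somemessage')) == 'somemessage'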
| 718 | import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
lowerCAmelCase__ : Dict = logging.get_logger(__name__)
class __snake_case ( _lowerCamelCase ):
def __init__( self , *__UpperCamelCase , **__UpperCamelCase ) -> None:
'''simple docstring'''
warnings.warn(
'The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use PerceiverImageProcessor instead.' , __UpperCamelCase , )
super().__init__(*__UpperCamelCase , **__UpperCamelCase )
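# Migration sketch: code such as
#   PerceiverFeatureExtractor.from_pretrained('deepmind/vision-perceiver-conv')
# can switch to
#   PerceiverImageProcessor.from_pretrained('deepmind/vision-perceiver-conv')
# (checkpoint name illustrative; the deprecated class subclasses the new one,
# so the two are call-compatible).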
| 699 | 0 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class __snake_case ( unittest.TestCase ):
@slow
def __a ( self ) -> Optional[int]:
'''simple docstring'''
snake_case__ : List[Any] = TFCamembertModel.from_pretrained('jplu/tf-camembert-base' )
snake_case__ : Union[str, Any] = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]] , dtype=tf.intaa , )  # "J'aime le camembert !" ("I love camembert!")
snake_case__ : Optional[Any] = model(__UpperCamelCase )['last_hidden_state']
snake_case__ : Union[str, Any] = tf.TensorShape((1, 10, 768) )
self.assertEqual(output.shape , __UpperCamelCase )
# compare the actual values for a slice.
snake_case__ : Optional[int] = tf.convert_to_tensor(
[[[-0.0_2_5_4, 0.0_2_3_5, 0.1_0_2_7], [0.0_6_0_6, -0.1_8_1_1, -0.0_4_1_8], [-0.1_5_6_1, -0.1_1_2_7, 0.2_6_8_7]]] , dtype=tf.floataa , )
# camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
# camembert.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 719 | import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
lowerCAmelCase__ : List[Any] = datasets.utils.logging.get_logger(__name__)
@dataclass
class __snake_case ( datasets.BuilderConfig ):
__lowerCamelCase = None
__lowerCamelCase = "utf-8"
__lowerCamelCase = None
__lowerCamelCase = None
__lowerCamelCase = True # deprecated
__lowerCamelCase = None # deprecated
__lowerCamelCase = 10 << 20 # 10MB
__lowerCamelCase = None
class __snake_case ( datasets.ArrowBasedBuilder ):
__lowerCamelCase = JsonConfig
def __a ( self ) -> Optional[Any]:
'''simple docstring'''
if self.config.block_size is not None:
logger.warning('The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead' )
snake_case__ : str = self.config.block_size
if self.config.use_threads is not True:
logger.warning(
'The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.' )
if self.config.newlines_in_values is not None:
raise ValueError('The JSON loader parameter `newlines_in_values` is no longer supported' )
return datasets.DatasetInfo(features=self.config.features )
def __a ( self , __UpperCamelCase ) -> Dict:
'''simple docstring'''
if not self.config.data_files:
raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
snake_case__ : int = dl_manager.download_and_extract(self.config.data_files )
if isinstance(__UpperCamelCase , (str, list, tuple) ):
snake_case__ : Any = data_files
if isinstance(__UpperCamelCase , __UpperCamelCase ):
snake_case__ : Optional[Any] = [files]
snake_case__ : List[str] = [dl_manager.iter_files(__UpperCamelCase ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'files': files} )]
snake_case__ : List[Any] = []
for split_name, files in data_files.items():
if isinstance(__UpperCamelCase , __UpperCamelCase ):
snake_case__ : List[Any] = [files]
snake_case__ : Any = [dl_manager.iter_files(__UpperCamelCase ) for file in files]
splits.append(datasets.SplitGenerator(name=__UpperCamelCase , gen_kwargs={'files': files} ) )
return splits
def __a ( self , __UpperCamelCase ) -> pa.Table:
'''simple docstring'''
if self.config.features is not None:
# adding missing columns
for column_name in set(self.config.features ) - set(pa_table.column_names ):
snake_case__ : List[Any] = self.config.features.arrow_schema.field(__UpperCamelCase ).type
snake_case__ : List[str] = pa_table.append_column(__UpperCamelCase , pa.array([None] * len(__UpperCamelCase ) , type=__UpperCamelCase ) )
# more expensive cast to support nested structures with keys in a different order
# allows str <-> int/float or str to Audio for example
snake_case__ : List[str] = table_cast(__UpperCamelCase , self.config.features.arrow_schema )
return pa_table
def __a ( self , __UpperCamelCase ) -> int:
'''simple docstring'''
for file_idx, file in enumerate(itertools.chain.from_iterable(__UpperCamelCase ) ):
# If the file is one json object and if we need to look at the list of items in one specific field
if self.config.field is not None:
with open(__UpperCamelCase , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
snake_case__ : Union[str, Any] = json.load(__UpperCamelCase )
# We keep only the field we are interested in
snake_case__ : Tuple = dataset[self.config.field]
# We accept two format: a list of dicts or a dict of lists
if isinstance(__UpperCamelCase , (list, tuple) ):
snake_case__ : List[Any] = set().union(*[row.keys() for row in dataset] )
snake_case__ : List[Any] = {col: [row.get(__UpperCamelCase ) for row in dataset] for col in keys}
else:
snake_case__ : List[Any] = dataset
snake_case__ : Dict = pa.Table.from_pydict(__UpperCamelCase )
yield file_idx, self._cast_table(__UpperCamelCase )
# If the file has one json object per line
else:
with open(__UpperCamelCase , 'rb' ) as f:
snake_case__ : Optional[int] = 0
# Use block_size equal to the chunk size divided by 32 to leverage multithreading
# Set a default minimum value of 16kB if the chunk size is really small
snake_case__ : Tuple = max(self.config.chunksize // 32 , 16 << 10 )
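                    # e.g. with the default 10MB chunksize: max(10_485_760 // 32, 16 << 10) == 327_680 bytes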
snake_case__ : Optional[Any] = (
self.config.encoding_errors if self.config.encoding_errors is not None else 'strict'
)
while True:
snake_case__ : Optional[int] = f.read(self.config.chunksize )
if not batch:
break
# Finish current line
try:
batch += f.readline()
except (AttributeError, io.UnsupportedOperation):
batch += readline(__UpperCamelCase )
# PyArrow only accepts utf-8 encoded bytes
if self.config.encoding != "utf-8":
snake_case__ : int = batch.decode(self.config.encoding , errors=__UpperCamelCase ).encode('utf-8' )
try:
while True:
try:
snake_case__ : List[str] = paj.read_json(
io.BytesIO(__UpperCamelCase ) , read_options=paj.ReadOptions(block_size=__UpperCamelCase ) )
break
except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
if (
isinstance(__UpperCamelCase , pa.ArrowInvalid )
and "straddling" not in str(__UpperCamelCase )
or block_size > len(__UpperCamelCase )
):
raise
else:
# Increase the block size in case it was too small.
# The block size will be reset for the next file.
logger.debug(
F"""Batch of {len(__UpperCamelCase )} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.""" )
block_size *= 2
except pa.ArrowInvalid as e:
try:
with open(
__UpperCamelCase , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
snake_case__ : Tuple = json.load(__UpperCamelCase )
except json.JSONDecodeError:
logger.error(F"""Failed to read file '{file}' with error {type(__UpperCamelCase )}: {e}""" )
raise e
# If possible, parse the file as a list of json objects and exit the loop
if isinstance(__UpperCamelCase , __UpperCamelCase ): # list is the only sequence type supported in JSON
try:
snake_case__ : str = set().union(*[row.keys() for row in dataset] )
snake_case__ : Union[str, Any] = {col: [row.get(__UpperCamelCase ) for row in dataset] for col in keys}
snake_case__ : List[str] = pa.Table.from_pydict(__UpperCamelCase )
except (pa.ArrowInvalid, AttributeError) as e:
logger.error(F"""Failed to read file '{file}' with error {type(__UpperCamelCase )}: {e}""" )
raise ValueError(F"""Not able to read records in the JSON file at {file}.""" ) from None
yield file_idx, self._cast_table(__UpperCamelCase )
break
else:
logger.error(F"""Failed to read file '{file}' with error {type(__UpperCamelCase )}: {e}""" )
raise ValueError(
F"""Not able to read records in the JSON file at {file}. """
F"""You should probably indicate the field of the JSON file containing your records. """
F"""This JSON file contain the following fields: {str(list(dataset.keys() ) )}. """
F"""Select the correct one and provide it as `field='XXX'` to the dataset loading method. """ ) from None
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(__UpperCamelCase )
batch_idx += 1
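# Usage sketch via the public `datasets` API:
#
#   from datasets import load_dataset
#   ds = load_dataset('json', data_files='data.json', field='data')
#
# Passing `field` takes the first branch above (one JSON document whose 'data'
# key holds the records); without it, files are parsed as JSON Lines in chunks.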
| 699 | 0 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
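# For example (invocation assumed from the examples README referenced above):
#
#   accelerate config                                   # describe the hardware once
#   accelerate launch tracking_example.py --with_tracking --project_dir logs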
lowerCAmelCase__ : List[Any] = 16
lowerCAmelCase__ : int = 32
def UpperCamelCase__ ( A__ , A__ = 16 ) -> Optional[Any]:
snake_case__ : Optional[Any] = AutoTokenizer.from_pretrained('bert-base-cased' )
snake_case__ : str = load_dataset('glue' , 'mrpc' )
def tokenize_function(A__ ):
# max_length=None => use the model max length (it's actually the default)
snake_case__ : Optional[Any] = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=A__ , max_length=A__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
snake_case__ : Optional[Any] = datasets.map(
A__ , batched=A__ , remove_columns=['idx', 'sentence1', 'sentence2'] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
snake_case__ : int = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(A__ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
snake_case__ : int = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
snake_case__ : Optional[int] = 16
elif accelerator.mixed_precision != "no":
snake_case__ : Union[str, Any] = 8
else:
snake_case__ : Any = None
return tokenizer.pad(
A__ , padding='longest' , max_length=A__ , pad_to_multiple_of=A__ , return_tensors='pt' , )
# Instantiate dataloaders.
snake_case__ : Any = DataLoader(
tokenized_datasets['train'] , shuffle=A__ , collate_fn=A__ , batch_size=A__ )
snake_case__ : Tuple = DataLoader(
tokenized_datasets['validation'] , shuffle=A__ , collate_fn=A__ , batch_size=A__ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
lowerCAmelCase__ : List[Any] = mocked_dataloaders # noqa: F811
def UpperCamelCase__ ( A__ , A__ ) -> Union[str, Any]:
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS' , A__ ) == "1":
snake_case__ : List[str] = 2
# Initialize Accelerator
# New Code #
# We pass in "all" to `log_with` to grab all available trackers in the environment
# Note: If using a custom `Tracker` class, should be passed in here such as:
# >>> log_with = ["all", MyCustomTrackerClassInstance()]
if args.with_tracking:
snake_case__ : int = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with='all' , project_dir=args.project_dir )
else:
snake_case__ : int = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
snake_case__ : Tuple = config['lr']
snake_case__ : Any = int(config['num_epochs'] )
snake_case__ : int = int(config['seed'] )
snake_case__ : int = int(config['batch_size'] )
set_seed(A__ )
snake_case__ : str = get_dataloaders(A__ , A__ )
snake_case__ : List[str] = evaluate.load('glue' , 'mrpc' )
# If the batch size is too big we use gradient accumulation
snake_case__ : Optional[Any] = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
snake_case__ : Optional[int] = batch_size // MAX_GPU_BATCH_SIZE
snake_case__ : Dict = MAX_GPU_BATCH_SIZE
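        # e.g. a hypothetical batch_size of 64 would give gradient_accumulation_steps == 4
        # and an effective per-step batch size of MAX_GPU_BATCH_SIZE == 16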
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
snake_case__ : Union[str, Any] = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=A__ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
snake_case__ : str = model.to(accelerator.device )
# Instantiate optimizer
snake_case__ : Optional[int] = AdamW(params=model.parameters() , lr=A__ )
# Instantiate scheduler
snake_case__ : List[str] = get_linear_schedule_with_warmup(
optimizer=A__ , num_warmup_steps=100 , num_training_steps=(len(A__ ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
snake_case__ : Tuple = accelerator.prepare(
A__ , A__ , A__ , A__ , A__ )
# New Code #
# We need to initialize the trackers we use. Overall configurations can also be stored
if args.with_tracking:
snake_case__ : Optional[Any] = os.path.split(A__ )[-1].split('.' )[0]
accelerator.init_trackers(A__ , A__ )
# Now we train the model
for epoch in range(A__ ):
model.train()
# New Code #
# For our tracking example, we will log the total loss of each epoch
if args.with_tracking:
snake_case__ : Any = 0
for step, batch in enumerate(A__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
snake_case__ : Optional[int] = model(**A__ )
snake_case__ : str = outputs.loss
# New Code #
if args.with_tracking:
total_loss += loss.detach().float()
snake_case__ : Any = loss / gradient_accumulation_steps
accelerator.backward(A__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(A__ ):
# We could avoid this line since we set the accelerator with `device_placement=True` (the default).
batch.to(accelerator.device )
with torch.no_grad():
snake_case__ : Tuple = model(**A__ )
snake_case__ : str = outputs.logits.argmax(dim=-1 )
snake_case__ : Any = accelerator.gather_for_metrics((predictions, batch['labels']) )
metric.add_batch(
predictions=A__ , references=A__ , )
snake_case__ : List[str] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F"""epoch {epoch}:""" , A__ )
# New Code #
# To actually log, we call `Accelerator.log`
# The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
if args.with_tracking:
accelerator.log(
{
'accuracy': eval_metric['accuracy'],
'f1': eval_metric['f1'],
'train_loss': total_loss.item() / len(A__ ),
'epoch': epoch,
} , step=A__ , )
# New Code #
# When a run is finished, you should call `accelerator.end_training()`
# to close all of the open trackers
if args.with_tracking:
accelerator.end_training()
def UpperCamelCase__ ( ) -> List[str]:
snake_case__ : str = argparse.ArgumentParser(description='Simple example of training script.' )
parser.add_argument(
        '--mixed_precision' , type=A__ , default=A__ , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose'
        ' between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10'
        ' and an Nvidia Ampere GPU.' , )
parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' )
parser.add_argument(
'--with_tracking' , action='store_true' , help='Whether to load in all available experiment trackers from the environment and use them for logging.' , )
parser.add_argument(
        '--project_dir' , type=A__ , default='logs' , help='Location where to store experiment tracking logs and relevant project information' , )
snake_case__ : List[Any] = parser.parse_args()
snake_case__ : List[str] = {'lr': 2e-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
training_function(A__ , A__ )
if __name__ == "__main__":
main()
| 720 | from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase__ : Any = {'''configuration_xglm''': ['''XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XGLMConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ : str = ['''XGLMTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ : Dict = ['''XGLMTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ : Optional[int] = [
'''XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XGLMForCausalLM''',
'''XGLMModel''',
'''XGLMPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ : Dict = [
'''FlaxXGLMForCausalLM''',
'''FlaxXGLMModel''',
'''FlaxXGLMPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ : Dict = [
'''TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXGLMForCausalLM''',
'''TFXGLMModel''',
'''TFXGLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ : Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 699 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase__ : List[str] = {'''configuration_plbart''': ['''PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PLBartConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ : Union[str, Any] = ['''PLBartTokenizer''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ : List[Any] = [
'''PLBART_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PLBartForCausalLM''',
'''PLBartForConditionalGeneration''',
'''PLBartForSequenceClassification''',
'''PLBartModel''',
'''PLBartPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ : Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 721 | from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
lowerCAmelCase__ : Dict = 2_00
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
lowerCAmelCase__ : List[str] = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
lowerCAmelCase__ : List[str] = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 10_00))
def UpperCamelCase__ ( A__ , A__ ) -> tuple[str, float]:
snake_case__ : Tuple = len([g for position, g in enumerate(A__ ) if g == main_target[position]] )
return (item, float(A__ ))
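# e.g. (using the upstream name `evaluate` for the function above):
# evaluate('abxd', 'abcd') == ('abxd', 3.0) -- three positions match the target.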
def UpperCamelCase__ ( A__ , A__ ) -> tuple[str, str]:
snake_case__ : str = random.randint(0 , len(A__ ) - 1 )
snake_case__ : int = parent_a[:random_slice] + parent_a[random_slice:]
snake_case__ : Any = parent_a[:random_slice] + parent_a[random_slice:]
return (child_a, child_a)
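# e.g. (upstream name `crossover`): with random_slice == 2,
# crossover('abcdef', 'uvwxyz') returns ('abwxyz', 'uvcdef') -- each child
# combines one parent's prefix with the other parent's suffix.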
def UpperCamelCase__ ( A__ , A__ ) -> str:
snake_case__ : List[Any] = list(A__ )
if random.uniform(0 , 1 ) < MUTATION_PROBABILITY:
        snake_case__[random.randint(0 , len(snake_case__ ) - 1 )] = random.choice(A__ )
    return "".join(snake_case__ )
def UpperCamelCase__ ( A__ , A__ , A__ , ) -> list[str]:
snake_case__ : Tuple = []
# Generate more children proportionally to the fitness score.
snake_case__ : Optional[Any] = int(parent_a[1] * 100 ) + 1
snake_case__ : str = 10 if child_n >= 10 else child_n
for _ in range(A__ ):
snake_case__ : Any = population_score[random.randint(0 , A__ )][0]
snake_case__ , snake_case__ : int = crossover(parent_a[0] , A__ )
# Append new string to the population list.
pop.append(mutate(A__ , A__ ) )
pop.append(mutate(A__ , A__ ) )
return pop
def UpperCamelCase__ ( A__ , A__ , A__ = True ) -> tuple[int, int, str]:
# Verify if N_POPULATION is bigger than N_SELECTED
if N_POPULATION < N_SELECTED:
snake_case__ : Union[str, Any] = F"""{N_POPULATION} must be bigger than {N_SELECTED}"""
raise ValueError(A__ )
# Verify that the target contains no genes besides the ones inside genes variable.
snake_case__ : Tuple = sorted({c for c in target if c not in genes} )
if not_in_genes_list:
snake_case__ : int = F"""{not_in_genes_list} is not in genes list, evolution cannot converge"""
raise ValueError(A__ )
# Generate random starting population.
snake_case__ : Union[str, Any] = []
for _ in range(A__ ):
population.append(''.join([random.choice(A__ ) for i in range(len(A__ ) )] ) )
# Just some logs to know what the algorithms is doing.
snake_case__ , snake_case__ : str = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
total_population += len(A__ )
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
snake_case__ : List[Any] = [evaluate(A__ , A__ ) for item in population]
# Check if there is a matching evolution.
        snake_case__ : int = sorted(A__ , key=lambda x : x[1] , reverse=A__ )
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
# Print the best result every 10 generation.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
F"""\nGeneration: {generation}"""
F"""\nTotal Population:{total_population}"""
F"""\nBest score: {population_score[0][1]}"""
F"""\nBest string: {population_score[0][0]}""" )
# Flush the old population, keeping some of the best evolutions.
# Keeping this avoid regression of evolution.
snake_case__ : Optional[int] = population[: int(N_POPULATION / 3 )]
population.clear()
population.extend(A__ )
# Normalize population score to be between 0 and 1.
snake_case__ : str = [
(item, score / len(A__ )) for item, score in population_score
]
# This is selection
for i in range(A__ ):
population.extend(select(population_score[int(A__ )] , A__ , A__ ) )
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
# forever to compute large strings, but will also calculate small strings in
# a far fewer generations.
if len(A__ ) > N_POPULATION:
break
if __name__ == "__main__":
lowerCAmelCase__ : str = (
'''This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!'''
)
lowerCAmelCase__ : Optional[Any] = list(
''' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm'''
'''nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\'''
)
lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__ : List[str] = basic(target_str, genes_list)
print(
F'''\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'''
)
| 699 | 0 |