# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_timm_backbone": ["TimmBackboneConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_timm_backbone"] = ["TimmBackbone"]

if TYPE_CHECKING:
    from .configuration_timm_backbone import TimmBackboneConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_timm_backbone import TimmBackbone
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
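# With the lazy structure above, the package imports cheaply: the config class
# is registered immediately, while the torch-dependent model class is only
# materialized on first attribute access, e.g. (illustrative):
#
#   from transformers import TimmBackboneConfig   # no torch import triggered
#   from transformers import TimmBackbone         # loads modeling code lazily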
# ---------------------------------------------------------------------------
def solution(n: int = 10) -> str:
    """Return the last ``n`` digits of the number 28433 * 2**7830457 + 1.

    >>> solution(10)
    '8739992577'
    """
    if not isinstance(n, int) or n < 0:
        raise ValueError("Invalid input")
    modulus = 10**n
    number = 28433 * (pow(2, 7830457, modulus)) + 1
    return str(number % modulus)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print(f"{solution(10) = }")
# ---------------------------------------------------------------------------
import json
import os
import tempfile
import unittest

import numpy as np
from datasets import load_dataset

from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import ImageGPTImageProcessor


class ImageGPTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize

    def prepare_image_processor_dict(self):
        return {
            # here we create 2 clusters for the sake of simplicity
            "clusters": np.asarray(
                [
                    [0.8866443634033203, 0.6618829369544983, 0.3891746401786804],
                    [-0.6042559146881104, -0.02295008860528469, 0.5423797369003296],
                ]
            ),
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
        }


@require_torch
@require_vision
class ImageGPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ImageGPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ImageGPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "clusters"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_image_processor_to_json_string(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        obj = json.loads(image_processor.to_json_string())
        for key, value in self.image_processor_dict.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, obj[key]))
            else:
                self.assertEqual(obj[key], value)

    def test_image_processor_to_json_file(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "image_processor.json")
            image_processor_first.to_json_file(json_file_path)
            image_processor_second = self.image_processing_class.from_json_file(json_file_path).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)

    def test_image_processor_from_and_save_pretrained(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            image_processor_first.save_pretrained(tmpdirname)
            image_processor_second = self.image_processing_class.from_pretrained(tmpdirname).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)

    @unittest.skip("ImageGPT requires clusters at initialization")
    def test_init_without_params(self):
        pass


def prepare_images():
    dataset = load_dataset("hf-internal-testing/fixtures_image_utils", split="test")

    image1 = Image.open(dataset[4]["file"])
    image2 = Image.open(dataset[5]["file"])

    return [image1, image2]


@require_vision
@require_torch
class ImageGPTImageProcessorIntegrationTest(unittest.TestCase):
    @slow
    def test_image(self):
        image_processing = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small")

        images = prepare_images()

        # test non-batched
        encoding = image_processing(images[0], return_tensors="pt")

        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (1, 1024))

        expected_ids = [306, 191, 191]
        self.assertEqual(encoding.input_ids[0, :3].tolist(), expected_ids)

        # test batched
        encoding = image_processing(images, return_tensors="pt")

        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (2, 1024))

        expected_ids = [303, 13, 13]
        self.assertEqual(encoding.input_ids[1, -3:].tolist(), expected_ids)
# ---------------------------------------------------------------------------
def solution(n: int = 600851475143) -> int:
    """Return the largest prime factor of n.

    >>> solution(13195)
    29
    """
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        # advance i to the next factor of n
        while n % i != 0:
            i += 1
        ans = i
        # divide out this prime factor completely
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans)


if __name__ == "__main__":
    print(f"{solution() = }")
# ---------------------------------------------------------------------------
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)


class TimmBackboneConfig(PretrainedConfig):
    """Configuration class for a timm backbone wrapped by `TimmBackbone`."""

    model_type = "timm_backbone"

    def __init__(
        self,
        backbone=None,
        num_channels=3,
        features_only=True,
        use_pretrained_backbone=True,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.backbone = backbone
        self.num_channels = num_channels
        self.features_only = features_only
        self.use_pretrained_backbone = use_pretrained_backbone
        self.use_timm_backbone = True
        self.out_indices = out_indices if out_indices is not None else (-1,)
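# Illustrative usage sketch (argument values are examples, not from the
# original file):
#
#   config = TimmBackboneConfig(backbone="resnet50", out_indices=(1, 2, 3, 4))
#   assert config.num_channels == 3 and config.features_only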
# ---------------------------------------------------------------------------
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format='%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=os.environ.get('LOGLEVEL', 'INFO').upper(),
stream=sys.stdout,
)
logger = logging.getLogger(__name__)

model_dict = {"facebook/bart-base": BartForConditionalGeneration}
tokenizer_dict = {"facebook/bart-base": BartTokenizer}


def parse_args():
    parser = argparse.ArgumentParser(description="Export Bart model + Beam Search to ONNX graph.")
    parser.add_argument(
        "--validation_file", type=str, default=None, help="A csv or a json file containing the validation data."
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=5,
        help="The maximum total input sequence length after tokenization.",
    )
    parser.add_argument(
        "--num_beams",
        type=int,
        default=None,
        help=(
            "Number of beams to use for evaluation. This argument will be "
            "passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."
        ),
    )
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=True,
    )
    parser.add_argument(
        "--config_name",
        type=str,
        default=None,
        help="Pretrained config name or path if not the same as model_name",
    )
    parser.add_argument(
        "--device",
        type=str,
        default="cpu",
        help="Device where the model will be run",
    )
    parser.add_argument("--output_file_path", type=str, default=None, help="Where to store the final ONNX file.")

    args = parser.parse_args()
    return args


def load_model_tokenizer(model_name, device="cpu"):
    huggingface_model = model_dict[model_name].from_pretrained(model_name).to(device)
    tokenizer = tokenizer_dict[model_name].from_pretrained(model_name)

    if model_name in ["facebook/bart-base"]:
        huggingface_model.config.no_repeat_ngram_size = 0
        huggingface_model.config.forced_bos_token_id = None
        huggingface_model.config.min_length = 0

    return huggingface_model, tokenizer


def export_and_validate_model(model, tokenizer, onnx_file_path, num_beams, max_length):
    model.eval()

    ort_sess = None
    bart_script_model = torch.jit.script(BARTBeamSearchGenerator(model))

    with torch.no_grad():
        ARTICLE_TO_SUMMARIZE = "My friends are cool but they eat too many carbs."
        inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors="pt").to(model.device)

        summary_ids = model.generate(
            inputs["input_ids"],
            attention_mask=inputs["attention_mask"],
            num_beams=num_beams,
            max_length=max_length,
            early_stopping=True,
            decoder_start_token_id=model.config.decoder_start_token_id,
        )

        torch.onnx.export(
            bart_script_model,
            (
                inputs["input_ids"],
                inputs["attention_mask"],
                num_beams,
                max_length,
                model.config.decoder_start_token_id,
            ),
            onnx_file_path,
            opset_version=14,
            input_names=["input_ids", "attention_mask", "num_beams", "max_length", "decoder_start_token_id"],
            output_names=["output_ids"],
            dynamic_axes={
                "input_ids": {0: "batch", 1: "seq"},
                "output_ids": {0: "batch", 1: "seq_out"},
            },
            example_outputs=summary_ids,
        )

        logger.info("Model exported to {}".format(onnx_file_path))

        new_onnx_file_path = remove_dup_initializers(os.path.abspath(onnx_file_path))

        logger.info("Deduplicated and optimized model written to {}".format(new_onnx_file_path))

        ort_sess = onnxruntime.InferenceSession(new_onnx_file_path)
        ort_out = ort_sess.run(
            None,
            {
                "input_ids": inputs["input_ids"].cpu().numpy(),
                "attention_mask": inputs["attention_mask"].cpu().numpy(),
                "num_beams": np.array(num_beams),
                "max_length": np.array(max_length),
                "decoder_start_token_id": np.array(model.config.decoder_start_token_id),
            },
        )

        np.testing.assert_allclose(summary_ids.cpu().numpy(), ort_out[0], rtol=1e-3, atol=1e-3)

        logger.info("Model outputs from torch and ONNX Runtime are similar.")
        logger.info("Success.")


def main():
    args = parse_args()
    max_length = 5
    num_beams = 4

    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )

    logger.setLevel(logging.INFO)
    transformers.utils.logging.set_verbosity_error()

    device = torch.device(args.device)

    model, tokenizer = load_model_tokenizer(args.model_name_or_path, device)
    if model.config.decoder_start_token_id is None:
        raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")

    model.to(device)

    if args.max_length:
        max_length = args.max_length

    if args.num_beams:
        num_beams = args.num_beams

    if args.output_file_path:
        output_name = args.output_file_path
    else:
        output_name = "BART.onnx"

    logger.info("Exporting model to ONNX")
    export_and_validate_model(model, tokenizer, output_name, num_beams, max_length)
if __name__ == "__main__":
main()
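# Illustrative invocation (the script name and argument values below are
# examples, not taken from the original file):
#
#   python run_onnx_exporter.py --model_name_or_path facebook/bart-base \
#       --device cpu --num_beams 4 --max_length 5 --output_file_path bart.onnx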
# ---------------------------------------------------------------------------
from string import ascii_lowercase, ascii_uppercase
def capitalize(sentence: str) -> str:
    """Capitalize the first character of a sentence if it is a lowercase ASCII letter.

    >>> capitalize("hello world")
    'Hello world'
    >>> capitalize("")
    ''
    """
    if not sentence:
        return ""
    lower_to_upper = dict(zip(ascii_lowercase, ascii_uppercase))
    return lower_to_upper.get(sentence[0], sentence[0]) + sentence[1:]
if __name__ == "__main__":
from doctest import testmod
testmod()
# ---------------------------------------------------------------------------
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
# ---------------------------------------------------------------------------
import argparse

import torch

from transformers import (
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForAudioFrameClassification,
    Wav2Vec2ForSequenceClassification,
    Wav2Vec2ForXVector,
    logging,
)


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def convert_classification(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model


def convert_diarization(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model


def convert_xvector(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]

    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model


@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    """Copy/paste/tweak an s3prl checkpoint into the transformers design."""
    checkpoint = torch.load(checkpoint_path, map_location="cpu")

    downstream_dict = checkpoint["Downstream"]

    hf_config = Wav2Vec2Config.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )

    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")

    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]

    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model."
    )
    parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.")
    parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.")
    args = parser.parse_args()

    convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
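# Illustrative invocation (the script name and argument values are examples
# only):
#
#   python convert_s3prl_checkpoint.py \
#       --base_model_name facebook/wav2vec2-base \
#       --config_path ./config.json \
#       --checkpoint_path ./s3prl_downstream.ckpt \
#       --model_dump_path ./converted_model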
# ---------------------------------------------------------------------------
import torch
import torch.nn as nn

from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.t5.modeling_t5 import T5Block, T5Config, T5LayerNorm

from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin


class SpectrogramNotesEncoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    @register_to_config
    def __init__(
        self,
        max_length: int,
        vocab_size: int,
        d_model: int,
        dropout_rate: float,
        num_layers: int,
        num_heads: int,
        d_kv: int,
        d_ff: int,
        feed_forward_proj: str,
        is_decoder: bool = False,
    ):
        super().__init__()

        self.token_embedder = nn.Embedding(vocab_size, d_model)

        self.position_encoding = nn.Embedding(max_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.dropout_pre = nn.Dropout(p=dropout_rate)

        t5config = T5Config(
            vocab_size=vocab_size,
            d_model=d_model,
            num_heads=num_heads,
            d_kv=d_kv,
            d_ff=d_ff,
            dropout_rate=dropout_rate,
            feed_forward_proj=feed_forward_proj,
            is_decoder=is_decoder,
            is_encoder_decoder=False,
        )

        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            lyr = T5Block(t5config)
            self.encoders.append(lyr)

        self.layer_norm = T5LayerNorm(d_model)
        self.dropout_post = nn.Dropout(p=dropout_rate)

    def forward(self, encoder_input_tokens, encoder_inputs_mask):
        x = self.token_embedder(encoder_input_tokens)

        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length, device=encoder_input_tokens.device)
        x += self.position_encoding(inputs_positions)

        x = self.dropout_pre(x)

        # invert the attention mask into the additive form expected by T5Block
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape)

        for lyr in self.encoders:
            x = lyr(x, extended_attention_mask)[0]
        x = self.layer_norm(x)

        return self.dropout_post(x), encoder_inputs_mask
# ---------------------------------------------------------------------------
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
_unmatched = object()

# For specifying empty leaf dict `{}`
empty_dict = object()


def _match(qs, ks):
    """Return True if the regexes in qs match any window of the strings in tuple ks."""
    # compile regexes and force complete match
    qts = tuple((re.compile(x + "$") for x in qs))
    for i in range(len(ks) - len(qts) + 1):
        matches = [x.match(y) for x, y in zip(qts, ks[i:])]
        if matches and all(matches):
            return True
    return False


def _replacement_rules(rules):
    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val

    return replace


def _get_partition_rules():
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P("mp", None)),
        (("transformer", "wte", "embedding"), P("mp", None)),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, "mp")),
        (("attention", "out_proj", "kernel"), P("mp", None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, "mp")),
        (("mlp", "c_fc", "bias"), P("mp")),
        (("mlp", "c_proj", "kernel"), P("mp", None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]


def set_partitions(in_dict):
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)

    initd = {k: _unmatched for k in flatten_dict(in_dict)}
    result = {k: replace(k, v) for k, v in initd.items()}

    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
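# Minimal usage sketch (toy parameter tree; assumes jax/flax are installed).
# Every leaf must match one of the rules above, otherwise the assert fires:
#
#   import numpy as np
#
#   params = {
#       "transformer": {
#           "wte": {"embedding": np.zeros((8, 4))},
#           "wpe": {"embedding": np.zeros((8, 4))},
#       }
#   }
#   specs = set_partitions(params)
#   # specs["transformer"]["wte"]["embedding"] == PartitionSpec("mp", None)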
# ---------------------------------------------------------------------------
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params


logger = logging.getLogger(__name__)


def get_checkpoint_callback(output_dir, metric):
    """Saves the best model by the given validation metric."""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    elif metric == "loss":
        exp = "{val_avg_loss:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function."
        )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=1,
        every_n_epochs=1,
    )
    return checkpoint_callback


def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )


class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(
        self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True
    ) -> None:
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
            results_file.parent.mkdir(exist_ok=True)
            generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
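# Illustrative wiring sketch (assumes a LightningModule `module` exposing
# `hparams.output_dir`, `metrics`, and `metrics_save_path`):
#
#   trainer = pl.Trainer(
#       callbacks=[
#           Seq2SeqLoggingCallback(),
#           get_checkpoint_callback(output_dir="checkpoints", metric="rouge2"),
#           get_early_stopping_callback(metric="rouge2", patience=3),
#       ]
#   )
#   trainer.fit(module)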
# ---------------------------------------------------------------------------
import unittest
from transformers import DebertaV2Config, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch

    from transformers import (
        DebertaV2ForMaskedLM,
        DebertaV2ForMultipleChoice,
        DebertaV2ForQuestionAnswering,
        DebertaV2ForSequenceClassification,
        DebertaV2ForTokenClassification,
        DebertaV2Model,
    )
    from transformers.models.deberta_v2.modeling_deberta_v2 import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaV2ModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return DebertaV2Config(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            relative_attention=self.relative_attention,
            position_biased_input=self.position_biased_input,
            pos_att_type=self.pos_att_type,
        )

    def check_loss_output(self, result):
        self.parent.assertListEqual(list(result.loss.size()), [])

    def create_and_check_deberta_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaV2Model(config=config)
        model.to(torch_device)
        model.eval()
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]

        self.parent.assertListEqual(list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size])

    def create_and_check_deberta_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaV2ForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_deberta_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaV2ForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)

    def create_and_check_deberta_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaV2ForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_deberta_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaV2ForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_deberta_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaV2ForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DebertaV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DebertaV2Model,
            DebertaV2ForMaskedLM,
            DebertaV2ForSequenceClassification,
            DebertaV2ForTokenClassification,
            DebertaV2ForQuestionAnswering,
            DebertaV2ForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DebertaV2Model,
            "fill-mask": DebertaV2ForMaskedLM,
            "question-answering": DebertaV2ForQuestionAnswering,
            "text-classification": DebertaV2ForSequenceClassification,
            "token-classification": DebertaV2ForTokenClassification,
            "zero-shot": DebertaV2ForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False

    def setUp(self):
        self.model_tester = DebertaV2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaV2Config, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_deberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaV2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaV2ModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = DebertaV2Model.from_pretrained("microsoft/deberta-v2-xlarge")

        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"{output[:, 1:4, 1:4]}")
# ---------------------------------------------------------------------------
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"):
    PIL_INTERPOLATION = {
        "linear": PIL.Image.Resampling.BILINEAR,
        "bilinear": PIL.Image.Resampling.BILINEAR,
        "bicubic": PIL.Image.Resampling.BICUBIC,
        "lanczos": PIL.Image.Resampling.LANCZOS,
        "nearest": PIL.Image.Resampling.NEAREST,
    }
else:
    PIL_INTERPOLATION = {
        "linear": PIL.Image.LINEAR,
        "bilinear": PIL.Image.BILINEAR,
        "bicubic": PIL.Image.BICUBIC,
        "lanczos": PIL.Image.LANCZOS,
        "nearest": PIL.Image.NEAREST,
    }


def pt_to_pil(images):
    """Convert a torch image tensor with values in [-1, 1] to a list of PIL images."""
    images = (images / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    images = numpy_to_pil(images)
    return images


def numpy_to_pil(images):
    """Convert a numpy image batch with values in [0, 1] to a list of PIL images."""
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("uint8")
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]

    return pil_images
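# Example (illustrative): converting a batch of generated tensors in [-1, 1]
# into PIL images.
#
#   import torch
#
#   fake_batch = torch.rand(2, 3, 64, 64) * 2 - 1
#   pil_images = pt_to_pil(fake_batch)
#   assert len(pil_images) == 2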
# ---------------------------------------------------------------------------
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class SamProcessor(ProcessorMixin):
    """Wraps a SAM image processor and normalizes 2D point/box prompts for the model."""

    attributes = ["image_processor"]
    image_processor_class = "SamImageProcessor"

    def __init__(self, image_processor):
        super().__init__(image_processor)
        self.current_processor = self.image_processor
        self.point_pad_value = -10
        self.target_size = self.image_processor.size["longest_edge"]

    def __call__(
        self,
        images=None,
        input_points=None,
        input_labels=None,
        input_boxes=None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        # pop arguments that are not used in the forward but used nevertheless
        original_sizes = encoding_image_processor["original_sizes"]

        if hasattr(original_sizes, "numpy"):  # checks if Torch or TF tensor
            original_sizes = original_sizes.numpy()

        input_points, input_labels, input_boxes = self._check_and_preprocess_points(
            input_points=input_points,
            input_labels=input_labels,
            input_boxes=input_boxes,
        )

        encoding_image_processor = self._normalize_and_convert(
            encoding_image_processor,
            original_sizes,
            input_points=input_points,
            input_labels=input_labels,
            input_boxes=input_boxes,
            return_tensors=return_tensors,
        )

        return encoding_image_processor

    def _normalize_and_convert(
        self,
        encoding_image_processor,
        original_sizes,
        input_points=None,
        input_labels=None,
        input_boxes=None,
        return_tensors="pt",
    ):
        if input_points is not None:
            if len(original_sizes) != len(input_points):
                input_points = [
                    self._normalize_coordinates(self.target_size, point, original_sizes[0]) for point in input_points
                ]
            else:
                input_points = [
                    self._normalize_coordinates(self.target_size, point, original_size)
                    for point, original_size in zip(input_points, original_sizes)
                ]
            # check that all arrays have the same shape
            if not all(point.shape == input_points[0].shape for point in input_points):
                if input_labels is not None:
                    input_points, input_labels = self._pad_points_and_labels(input_points, input_labels)

            input_points = np.array(input_points)

        if input_labels is not None:
            input_labels = np.array(input_labels)

        if input_boxes is not None:
            if len(original_sizes) != len(input_boxes):
                input_boxes = [
                    self._normalize_coordinates(self.target_size, box, original_sizes[0], is_bounding_box=True)
                    for box in input_boxes
                ]
            else:
                input_boxes = [
                    self._normalize_coordinates(self.target_size, box, original_size, is_bounding_box=True)
                    for box, original_size in zip(input_boxes, original_sizes)
                ]
            input_boxes = np.array(input_boxes)

        if input_boxes is not None:
            if return_tensors == "pt":
                input_boxes = torch.from_numpy(input_boxes)
                # boxes batch size of 1 by default
                input_boxes = input_boxes.unsqueeze(1) if len(input_boxes.shape) != 3 else input_boxes
            elif return_tensors == "tf":
                input_boxes = tf.convert_to_tensor(input_boxes)
                # boxes batch size of 1 by default
                input_boxes = tf.expand_dims(input_boxes, 1) if len(input_boxes.shape) != 3 else input_boxes
            encoding_image_processor.update({"input_boxes": input_boxes})
        if input_points is not None:
            if return_tensors == "pt":
                input_points = torch.from_numpy(input_points)
                # point batch size of 1 by default
                input_points = input_points.unsqueeze(1) if len(input_points.shape) != 4 else input_points
            elif return_tensors == "tf":
                input_points = tf.convert_to_tensor(input_points)
                # point batch size of 1 by default
                input_points = tf.expand_dims(input_points, 1) if len(input_points.shape) != 4 else input_points
            encoding_image_processor.update({"input_points": input_points})
        if input_labels is not None:
            if return_tensors == "pt":
                input_labels = torch.from_numpy(input_labels)
                # point batch size of 1 by default
                input_labels = input_labels.unsqueeze(1) if len(input_labels.shape) != 3 else input_labels
            elif return_tensors == "tf":
                input_labels = tf.convert_to_tensor(input_labels)
                # point batch size of 1 by default
                input_labels = tf.expand_dims(input_labels, 1) if len(input_labels.shape) != 3 else input_labels
            encoding_image_processor.update({"input_labels": input_labels})

        return encoding_image_processor

    def _pad_points_and_labels(self, input_points, input_labels):
        """Pad the 2D points and labels to the maximum number of points in the batch."""
        expected_nb_points = max([point.shape[0] for point in input_points])
        processed_input_points = []
        for i, point in enumerate(input_points):
            if point.shape[0] != expected_nb_points:
                point = np.concatenate(
                    [point, np.zeros((expected_nb_points - point.shape[0], 2)) + self.point_pad_value], axis=0
                )
                input_labels[i] = np.append(input_labels[i], [self.point_pad_value])
            processed_input_points.append(point)
        input_points = processed_input_points
        return input_points, input_labels

    def _normalize_coordinates(self, target_size: int, coords: np.ndarray, original_size, is_bounding_box=False):
        """Rescale coordinates to the resized image; expects original_size in (H, W) format."""
        old_h, old_w = original_size
        new_h, new_w = self.image_processor._get_preprocess_shape(original_size, longest_edge=target_size)
        coords = deepcopy(coords).astype(float)

        if is_bounding_box:
            coords = coords.reshape(-1, 2, 2)

        coords[..., 0] = coords[..., 0] * (new_w / old_w)
        coords[..., 1] = coords[..., 1] * (new_h / old_h)

        if is_bounding_box:
            coords = coords.reshape(-1, 4)

        return coords

    def _check_and_preprocess_points(self, input_points=None, input_labels=None, input_boxes=None):
        if input_points is not None:
            if hasattr(input_points, "numpy"):  # checks for TF or Torch tensor
                input_points = input_points.numpy().tolist()

            if not isinstance(input_points, list) or not isinstance(input_points[0], list):
                raise ValueError("Input points must be a list of list of floating points.")
            input_points = [np.array(input_point) for input_point in input_points]
        else:
            input_points = None

        if input_labels is not None:
            if hasattr(input_labels, "numpy"):
                input_labels = input_labels.numpy().tolist()

            if not isinstance(input_labels, list) or not isinstance(input_labels[0], list):
                raise ValueError("Input labels must be a list of list integers.")
            input_labels = [np.array(label) for label in input_labels]
        else:
            input_labels = None

        if input_boxes is not None:
            if hasattr(input_boxes, "numpy"):
                input_boxes = input_boxes.numpy().tolist()

            if (
                not isinstance(input_boxes, list)
                or not isinstance(input_boxes[0], list)
                or not isinstance(input_boxes[0][0], list)
            ):
                raise ValueError("Input boxes must be a list of list of list of floating points.")
            input_boxes = [np.array(box).astype(np.float32) for box in input_boxes]
        else:
            input_boxes = None

        return input_points, input_labels, input_boxes

    @property
    def model_input_names(self):
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(image_processor_input_names))

    def post_process_masks(self, *args, **kwargs):
        return self.image_processor.post_process_masks(*args, **kwargs)
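# Illustrative usage sketch (the checkpoint id, file name, and point
# coordinates are examples only):
#
#   from transformers import SamProcessor
#   from PIL import Image
#
#   processor = SamProcessor.from_pretrained("facebook/sam-vit-base")
#   image = Image.open("scene.jpg")
#   inputs = processor(image, input_points=[[[450, 600]]], return_tensors="pt")
#   # `inputs` now contains pixel_values plus the rescaled "input_points"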
# ---------------------------------------------------------------------------
import contextlib
import csv
import json
import os
import sqlitea
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope="session" )
def _lowerCamelCase ( ) -> Optional[int]:
_a = 10
_a = datasets.Features(
{
"tokens": datasets.Sequence(datasets.Value("string" ) ),
"labels": datasets.Sequence(datasets.ClassLabel(names=["negative", "positive"] ) ),
"answers": datasets.Sequence(
{
"text": datasets.Value("string" ),
"answer_start": datasets.Value("int32" ),
} ),
"id": datasets.Value("int64" ),
} )
_a = datasets.Dataset.from_dict(
{
"tokens": [["foo"] * 5] * n,
"labels": [[1] * 5] * n,
"answers": [{"answer_start": [97], "text": ["1976"]}] * 10,
"id": list(range(lowercase ) ),
} , features=lowercase , )
return dataset
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Tuple , lowercase : int ) -> Optional[Any]:
_a = str(tmp_path_factory.mktemp("data" ) / "file.arrow" )
dataset.map(cache_file_name=lowercase )
return filename
# FILE_CONTENT + files
lowerCAmelCase_ : Union[str, Any] = '\\n Text data.\n Second line of data.'
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : List[str] ) -> List[Any]:
_a = tmp_path_factory.mktemp("data" ) / "file.txt"
_a = FILE_CONTENT
with open(lowercase , "w" ) as f:
f.write(lowercase )
return filename
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : str ) -> str:
import bza
_a = tmp_path_factory.mktemp("data" ) / "file.txt.bz2"
_a = bytes(lowercase , "utf-8" )
with bza.open(lowercase , "wb" ) as f:
f.write(lowercase )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : List[str] ) -> Optional[Any]:
import gzip
_a = str(tmp_path_factory.mktemp("data" ) / "file.txt.gz" )
_a = bytes(lowercase , "utf-8" )
with gzip.open(lowercase , "wb" ) as f:
f.write(lowercase )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Union[str, Any] ) -> Union[str, Any]:
if datasets.config.LZ4_AVAILABLE:
import lza.frame
_a = tmp_path_factory.mktemp("data" ) / "file.txt.lz4"
_a = bytes(lowercase , "utf-8" )
with lza.frame.open(lowercase , "wb" ) as f:
f.write(lowercase )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Any , lowercase : Tuple ) -> Optional[Any]:
if datasets.config.PY7ZR_AVAILABLE:
import pyazr
_a = tmp_path_factory.mktemp("data" ) / "file.txt.7z"
with pyazr.SevenZipFile(lowercase , "w" ) as archive:
archive.write(lowercase , arcname=os.path.basename(lowercase ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Dict , lowercase : Optional[Any] ) -> Dict:
import tarfile
_a = tmp_path_factory.mktemp("data" ) / "file.txt.tar"
with tarfile.TarFile(lowercase , "w" ) as f:
f.add(lowercase , arcname=os.path.basename(lowercase ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Any ) -> Union[str, Any]:
import lzma
_a = tmp_path_factory.mktemp("data" ) / "file.txt.xz"
_a = bytes(lowercase , "utf-8" )
with lzma.open(lowercase , "wb" ) as f:
f.write(lowercase )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : int , lowercase : Any ) -> Union[str, Any]:
import zipfile
_a = tmp_path_factory.mktemp("data" ) / "file.txt.zip"
with zipfile.ZipFile(lowercase , "w" ) as f:
f.write(lowercase , arcname=os.path.basename(lowercase ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Dict ) -> List[str]:
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
_a = tmp_path_factory.mktemp("data" ) / "file.txt.zst"
_a = bytes(lowercase , "utf-8" )
with zstd.open(lowercase , "wb" ) as f:
f.write(lowercase )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : List[str] ) -> Union[str, Any]:
_a = tmp_path_factory.mktemp("data" ) / "file.xml"
_a = textwrap.dedent(
"\\n <?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n <tmx version=\"1.4\">\n <header segtype=\"sentence\" srclang=\"ca\" />\n <body>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>" )
with open(lowercase , "w" ) as f:
f.write(lowercase )
return filename
lowerCAmelCase_ : Optional[int] = [
{'col_1': '0', 'col_2': 0, 'col_3': 0.0},
{'col_1': '1', 'col_2': 1, 'col_3': 1.0},
{'col_1': '2', 'col_2': 2, 'col_3': 2.0},
{'col_1': '3', 'col_2': 3, 'col_3': 3.0},
]
lowerCAmelCase_ : List[Any] = [
{'col_1': '4', 'col_2': 4, 'col_3': 4.0},
{'col_1': '5', 'col_2': 5, 'col_3': 5.0},
]
lowerCAmelCase_ : Dict = {
'col_1': ['0', '1', '2', '3'],
'col_2': [0, 1, 2, 3],
'col_3': [0.0, 1.0, 2.0, 3.0],
}
lowerCAmelCase_ : Dict = [
{'col_3': 0.0, 'col_1': '0', 'col_2': 0},
{'col_3': 1.0, 'col_1': '1', 'col_2': 1},
]
lowerCAmelCase_ : List[Any] = [
{'col_1': 's0', 'col_2': 0, 'col_3': 0.0},
{'col_1': 's1', 'col_2': 1, 'col_3': 1.0},
{'col_1': 's2', 'col_2': 2, 'col_3': 2.0},
{'col_1': 's3', 'col_2': 3, 'col_3': 3.0},
]
@pytest.fixture(scope="session" )
def _lowerCamelCase ( ) -> List[str]:
return DATA_DICT_OF_LISTS
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Union[str, Any] ) -> str:
_a = datasets.Dataset.from_dict(lowercase )
_a = str(tmp_path_factory.mktemp("data" ) / "dataset.arrow" )
dataset.map(cache_file_name=lowercase )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Dict ) -> Dict:
_a = str(tmp_path_factory.mktemp("data" ) / "dataset.sqlite" )
with contextlib.closing(sqlitea.connect(lowercase ) ) as con:
_a = con.cursor()
cur.execute("CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)" )
for item in DATA:
cur.execute("INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)" , tuple(item.values() ) )
con.commit()
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Optional[Any] ) -> str:
_a = str(tmp_path_factory.mktemp("data" ) / "dataset.csv" )
with open(lowercase , "w" , newline="" ) as f:
_a = csv.DictWriter(lowercase , fieldnames=["col_1", "col_2", "col_3"] )
writer.writeheader()
for item in DATA:
writer.writerow(lowercase )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : int ) -> Optional[Any]:
_a = str(tmp_path_factory.mktemp("data" ) / "dataset2.csv" )
with open(lowercase , "w" , newline="" ) as f:
_a = csv.DictWriter(lowercase , fieldnames=["col_1", "col_2", "col_3"] )
writer.writeheader()
for item in DATA:
writer.writerow(lowercase )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Union[str, Any] , lowercase : Union[str, Any] ) -> int:
import bza
_a = tmp_path_factory.mktemp("data" ) / "dataset.csv.bz2"
with open(lowercase , "rb" ) as f:
_a = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
with bza.open(lowercase , "wb" ) as f:
f.write(lowercase )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Optional[int] , lowercase : Any , lowercase : Any ) -> List[str]:
_a = tmp_path_factory.mktemp("data" ) / "dataset.csv.zip"
with zipfile.ZipFile(lowercase , "w" ) as f:
f.write(lowercase , arcname=os.path.basename(lowercase ) )
f.write(lowercase , arcname=os.path.basename(lowercase ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Dict , lowercase : Any , lowercase : List[Any] ) -> Dict:
_a = tmp_path_factory.mktemp("data" ) / "dataset.csv.zip"
with zipfile.ZipFile(lowercase , "w" ) as f:
f.write(lowercase , arcname=os.path.basename(csv_path.replace(".csv" , ".CSV" ) ) )
f.write(lowercase , arcname=os.path.basename(csva_path.replace(".csv" , ".CSV" ) ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Any , lowercase : Optional[Any] , lowercase : int ) -> int:
_a = tmp_path_factory.mktemp("data" ) / "dataset_with_dir.csv.zip"
with zipfile.ZipFile(lowercase , "w" ) as f:
f.write(lowercase , arcname=os.path.join("main_dir" , os.path.basename(lowercase ) ) )
f.write(lowercase , arcname=os.path.join("main_dir" , os.path.basename(lowercase ) ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : List[Any] ) -> Union[str, Any]:
_a = str(tmp_path_factory.mktemp("data" ) / "dataset.parquet" )
_a = pa.schema(
{
"col_1": pa.string(),
"col_2": pa.intaa(),
"col_3": pa.floataa(),
} )
with open(lowercase , "wb" ) as f:
_a = pq.ParquetWriter(lowercase , schema=lowercase )
_a = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(lowercase ) )] for k in DATA[0]} , schema=lowercase )
writer.write_table(lowercase )
writer.close()
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : str ) -> Union[str, Any]:
_a = str(tmp_path_factory.mktemp("data" ) / "dataset.json" )
_a = {"data": DATA}
with open(lowercase , "w" ) as f:
json.dump(lowercase , lowercase )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : int ) -> Union[str, Any]:
_a = str(tmp_path_factory.mktemp("data" ) / "dataset.json" )
_a = {"data": DATA_DICT_OF_LISTS}
with open(lowercase , "w" ) as f:
json.dump(lowercase , lowercase )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Optional[int] ) -> str:
_a = str(tmp_path_factory.mktemp("data" ) / "dataset.jsonl" )
with open(lowercase , "w" ) as f:
for item in DATA:
f.write(json.dumps(lowercase ) + "\n" )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : int ) -> List[str]:
_a = str(tmp_path_factory.mktemp("data" ) / "dataset2.jsonl" )
with open(lowercase , "w" ) as f:
for item in DATA:
f.write(json.dumps(lowercase ) + "\n" )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Optional[Any] ) -> Optional[Any]:
_a = str(tmp_path_factory.mktemp("data" ) / "dataset_312.jsonl" )
with open(lowercase , "w" ) as f:
for item in DATA_312:
f.write(json.dumps(lowercase ) + "\n" )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : str ) -> int:
_a = str(tmp_path_factory.mktemp("data" ) / "dataset-str.jsonl" )
with open(lowercase , "w" ) as f:
for item in DATA_STR:
f.write(json.dumps(lowercase ) + "\n" )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : List[str] , lowercase : Dict ) -> Tuple:
import gzip
_a = str(tmp_path_factory.mktemp("data" ) / "dataset.txt.gz" )
with open(lowercase , "rb" ) as orig_file:
with gzip.open(lowercase , "wb" ) as zipped_file:
zipped_file.writelines(lowercase )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Union[str, Any] , lowercase : List[Any] ) -> List[Any]:
import gzip
_a = str(tmp_path_factory.mktemp("data" ) / "dataset.jsonl.gz" )
with open(lowercase , "rb" ) as orig_file:
with gzip.open(lowercase , "wb" ) as zipped_file:
zipped_file.writelines(lowercase )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Optional[int] , lowercase : List[Any] , lowercase : int ) -> str:
_a = tmp_path_factory.mktemp("data" ) / "dataset.jsonl.zip"
with zipfile.ZipFile(lowercase , "w" ) as f:
f.write(lowercase , arcname=os.path.basename(lowercase ) )
f.write(lowercase , arcname=os.path.basename(lowercase ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Union[str, Any] , lowercase : Optional[int] , lowercase : int , lowercase : List[Any] ) -> Optional[int]:
_a = tmp_path_factory.mktemp("data" ) / "dataset_nested.jsonl.zip"
with zipfile.ZipFile(lowercase , "w" ) as f:
f.write(lowercase , arcname=os.path.join("nested" , os.path.basename(lowercase ) ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Optional[int] , lowercase : List[str] , lowercase : str ) -> Optional[Any]:
_a = tmp_path_factory.mktemp("data" ) / "dataset_with_dir.jsonl.zip"
with zipfile.ZipFile(lowercase , "w" ) as f:
f.write(lowercase , arcname=os.path.join("main_dir" , os.path.basename(lowercase ) ) )
f.write(lowercase , arcname=os.path.join("main_dir" , os.path.basename(lowercase ) ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Tuple , lowercase : Any , lowercase : Optional[int] ) -> int:
_a = tmp_path_factory.mktemp("data" ) / "dataset.jsonl.tar"
with tarfile.TarFile(lowercase , "w" ) as f:
f.add(lowercase , arcname=os.path.basename(lowercase ) )
f.add(lowercase , arcname=os.path.basename(lowercase ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : str , lowercase : List[str] , lowercase : Union[str, Any] , lowercase : Union[str, Any] ) -> Optional[Any]:
_a = tmp_path_factory.mktemp("data" ) / "dataset_nested.jsonl.tar"
with tarfile.TarFile(lowercase , "w" ) as f:
f.add(lowercase , arcname=os.path.join("nested" , os.path.basename(lowercase ) ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : int ) -> str:
_a = ["0", "1", "2", "3"]
_a = str(tmp_path_factory.mktemp("data" ) / "dataset.txt" )
with open(lowercase , "w" ) as f:
for item in data:
f.write(item + "\n" )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : str ) -> Dict:
_a = ["0", "1", "2", "3"]
_a = str(tmp_path_factory.mktemp("data" ) / "dataset2.txt" )
with open(lowercase , "w" ) as f:
for item in data:
f.write(item + "\n" )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Union[str, Any] ) -> Dict:
_a = ["0", "1", "2", "3"]
_a = tmp_path_factory.mktemp("data" ) / "dataset.abc"
with open(lowercase , "w" ) as f:
for item in data:
f.write(item + "\n" )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Any , lowercase : Union[str, Any] , lowercase : Any ) -> Optional[Any]:
_a = tmp_path_factory.mktemp("data" ) / "dataset.text.zip"
with zipfile.ZipFile(lowercase , "w" ) as f:
f.write(lowercase , arcname=os.path.basename(lowercase ) )
f.write(lowercase , arcname=os.path.basename(lowercase ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Dict , lowercase : List[str] , lowercase : List[str] ) -> Union[str, Any]:
_a = tmp_path_factory.mktemp("data" ) / "dataset_with_dir.text.zip"
with zipfile.ZipFile(lowercase , "w" ) as f:
f.write(lowercase , arcname=os.path.join("main_dir" , os.path.basename(lowercase ) ) )
f.write(lowercase , arcname=os.path.join("main_dir" , os.path.basename(lowercase ) ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Tuple , lowercase : int , lowercase : str ) -> int:
_a = tmp_path_factory.mktemp("data" ) / "dataset.ext.zip"
with zipfile.ZipFile(lowercase , "w" ) as f:
f.write(lowercase , arcname=os.path.basename("unsupported.ext" ) )
f.write(lowercase , arcname=os.path.basename("unsupported_2.ext" ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : List[Any] ) -> Any:
_a = "\n".join(["First", "Second\u2029with Unicode new line", "Third"] )
_a = str(tmp_path_factory.mktemp("data" ) / "dataset_with_unicode_new_lines.txt" )
with open(lowercase , "w" , encoding="utf-8" ) as f:
f.write(lowercase )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( ) -> Optional[Any]:
return os.path.join("tests" , "features" , "data" , "test_image_rgb.jpg" )
@pytest.fixture(scope="session" )
def _lowerCamelCase ( ) -> Optional[int]:
return os.path.join("tests" , "features" , "data" , "test_audio_44100.wav" )
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Any , lowercase : str ) -> Dict:
_a = tmp_path_factory.mktemp("data" ) / "dataset.img.zip"
with zipfile.ZipFile(lowercase , "w" ) as f:
f.write(lowercase , arcname=os.path.basename(lowercase ) )
f.write(lowercase , arcname=os.path.basename(lowercase ).replace(".jpg" , "2.jpg" ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : str ) -> str:
_a = tmp_path_factory.mktemp("data_dir" )
(data_dir / "subdir").mkdir()
with open(data_dir / "subdir" / "train.txt" , "w" ) as f:
f.write("foo\n" * 10 )
with open(data_dir / "subdir" / "test.txt" , "w" ) as f:
f.write("bar\n" * 10 )
# hidden file
with open(data_dir / "subdir" / ".test.txt" , "w" ) as f:
f.write("bar\n" * 10 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / ".subdir" / "train.txt" , "w" ) as f:
f.write("foo\n" * 10 )
with open(data_dir / ".subdir" / "test.txt" , "w" ) as f:
f.write("bar\n" * 10 )
return data_dir
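# Consumption sketch (illustrative, not part of the original file): pytest
# injects a session-scoped fixture into a test by matching the parameter name,
# so with readable names the zip fixtures above would be used like this
# (fixture and test names here are hypothetical):
#
#     def test_load_zipped_jsonl(zip_jsonl_path):
#         with zipfile.ZipFile(zip_jsonl_path) as zf:
#             assert len(zf.namelist()) == 2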
| 346 | 1 |
'''simple docstring'''
from copy import deepcopy
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self : List[str] , __a : list[int] | None = None , __a : int | None = None ):
if arr is None and size is not None:
_a = size
_a = [0] * size
elif arr is not None:
self.init(__a )
else:
raise ValueError("Either arr or size must be specified" )
def UpperCamelCase__ ( self : Optional[int] , __a : list[int] ):
_a = len(__a )
_a = deepcopy(__a )
for i in range(1 , self.size ):
_a = self.next_(__a )
if j < self.size:
self.tree[j] += self.tree[i]
def UpperCamelCase__ ( self : List[str] ):
_a = self.tree[:]
for i in range(self.size - 1 , 0 , -1 ):
_a = self.next_(__a )
if j < self.size:
arr[j] -= arr[i]
return arr
@staticmethod
def UpperCamelCase__ ( __a : int ):
return index + (index & (-index))
@staticmethod
def UpperCamelCase__ ( __a : int ):
return index - (index & (-index))
def UpperCamelCase__ ( self : Tuple , __a : int , __a : int ):
if index == 0:
self.tree[0] += value
return
while index < self.size:
self.tree[index] += value
_a = self.next_(__a )
def UpperCamelCase__ ( self : int , __a : int , __a : int ):
self.add(__a , value - self.get(__a ) )
def UpperCamelCase__ ( self : Union[str, Any] , __a : int ):
if right == 0:
return 0
_a = self.tree[0]
right -= 1 # make right inclusive
while right > 0:
result += self.tree[right]
_a = self.prev(__a )
return result
def UpperCamelCase__ ( self : List[Any] , __a : int , __a : int ):
return self.prefix(__a ) - self.prefix(__a )
def UpperCamelCase__ ( self : Optional[int] , __a : int ):
return self.query(__a , index + 1 )
def UpperCamelCase__ ( self : Optional[Any] , __a : int ):
value -= self.tree[0]
if value < 0:
return -1
_a = 1 # Largest power of 2 <= size
while j * 2 < self.size:
j *= 2
_a = 0
while j > 0:
if i + j < self.size and self.tree[i + j] <= value:
value -= self.tree[i + j]
i += j
j //= 2
return i
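# Structure notes (added for clarity; this is a standard Fenwick / binary
# indexed tree): tree[i] stores the sum of the half-open block
# (i - lowbit(i), i], where lowbit(i) = i & (-i). Point updates climb via
# i + lowbit(i) (the next_ helper); prefix queries descend via i - lowbit(i)
# (the prev helper). This variant keeps the element at index 0 directly in
# tree[0], since lowbit(0) would be 0 -- which is why index 0 is special-cased
# in the update method and why the prefix query seeds its result with tree[0].
# The final method binary-lifts over the tree to return the largest index
# whose prefix sum does not exceed the given value.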
if __name__ == "__main__":
import doctest
doctest.testmod()
| 346 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
__a =['image_processor', 'tokenizer']
__a ='LayoutLMv2ImageProcessor'
__a =('LayoutXLMTokenizer', 'LayoutXLMTokenizerFast')
def __init__( self : Dict , __a : int=None , __a : List[Any]=None , **__a : str ):
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , __a , )
_a = kwargs.pop("feature_extractor" )
_a = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(__a , __a )
def __call__( self : Optional[int] , __a : Optional[Any] , __a : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , __a : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , __a : Union[List[List[int]], List[List[List[int]]]] = None , __a : Optional[Union[List[int], List[List[int]]]] = None , __a : bool = True , __a : Union[bool, str, PaddingStrategy] = False , __a : Union[bool, str, TruncationStrategy] = None , __a : Optional[int] = None , __a : int = 0 , __a : Optional[int] = None , __a : Optional[bool] = None , __a : Optional[bool] = None , __a : bool = False , __a : bool = False , __a : bool = False , __a : bool = False , __a : bool = True , __a : Optional[Union[str, TensorType]] = None , **__a : Optional[Any] , ):
# verify input
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
"You cannot provide bounding boxes "
"if you initialized the image processor with apply_ocr set to True." )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
"You cannot provide word labels if you initialized the image processor with apply_ocr set to True." )
if return_overflowing_tokens is True and return_offsets_mapping is False:
raise ValueError("You cannot return overflowing tokens without returning the offsets mapping." )
# first, apply the image processor
_a = self.image_processor(images=__a , return_tensors=__a )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(__a , __a ):
_a = [text] # add batch dimension (as the image processor always adds a batch dimension)
_a = features["words"]
_a = self.tokenizer(
text=text if text is not None else features["words"] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features["boxes"] , word_labels=__a , add_special_tokens=__a , padding=__a , truncation=__a , max_length=__a , stride=__a , pad_to_multiple_of=__a , return_token_type_ids=__a , return_attention_mask=__a , return_overflowing_tokens=__a , return_special_tokens_mask=__a , return_offsets_mapping=__a , return_length=__a , verbose=__a , return_tensors=__a , **__a , )
# add pixel values
_a = features.pop("pixel_values" )
if return_overflowing_tokens is True:
_a = self.get_overflowing_images(__a , encoded_inputs["overflow_to_sample_mapping"] )
_a = images
return encoded_inputs
def UpperCamelCase__ ( self : int , __a : List[Any] , __a : int ):
# in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
_a = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(__a ) != len(__a ):
raise ValueError(
"Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
f' {len(__a )} and {len(__a )}' )
return images_with_overflow
def UpperCamelCase__ ( self : Optional[Any] , *__a : Dict , **__a : Union[str, Any] ):
return self.tokenizer.batch_decode(*__a , **__a )
def UpperCamelCase__ ( self : Union[str, Any] , *__a : Optional[int] , **__a : Optional[Any] ):
return self.tokenizer.decode(*__a , **__a )
@property
def UpperCamelCase__ ( self : int ):
return ["input_ids", "bbox", "attention_mask", "image"]
@property
def UpperCamelCase__ ( self : List[Any] ):
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , __a , )
return self.image_processor_class
@property
def UpperCamelCase__ ( self : int ):
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , __a , )
return self.image_processor
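# Usage sketch (illustrative; the de-obfuscated class corresponds to
# transformers' LayoutXLMProcessor -- the checkpoint name and its availability
# are assumptions):
#
#     from transformers import LayoutXLMProcessor
#     from PIL import Image
#
#     processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")
#     image = Image.open("document.png").convert("RGB")
#     encoding = processor(image, return_tensors="pt")  # OCR runs in the image processor
#     # encoding carries input_ids, bbox, attention_mask and image tensors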
| 346 | 1 |
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
lowerCAmelCase_ : Optional[Any] = get_tests_dir('fixtures')
lowerCAmelCase_ : Optional[Any] = get_tests_dir('fixtures/dummy_feature_extractor_config.json')
lowerCAmelCase_ : Optional[Any] = get_tests_dir('fixtures/dummy-config.json')
class __SCREAMING_SNAKE_CASE (unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase__ ( self : int ):
_a = 0
def UpperCamelCase__ ( self : Dict ):
_a = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h" )
self.assertIsInstance(__a , __a )
def UpperCamelCase__ ( self : Optional[Any] ):
_a = AutoFeatureExtractor.from_pretrained(__a )
self.assertIsInstance(__a , __a )
def UpperCamelCase__ ( self : Any ):
with tempfile.TemporaryDirectory() as tmpdirname:
_a = WavaVecaConfig()
# remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
_a = AutoFeatureExtractor.from_pretrained(__a ).to_dict()
config_dict.pop("feature_extractor_type" )
_a = WavaVecaFeatureExtractor(**__a )
# save in new folder
model_config.save_pretrained(__a )
config.save_pretrained(__a )
_a = AutoFeatureExtractor.from_pretrained(__a )
# make sure private variable is not incorrectly saved
_a = json.loads(config.to_json_string() )
self.assertTrue("_processor_class" not in dict_as_saved )
self.assertIsInstance(__a , __a )
def UpperCamelCase__ ( self : Optional[Any] ):
_a = AutoFeatureExtractor.from_pretrained(__a )
self.assertIsInstance(__a , __a )
def UpperCamelCase__ ( self : int ):
with self.assertRaisesRegex(
__a , "bert-base is not a local folder and is not a valid model identifier" ):
_a = AutoFeatureExtractor.from_pretrained("bert-base" )
def UpperCamelCase__ ( self : Tuple ):
with self.assertRaisesRegex(
__a , r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
_a = AutoFeatureExtractor.from_pretrained(__a , revision="aaaaaa" )
def UpperCamelCase__ ( self : str ):
with self.assertRaisesRegex(
__a , "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json." , ):
_a = AutoFeatureExtractor.from_pretrained("hf-internal-testing/config-no-model" )
def UpperCamelCase__ ( self : List[Any] ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(__a ):
_a = AutoFeatureExtractor.from_pretrained(
"hf-internal-testing/test_dynamic_feature_extractor" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__a ):
_a = AutoFeatureExtractor.from_pretrained(
"hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=__a )
_a = AutoFeatureExtractor.from_pretrained(
"hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=__a )
self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
# Test feature extractor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(__a )
_a = AutoFeatureExtractor.from_pretrained(__a , trust_remote_code=__a )
self.assertEqual(reloaded_feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
def UpperCamelCase__ ( self : List[str] ):
try:
AutoConfig.register("custom" , __a )
AutoFeatureExtractor.register(__a , __a )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__a ):
AutoFeatureExtractor.register(__a , __a )
# Now that the config is registered, it can be used as any other config with the auto-API
_a = CustomFeatureExtractor.from_pretrained(__a )
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(__a )
_a = AutoFeatureExtractor.from_pretrained(__a )
self.assertIsInstance(__a , __a )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
def UpperCamelCase__ ( self : List[Any] ):
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
__a =True
try:
AutoConfig.register("custom" , __a )
AutoFeatureExtractor.register(__a , __a )
# If remote code is not set, the default is to use local
_a = AutoFeatureExtractor.from_pretrained(
"hf-internal-testing/test_dynamic_feature_extractor" )
self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
self.assertTrue(feature_extractor.is_local )
# If remote code is disabled, we load the local one.
_a = AutoFeatureExtractor.from_pretrained(
"hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=__a )
self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
self.assertTrue(feature_extractor.is_local )
# If remote is enabled, we load from the Hub
_a = AutoFeatureExtractor.from_pretrained(
"hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=__a )
self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
self.assertTrue(not hasattr(__a , "is_local" ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
| 346 |
'''simple docstring'''
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase_ : Dict = logging.get_logger(__name__)
lowerCAmelCase_ : int = '▁'
lowerCAmelCase_ : Optional[Any] = {
'vocab_file': 'vocab.json',
'spm_file': 'sentencepiece.bpe.model',
}
lowerCAmelCase_ : Optional[int] = {
'vocab_file': {
'facebook/s2t-small-librispeech-asr': (
'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json'
),
},
'spm_file': {
'facebook/s2t-small-librispeech-asr': (
'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model'
)
},
}
lowerCAmelCase_ : List[str] = {
'facebook/s2t-small-librispeech-asr': 10_24,
}
lowerCAmelCase_ : List[Any] = ['pt', 'fr', 'ru', 'nl', 'ro', 'it', 'es', 'de']
lowerCAmelCase_ : Union[str, Any] = {'mustc': MUSTC_LANGS}
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
__a =VOCAB_FILES_NAMES
__a =PRETRAINED_VOCAB_FILES_MAP
__a =MAX_MODEL_INPUT_SIZES
__a =['input_ids', 'attention_mask']
__a =[]
def __init__( self : Optional[Any] , __a : Optional[Any] , __a : Any , __a : Any="<s>" , __a : List[str]="</s>" , __a : str="<pad>" , __a : List[str]="<unk>" , __a : Union[str, Any]=False , __a : Any=False , __a : List[str]=None , __a : Optional[int]=None , __a : Optional[Dict[str, Any]] = None , **__a : int , ):
_a = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__a , eos_token=__a , unk_token=__a , pad_token=__a , do_upper_case=__a , do_lower_case=__a , tgt_lang=__a , lang_codes=__a , sp_model_kwargs=self.sp_model_kwargs , **__a , )
_a = do_upper_case
_a = do_lower_case
_a = load_json(__a )
_a = {v: k for k, v in self.encoder.items()}
_a = spm_file
_a = load_spm(__a , self.sp_model_kwargs )
if lang_codes is not None:
_a = lang_codes
_a = LANGUAGES[lang_codes]
_a = [f'<lang:{lang}>' for lang in self.langs]
_a = {lang: self.sp_model.PieceToId(f'<lang:{lang}>' ) for lang in self.langs}
_a = self.lang_tokens
_a = tgt_lang if tgt_lang is not None else self.langs[0]
self.set_tgt_lang_special_tokens(self._tgt_lang )
else:
_a = {}
@property
def UpperCamelCase__ ( self : str ):
return len(self.encoder )
@property
def UpperCamelCase__ ( self : str ):
return self._tgt_lang
@tgt_lang.setter
def UpperCamelCase__ ( self : Optional[int] , __a : Any ):
_a = new_tgt_lang
self.set_tgt_lang_special_tokens(__a )
def UpperCamelCase__ ( self : List[Any] , __a : str ):
_a = self.lang_code_to_id[tgt_lang]
_a = [lang_code_id]
def UpperCamelCase__ ( self : Dict , __a : str ):
return self.sp_model.encode(__a , out_type=__a )
def UpperCamelCase__ ( self : List[str] , __a : Any ):
return self.encoder.get(__a , self.encoder[self.unk_token] )
def UpperCamelCase__ ( self : str , __a : int ):
return self.decoder.get(__a , self.unk_token )
def UpperCamelCase__ ( self : str , __a : List[str] ):
_a = []
_a = ""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
_a = self.sp_model.decode(__a )
out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
_a = []
else:
current_sub_tokens.append(__a )
_a = self.sp_model.decode(__a )
out_string += decoded.upper() if self.do_upper_case else decoded
return out_string.strip()
def UpperCamelCase__ ( self : int , __a : Any , __a : int=None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id]
def UpperCamelCase__ ( self : Any , __a : List[int] , __a : Optional[List[int]] = None , __a : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__a , token_ids_a=__a , already_has_special_tokens=__a )
_a = [1] * len(self.prefix_tokens )
_a = [1]
if token_ids_a is None:
return prefix_ones + ([0] * len(__a )) + suffix_ones
return prefix_ones + ([0] * len(__a )) + ([0] * len(__a )) + suffix_ones
def UpperCamelCase__ ( self : Union[str, Any] ):
_a = self.encoder.copy()
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Union[str, Any] ):
_a = self.__dict__.copy()
_a = None
return state
def __setstate__( self : str , __a : Dict ):
_a = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
_a = {}
_a = load_spm(self.spm_file , self.sp_model_kwargs )
def UpperCamelCase__ ( self : List[str] , __a : str , __a : Optional[str] = None ):
_a = Path(__a )
assert save_dir.is_dir(), f'{save_directory} should be a directory'
_a = save_dir / (
(filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
)
_a = save_dir / (
(filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
)
save_json(self.encoder , __a )
if os.path.abspath(self.spm_file ) != os.path.abspath(__a ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , __a )
elif not os.path.isfile(self.spm_file ):
with open(__a , "wb" ) as fi:
_a = self.sp_model.serialized_model_proto()
fi.write(__a )
return (str(__a ), str(__a ))
def _lowerCamelCase ( lowercase : str , lowercase : Dict[str, Any] ) -> sentencepiece.SentencePieceProcessor:
_a = sentencepiece.SentencePieceProcessor(**lowercase )
spm.Load(str(lowercase ) )
return spm
def _lowerCamelCase ( lowercase : str ) -> Union[Dict, List]:
with open(lowercase , "r" ) as f:
return json.load(lowercase )
def _lowerCamelCase ( lowercase : Any , lowercase : str ) -> None:
with open(lowercase , "w" ) as f:
json.dump(lowercase , lowercase , indent=2 )
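# Round-trip sketch for the module-level helpers above (their de-obfuscated
# names -- load_spm, load_json, save_json -- are visible at the call sites in
# the tokenizer; a trained sentencepiece model file is assumed to exist):
#
#     sp = load_spm("sentencepiece.bpe.model", {})       # SentencePieceProcessor
#     pieces = sp.encode("hello world", out_type=str)    # list of subword pieces
#     save_json({"<unk>": 0}, "vocab.json")
#     assert load_json("vocab.json") == {"<unk>": 0}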
| 346 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCAmelCase_ : Optional[Any] = {
'configuration_squeezebert': [
'SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SqueezeBertConfig',
'SqueezeBertOnnxConfig',
],
'tokenization_squeezebert': ['SqueezeBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ : List[Any] = ['SqueezeBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ : List[str] = [
'SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'SqueezeBertForMaskedLM',
'SqueezeBertForMultipleChoice',
'SqueezeBertForQuestionAnswering',
'SqueezeBertForSequenceClassification',
'SqueezeBertForTokenClassification',
'SqueezeBertModel',
'SqueezeBertModule',
'SqueezeBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
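# Note: in the upstream library this final assignment targets
# sys.modules[__name__] = _LazyModule(...), swapping the module object so that
# torch/tokenizers-dependent symbols are only imported on first attribute
# access.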
| 346 |
'''simple docstring'''
from manim import *
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
def UpperCamelCase__ ( self : Dict ):
_a = Rectangle(height=0.5 , width=0.5 )
_a = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
_a = [mem.copy() for i in range(6 )]
_a = [mem.copy() for i in range(6 )]
_a = VGroup(*__a ).arrange(__a , buff=0 )
_a = VGroup(*__a ).arrange(__a , buff=0 )
_a = VGroup(__a , __a ).arrange(__a , buff=0 )
_a = Text("CPU" , font_size=24 )
_a = Group(__a , __a ).arrange(__a , buff=0.5 , aligned_edge=__a )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__a )
_a = [mem.copy() for i in range(4 )]
_a = VGroup(*__a ).arrange(__a , buff=0 )
_a = Text("GPU" , font_size=24 )
_a = Group(__a , __a ).arrange(__a , buff=0.5 , aligned_edge=__a )
gpu.move_to([-1, -1, 0] )
self.add(__a )
_a = [mem.copy() for i in range(6 )]
_a = VGroup(*__a ).arrange(__a , buff=0 )
_a = Text("Model" , font_size=24 )
_a = Group(__a , __a ).arrange(__a , buff=0.5 , aligned_edge=__a )
model.move_to([3, -1.0, 0] )
self.add(__a )
_a = []
for i, rect in enumerate(__a ):
rect.set_stroke(__a )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
_a = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(__a , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__a )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=__a , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=__a , buff=0.0 )
self.add(__a )
cpu_targs.append(__a )
_a = [mem.copy() for i in range(6 )]
_a = VGroup(*__a ).arrange(__a , buff=0 )
_a = Text("Loaded Checkpoint" , font_size=24 )
_a = Group(__a , __a ).arrange(__a , aligned_edge=__a , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
_a = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_a = MarkupText(
f'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(__a , __a )
_a = MarkupText(
f'<span fgcolor=\'{BLUE}\'>●</span> Checkpoint' , font_size=18 , )
blue_text.next_to(__a , DOWN * 2.4 , aligned_edge=key_text.get_left() )
_a = MarkupText(
f'Next, a <i><span fgcolor="{BLUE}">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor="{BLUE}">single shard</span>.' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(__a ) , Write(__a ) )
self.play(Write(__a , run_time=1 ) , Create(__a , run_time=1 ) )
_a = []
_a = []
for i, rect in enumerate(__a ):
_a = fill.copy().set_fill(__a , opacity=0.7 )
target.move_to(__a )
first_animations.append(GrowFromCenter(__a , run_time=1 ) )
_a = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(__a , run_time=1.5 ) )
self.play(*__a )
self.play(*__a )
self.wait()
| 346 | 1 |
'''simple docstring'''
def _lowerCamelCase ( lowercase : Dict ) -> List[str]:
if not head:
return True
    # split the list into two halves (fast advances two nodes per step of slow)
_a , _a = head.next, head
while fast and fast.next:
_a = fast.next.next
_a = slow.next
_a = slow.next
    _a = None  # detach the first half here; the comparison below still works even if this is skipped, since it stops at the shorter half
# reverse the second part
_a = None
while second:
_a = second.next
_a = node
_a = second
_a = nxt
# compare two parts
# second part has the same or one less node
while node:
if node.val != head.val:
return False
_a = node.next
_a = head.next
return True
def _lowerCamelCase ( lowercase : Union[str, Any] ) -> Optional[Any]:
if not head or not head.next:
return True
# 1. Get the midpoint (slow)
_a = _a = _a = head
while fast and fast.next:
_a , _a = fast.next.next, slow.next
# 2. Push the second half into the stack
_a = [slow.val]
while slow.next:
_a = slow.next
stack.append(slow.val )
# 3. Comparison
while stack:
if stack.pop() != cur.val:
return False
_a = cur.next
return True
def _lowerCamelCase ( lowercase : Optional[int] ) -> Any:
if not head or not head.next:
return True
_a = {}
_a = 0
while head:
if head.val in d:
d[head.val].append(lowercase )
else:
_a = [pos]
_a = head.next
pos += 1
_a = pos - 1
_a = 0
for v in d.values():
if len(lowercase ) % 2 != 0:
middle += 1
else:
_a = 0
for i in range(0 , len(lowercase ) ):
if v[i] + v[len(lowercase ) - 1 - step] != checksum:
return False
step += 1
if middle > 1:
return False
return True
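# Self-contained sketch (illustrative) of the O(1)-extra-space approach used by
# the first checker above. The original snippet defines no node class, so a
# minimal one is assumed here.
class _Node:
    def __init__(self, val, nxt=None):
        self.val = val
        self.next = nxt


def _is_palindrome_sketch(head):
    if not head:
        return True
    fast = slow = head
    while fast and fast.next:  # slow stops at the middle
        fast, slow = fast.next.next, slow.next
    prev = None
    while slow:  # reverse the second half in place
        slow.next, prev, slow = prev, slow, slow.next
    while prev:  # walk inward from both ends
        if prev.val != head.val:
            return False
        prev, head = prev.next, head.next
    return True


if __name__ == "__main__":
    nodes = None
    for v in (1, 2, 2, 1)[::-1]:
        nodes = _Node(v, nodes)
    assert _is_palindrome_sketch(nodes)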
| 346 |
'''simple docstring'''
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
lowerCAmelCase_ : Tuple = WebClient(token=os.environ['CI_SLACK_BOT_TOKEN'])
def _lowerCamelCase ( lowercase : List[Any] ) -> Optional[int]:
_a = test_results.split(" " )
_a = 0
_a = 0
    # When the output is short enough, it is surrounded by "=" signs: "== OUTPUT =="
# When it is too long, those signs are not present.
_a = expressions[-2] if "=" in expressions[-1] else expressions[-1]
for i, expression in enumerate(lowercase ):
if "failed" in expression:
failed += int(expressions[i - 1] )
if "passed" in expression:
success += int(expressions[i - 1] )
return failed, success, time_spent
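# Example input (illustrative): a pytest summary string such as
#   "1 failed, 2 passed in 32.12s =="
# yields failed=1, success=2 and time_spent="32.12s"; the trailing "==" token
# triggers the expressions[-2] branch above.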
def _lowerCamelCase ( lowercase : str ) -> Optional[Any]:
_a = {}
_a = None
_a = False
for line in failures_short_lines.split("\n" ):
if re.search(r"_ \[doctest\]" , lowercase ):
_a = True
_a = line.split(" " )[2]
elif in_error and not line.split(" " )[0].isdigit():
_a = line
_a = False
return failures
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self : Tuple , __a : str , __a : Dict ):
_a = title
_a = doc_test_results["time_spent"].split("," )[0]
_a = doc_test_results["success"]
_a = doc_test_results["failures"]
_a = self.n_success + self.n_failures
# Failures and success of the modeling tests
_a = doc_test_results
@property
def UpperCamelCase__ ( self : int ):
_a = [self._time_spent]
_a = 0
for time in time_spent:
_a = time.split(":" )
# Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
if len(__a ) == 1:
_a = [0, 0, time_parts[0]]
_a , _a , _a = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
total_secs += hours * 36_00 + minutes * 60 + seconds
_a , _a , _a = total_secs // 36_00, (total_secs % 36_00) // 60, total_secs % 60
return f'{int(__a )}h{int(__a )}m{int(__a )}s'
@property
def UpperCamelCase__ ( self : Optional[Any] ):
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
def UpperCamelCase__ ( self : Optional[Any] ):
return {
"type": "section",
"text": {
"type": "plain_text",
"text": f'🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.',
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
@property
def UpperCamelCase__ ( self : List[str] ):
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
f'There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in'
f' {self.time}.'
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
@property
def UpperCamelCase__ ( self : str ):
_a = 40
_a = {k: v["failed"] for k, v in doc_test_results.items() if isinstance(__a , __a )}
_a = ""
for category, failures in category_failures.items():
if len(__a ) == 0:
continue
if report != "":
report += "\n\n"
report += f'*{category} failures*:'.ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
report += "`"
report += "`\n`".join(__a )
report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": f'The following examples had failures:\n\n\n{report}\n',
},
}
@property
def UpperCamelCase__ ( self : List[str] ):
_a = [self.header]
if self.n_failures > 0:
blocks.append(self.failures )
if self.n_failures > 0:
blocks.extend([self.category_failures] )
if self.n_failures == 0:
blocks.append(self.no_failures )
return json.dumps(__a )
@staticmethod
def UpperCamelCase__ ( ):
_a = [
{
"type": "section",
"text": {
"type": "plain_text",
"text": "There was an issue running the tests.",
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
]
print("Sending the following payload" )
print(json.dumps({"blocks": json.loads(__a )} ) )
client.chat_postMessage(
channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"] , text="There was an issue running the tests." , blocks=__a , )
def UpperCamelCase__ ( self : Tuple ):
print("Sending the following payload" )
print(json.dumps({"blocks": json.loads(self.payload )} ) )
_a = f'{self.n_failures} failures out of {self.n_tests} tests,' if self.n_failures else "All tests passed."
_a = client.chat_postMessage(
channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"] , blocks=self.payload , text=__a , )
def UpperCamelCase__ ( self : Dict , __a : List[str] , __a : List[Any] , __a : Tuple , __a : int ):
_a = ""
for key, value in failures.items():
_a = value[:2_00] + " [Truncated]" if len(__a ) > 2_50 else value
failures_text += f'*{key}*\n_{value}_\n\n'
_a = job_name
_a = {"type": "section", "text": {"type": "mrkdwn", "text": text}}
if job_link is not None:
_a = {
"type": "button",
"text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True},
"url": job_link,
}
return [
{"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
content,
{"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
]
def UpperCamelCase__ ( self : str ):
if self.thread_ts is None:
raise ValueError("Can only post reply if a post has been made." )
_a = self.doc_test_results.pop("job_link" )
self.doc_test_results.pop("failures" )
self.doc_test_results.pop("success" )
self.doc_test_results.pop("time_spent" )
        _a = sorted(self.doc_test_results.items() , key=lambda t : t[0] )
for job, job_result in sorted_dict:
if len(job_result["failures"] ):
_a = f'*Num failures* :{len(job_result["failed"] )} \n'
_a = job_result["failures"]
_a = self.get_reply_blocks(__a , __a , __a , text=__a )
print("Sending the following reply" )
print(json.dumps({"blocks": blocks} ) )
client.chat_postMessage(
channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"] , text=f'Results for {job}' , blocks=__a , thread_ts=self.thread_ts["ts"] , )
time.sleep(1 )
def _lowerCamelCase ( ) -> Any:
_a = os.environ["GITHUB_RUN_ID"]
_a = F'https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100'
_a = requests.get(lowercase ).json()
_a = {}
try:
jobs.update({job["name"]: job["html_url"] for job in result["jobs"]} )
_a = math.ceil((result["total_count"] - 100) / 100 )
for i in range(lowercase ):
_a = requests.get(url + F'&page={i + 2}' ).json()
jobs.update({job["name"]: job["html_url"] for job in result["jobs"]} )
return jobs
except Exception as e:
print("Unknown error, could not fetch links." , lowercase )
return {}
def _lowerCamelCase ( lowercase : str ) -> Dict:
_a = {}
if os.path.exists(lowercase ):
_a = os.listdir(lowercase )
for file in files:
try:
with open(os.path.join(lowercase , lowercase ) , encoding="utf-8" ) as f:
_a = f.read()
except UnicodeDecodeError as e:
raise ValueError(F'Could not open {os.path.join(lowercase , lowercase )}.' ) from e
return _artifact
def _lowerCamelCase ( ) -> str:
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self : Dict , __a : str ):
_a = name
_a = []
def __str__( self : List[str] ):
return self.name
def UpperCamelCase__ ( self : str , __a : str ):
self.paths.append({"name": self.name, "path": path} )
_a = {}
_a = filter(os.path.isdir , os.listdir() )
for directory in directories:
_a = directory
if artifact_name not in _available_artifacts:
_a = Artifact(lowercase )
_available_artifacts[artifact_name].add_path(lowercase )
return _available_artifacts
if __name__ == "__main__":
lowerCAmelCase_ : List[Any] = get_job_links()
lowerCAmelCase_ : Any = retrieve_available_artifacts()
lowerCAmelCase_ : List[str] = collections.OrderedDict(
[
('*.py', 'API Examples'),
('*.md', 'MD Examples'),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
lowerCAmelCase_ : Optional[Any] = {
v: {
'failed': [],
'failures': {},
}
for v in docs.values()
}
# Link to the GitHub Action job
lowerCAmelCase_ : int = github_actions_job_links.get('run_doctests')
lowerCAmelCase_ : Union[str, Any] = available_artifacts['doc_tests_gpu_test_reports'].paths[0]
lowerCAmelCase_ : List[str] = retrieve_artifact(artifact_path['name'])
if "stats" in artifact:
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : Optional[int] = handle_test_results(artifact['stats'])
lowerCAmelCase_ : List[str] = failed
lowerCAmelCase_ : Optional[Any] = success
lowerCAmelCase_ : Tuple = time_spent[1:-1] + ', '
lowerCAmelCase_ : List[Any] = extract_first_line_failure(artifact['failures_short'])
for line in artifact["summary_short"].split('\n'):
if re.search('FAILED', line):
lowerCAmelCase_ : int = line.replace('FAILED ', '')
lowerCAmelCase_ : Optional[int] = line.split()[0].replace('\n', '')
if "::" in line:
lowerCAmelCase_ , lowerCAmelCase_ : str = line.split('::')
else:
lowerCAmelCase_ , lowerCAmelCase_ : Optional[int] = line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
lowerCAmelCase_ : Union[str, Any] = docs[file_regex]
doc_test_results[category]["failed"].append(test)
lowerCAmelCase_ : List[str] = all_failures[test] if test in all_failures else 'N/A'
lowerCAmelCase_ : Optional[Any] = failure
break
lowerCAmelCase_ : Tuple = Message('🤗 Results of the doc tests.', doc_test_results)
message.post()
message.post_reply()
| 346 | 1 |
'''simple docstring'''
from PIL import Image
def _lowerCamelCase ( lowercase : Image , lowercase : int ) -> Image:
_a = (259 * (level + 255)) / (255 * (259 - level))
def contrast(lowercase : int ) -> int:
return int(128 + factor * (c - 128) )
return img.point(lowercase )
if __name__ == "__main__":
# Load image
with Image.open('image_data/lena.jpg') as img:
# Change contrast to 170
lowerCAmelCase_ : int = change_contrast(img, 1_70)
cont_img.save('image_data/lena_high_contrast.png', format='png')
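# For comparison (illustrative): Pillow ships a built-in enhancer with a
# similar effect, e.g.
#
#     from PIL import ImageEnhance
#     bright = ImageEnhance.Contrast(img).enhance(1.5)  # factor > 1 raises contrast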
| 346 |
'''simple docstring'''
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def _lowerCamelCase ( ) -> str:
_a = HfArgumentParser(lowercase )
_a = parser.parse_args_into_dataclasses()[0]
_a = TensorFlowBenchmark(args=lowercase )
try:
_a = parser.parse_args_into_dataclasses()[0]
except ValueError as e:
_a = "Arg --no_{0} is no longer used, please use --no-{0} instead."
_a = " ".join(str(lowercase ).split(" " )[:-1] )
_a = ""
_a = eval(str(lowercase ).split(" " )[-1] )
_a = []
for arg in depreciated_args:
# arg[2:] removes '--'
if arg[2:] in TensorFlowBenchmark.deprecated_args:
# arg[5:] removes '--no_'
full_error_msg += arg_error_msg.format(arg[5:] )
else:
wrong_args.append(lowercase )
if len(lowercase ) > 0:
_a = full_error_msg + begin_error_msg + str(lowercase )
raise ValueError(lowercase )
benchmark.run()
if __name__ == "__main__":
main()
| 346 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCAmelCase_ : int = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
__a ='maskformer-swin'
__a ={
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self : Optional[Any] , __a : Optional[Any]=2_24 , __a : Any=4 , __a : Tuple=3 , __a : Optional[int]=96 , __a : Any=[2, 2, 6, 2] , __a : Tuple=[3, 6, 12, 24] , __a : str=7 , __a : str=4.0 , __a : List[Any]=True , __a : Union[str, Any]=0.0 , __a : Any=0.0 , __a : Optional[int]=0.1 , __a : List[Any]="gelu" , __a : int=False , __a : str=0.02 , __a : Tuple=1e-5 , __a : Optional[int]=None , __a : List[str]=None , **__a : Optional[Any] , ):
super().__init__(**__a )
_a = image_size
_a = patch_size
_a = num_channels
_a = embed_dim
_a = depths
_a = len(__a )
_a = num_heads
_a = window_size
_a = mlp_ratio
_a = qkv_bias
_a = hidden_dropout_prob
_a = attention_probs_dropout_prob
_a = drop_path_rate
_a = hidden_act
_a = use_absolute_embeddings
_a = layer_norm_eps
_a = initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
_a = int(embed_dim * 2 ** (len(__a ) - 1) )
_a = ["stem"] + [f'stage{idx}' for idx in range(1 , len(__a ) + 1 )]
_a , _a = get_aligned_output_features_output_indices(
out_features=__a , out_indices=__a , stage_names=self.stage_names )
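# Instantiation sketch (illustrative; the de-obfuscated class corresponds to
# transformers' MaskFormerSwinConfig, and the defaults above match a Swin-T
# backbone):
#
#     config = MaskFormerSwinConfig(
#         embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24],
#         out_features=["stage1", "stage4"],
#     )
#     assert config.hidden_size == 96 * 2 ** 3  # channel width of the last stage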
| 346 |
'''simple docstring'''
import logging
import os
import threading
import time
try:
import warnings
except ImportError:
lowerCAmelCase_ : Union[str, Any] = None
try:
import msvcrt
except ImportError:
lowerCAmelCase_ : Tuple = None
try:
import fcntl
except ImportError:
lowerCAmelCase_ : Optional[int] = None
# Backward compatibility
# ------------------------------------------------
try:
TimeoutError
except NameError:
lowerCAmelCase_ : Any = OSError
# Data
# ------------------------------------------------
lowerCAmelCase_ : Tuple = [
'Timeout',
'BaseFileLock',
'WindowsFileLock',
'UnixFileLock',
'SoftFileLock',
'FileLock',
]
lowerCAmelCase_ : Optional[int] = '3.0.12'
lowerCAmelCase_ : Tuple = None
def _lowerCamelCase ( ) -> Optional[int]:
global _logger
_a = _logger or logging.getLogger(__name__ )
return _logger
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
def __init__( self : Dict , __a : Optional[Any] ):
_a = lock_file
return None
def __str__( self : Any ):
_a = f'The file lock \'{self.lock_file}\' could not be acquired.'
return temp
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self : List[Any] , __a : Optional[int] ):
_a = lock
return None
def __enter__( self : str ):
return self.lock
def __exit__( self : List[Any] , __a : List[Any] , __a : Union[str, Any] , __a : Dict ):
self.lock.release()
return None
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self : Union[str, Any] , __a : Union[str, Any] , __a : Optional[int]=-1 , __a : Tuple=None ):
_a = max_filename_length if max_filename_length is not None else 2_55
# Hash the filename if it's too long
_a = self.hash_filename_if_too_long(__a , __a )
# The path to the lock file.
_a = lock_file
# The file descriptor for the *_lock_file* as it is returned by the
# os.open() function.
# This file lock is only NOT None, if the object currently holds the
# lock.
_a = None
# The default timeout value.
_a = timeout
# We use this lock primarily for the lock counter.
_a = threading.Lock()
# The lock counter is used for implementing the nested locking
# mechanism. Whenever the lock is acquired, the counter is increased and
# the lock is only released, when this value is 0 again.
_a = 0
return None
@property
def UpperCamelCase__ ( self : Optional[Any] ):
return self._lock_file
@property
def UpperCamelCase__ ( self : List[Any] ):
return self._timeout
@timeout.setter
def UpperCamelCase__ ( self : int , __a : List[Any] ):
_a = float(__a )
return None
def UpperCamelCase__ ( self : Dict ):
raise NotImplementedError()
def UpperCamelCase__ ( self : str ):
raise NotImplementedError()
@property
def UpperCamelCase__ ( self : Optional[Any] ):
return self._lock_file_fd is not None
def UpperCamelCase__ ( self : int , __a : int=None , __a : Tuple=0.05 ):
# Use the default timeout, if no timeout is provided.
if timeout is None:
_a = self.timeout
# Increment the number right at the beginning.
# We can still undo it, if something fails.
with self._thread_lock:
self._lock_counter += 1
_a = id(self )
_a = self._lock_file
_a = time.time()
try:
while True:
with self._thread_lock:
if not self.is_locked:
logger().debug(f'Attempting to acquire lock {lock_id} on {lock_filename}' )
self._acquire()
if self.is_locked:
logger().debug(f'Lock {lock_id} acquired on {lock_filename}' )
break
elif timeout >= 0 and time.time() - start_time > timeout:
logger().debug(f'Timeout on acquiring lock {lock_id} on {lock_filename}' )
raise Timeout(self._lock_file )
else:
logger().debug(
f'Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...' )
time.sleep(__a )
except: # noqa
# Something did go wrong, so decrement the counter.
with self._thread_lock:
_a = max(0 , self._lock_counter - 1 )
raise
return _Acquire_ReturnProxy(lock=self )
def UpperCamelCase__ ( self : Union[str, Any] , __a : int=False ):
with self._thread_lock:
if self.is_locked:
self._lock_counter -= 1
if self._lock_counter == 0 or force:
_a = id(self )
_a = self._lock_file
logger().debug(f'Attempting to release lock {lock_id} on {lock_filename}' )
self._release()
_a = 0
logger().debug(f'Lock {lock_id} released on {lock_filename}' )
return None
def __enter__( self : List[Any] ):
self.acquire()
return self
def __exit__( self : str , __a : str , __a : Dict , __a : Dict ):
self.release()
return None
def __del__( self : int ):
self.release(force=__a )
return None
def UpperCamelCase__ ( self : Tuple , __a : str , __a : int ):
_a = os.path.basename(__a )
if len(__a ) > max_length and max_length > 0:
_a = os.path.dirname(__a )
_a = str(hash(__a ) )
_a = filename[: max_length - len(__a ) - 8] + "..." + hashed_filename + ".lock"
return os.path.join(__a , __a )
else:
return path
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
def __init__( self : int , __a : str , __a : List[Any]=-1 , __a : List[Any]=None ):
from .file_utils import relative_to_absolute_path
super().__init__(__a , timeout=__a , max_filename_length=__a )
_a = "\\\\?\\" + relative_to_absolute_path(self.lock_file )
def UpperCamelCase__ ( self : int ):
_a = os.O_RDWR | os.O_CREAT | os.O_TRUNC
try:
_a = os.open(self._lock_file , __a )
except OSError:
pass
else:
try:
msvcrt.locking(__a , msvcrt.LK_NBLCK , 1 )
except OSError:
os.close(__a )
else:
_a = fd
return None
def UpperCamelCase__ ( self : Optional[Any] ):
_a = self._lock_file_fd
_a = None
msvcrt.locking(__a , msvcrt.LK_UNLCK , 1 )
os.close(__a )
try:
os.remove(self._lock_file )
# Probably another instance of the application
# that acquired the file lock.
except OSError:
pass
return None
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
def __init__( self : List[str] , __a : Optional[Any] , __a : Union[str, Any]=-1 , __a : int=None ):
_a = os.statvfs(os.path.dirname(__a ) ).f_namemax
super().__init__(__a , timeout=__a , max_filename_length=__a )
def UpperCamelCase__ ( self : Any ):
_a = os.O_RDWR | os.O_CREAT | os.O_TRUNC
_a = os.open(self._lock_file , __a )
try:
fcntl.flock(__a , fcntl.LOCK_EX | fcntl.LOCK_NB )
except OSError:
os.close(__a )
else:
_a = fd
return None
def UpperCamelCase__ ( self : Tuple ):
# Do not remove the lockfile:
#
# https://github.com/benediktschmitt/py-filelock/issues/31
# https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition
_a = self._lock_file_fd
_a = None
fcntl.flock(__a , fcntl.LOCK_UN )
os.close(__a )
return None
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
def UpperCamelCase__ ( self : Union[str, Any] ):
_a = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
try:
_a = os.open(self._lock_file , __a )
except OSError:
pass
else:
_a = fd
return None
def UpperCamelCase__ ( self : Union[str, Any] ):
os.close(self._lock_file_fd )
_a = None
try:
os.remove(self._lock_file )
# The file is already deleted and that's what we want.
except OSError:
pass
return None
lowerCAmelCase_ : str = None
if msvcrt:
lowerCAmelCase_ : List[str] = WindowsFileLock
elif fcntl:
lowerCAmelCase_ : List[str] = UnixFileLock
else:
lowerCAmelCase_ : int = SoftFileLock
if warnings is not None:
warnings.warn('only soft file lock is available')
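# Usage sketch (illustrative; the classes above mirror the py-filelock API):
#
#     lock = FileLock("high_ground.txt.lock", timeout=10)
#     with lock:  # blocks until acquired, or raises Timeout after 10 s
#         ...     # critical section; the lock is re-entrant via the counter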
| 346 | 1 |
'''simple docstring'''
from __future__ import annotations
import math
def _lowerCamelCase ( lowercase : int ) -> list[int]:
if num <= 0:
_a = F'{num}: Invalid input, please enter a positive integer.'
raise ValueError(lowercase )
_a = [True] * (num + 1)
_a = []
_a = 2
_a = int(math.sqrt(lowercase ) )
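    # candidates only need to run up to sqrt(num): any composite <= num has at
    # least one prime factor <= sqrt(num), so it is already marked by then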
while start <= end:
# If start is a prime
if sieve[start] is True:
prime.append(lowercase )
# Set multiples of start be False
for i in range(start * start , num + 1 , lowercase ):
if sieve[i] is True:
_a = False
start += 1
for j in range(end + 1 , num + 1 ):
if sieve[j] is True:
prime.append(lowercase )
return prime
if __name__ == "__main__":
print(prime_sieve(int(input('Enter a positive integer: ').strip())))
| 346 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
__a =42 # [batch_size x 3]
__a =42 # [batch_size x 3]
__a =42 # [batch_size x 3]
__a =42 # [batch_size x 3]
__a =42
__a =42
__a =42
__a =42
__a =42
def UpperCamelCase__ ( self : str ):
assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2
def UpperCamelCase__ ( self : List[str] ):
return torch.from_numpy(np.array([self.width, self.height] , dtype=np.floataa ) )
def UpperCamelCase__ ( self : Union[str, Any] ):
return torch.from_numpy(np.array([self.x_fov, self.y_fov] , dtype=np.floataa ) )
def UpperCamelCase__ ( self : Union[str, Any] ):
_a = torch.arange(self.height * self.width )
_a = torch.stack(
[
pixel_indices % self.width,
torch.div(__a , self.width , rounding_mode="trunc" ),
] , axis=1 , )
return coords
@property
def UpperCamelCase__ ( self : List[Any] ):
_a , *_a = self.shape
_a = int(np.prod(__a ) )
_a = self.get_image_coords()
_a = torch.broadcast_to(coords.unsqueeze(0 ) , [batch_size * inner_batch_size, *coords.shape] )
_a = self.get_camera_rays(__a )
_a = rays.view(__a , inner_batch_size * self.height * self.width , 2 , 3 )
return rays
def UpperCamelCase__ ( self : Dict , __a : torch.Tensor ):
_a , *_a , _a = coords.shape
assert n_coords == 2
assert batch_size == self.origin.shape[0]
_a = coords.view(__a , -1 , 2 )
_a = self.resolution()
_a = self.fov()
_a = (flat.float() / (res - 1)) * 2 - 1
_a = fracs * torch.tan(fov / 2 )
_a = fracs.view(__a , -1 , 2 )
_a = (
self.z.view(__a , 1 , 3 )
+ self.x.view(__a , 1 , 3 ) * fracs[:, :, :1]
+ self.y.view(__a , 1 , 3 ) * fracs[:, :, 1:]
)
_a = directions / directions.norm(dim=-1 , keepdim=__a )
_a = torch.stack(
[
torch.broadcast_to(self.origin.view(__a , 1 , 3 ) , [batch_size, directions.shape[1], 3] ),
directions,
] , dim=2 , )
return rays.view(__a , *__a , 2 , 3 )
def UpperCamelCase__ ( self : Dict , __a : int , __a : int ):
assert width * self.height == height * self.width, "The aspect ratio should not change."
return DifferentiableProjectiveCamera(
origin=self.origin , x=self.x , y=self.y , z=self.z , width=__a , height=__a , x_fov=self.x_fov , y_fov=self.y_fov , )
def _lowerCamelCase ( lowercase : int ) -> DifferentiableProjectiveCamera:
_a = []
_a = []
_a = []
_a = []
for theta in np.linspace(0 , 2 * np.pi , num=20 ):
_a = np.array([np.sin(lowercase ), np.cos(lowercase ), -0.5] )
z /= np.sqrt(np.sum(z**2 ) )
_a = -z * 4
_a = np.array([np.cos(lowercase ), -np.sin(lowercase ), 0.0] )
_a = np.cross(lowercase , lowercase )
origins.append(lowercase )
xs.append(lowercase )
ys.append(lowercase )
zs.append(lowercase )
return DifferentiableProjectiveCamera(
origin=torch.from_numpy(np.stack(lowercase , axis=0 ) ).float() , x=torch.from_numpy(np.stack(lowercase , axis=0 ) ).float() , y=torch.from_numpy(np.stack(lowercase , axis=0 ) ).float() , z=torch.from_numpy(np.stack(lowercase , axis=0 ) ).float() , width=lowercase , height=lowercase , x_fov=0.7 , y_fov=0.7 , shape=(1, len(lowercase )) , )
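# Single-ray sketch (illustrative, plain numpy) of the pinhole model in the
# ray-construction method above: a pixel is mapped to [-1, 1], scaled by
# tan(fov / 2), and used to tilt the view axis z along the image-plane axes
# x and y (origin, x, y, z stand for one camera's frame vectors):
#
#     fx = (px / (width - 1)) * 2 - 1
#     fy = (py / (height - 1)) * 2 - 1
#     direction = z + x * fx * np.tan(x_fov / 2) + y * fy * np.tan(y_fov / 2)
#     direction /= np.linalg.norm(direction)   # unit ray through pixel (px, py)
#     ray = np.stack([origin, direction])      # matches the (2, 3) layout above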
| 346 | 1 |
'''simple docstring'''
import json
import os
import re
import sys
import urllib.request
import requests
from bs4 import BeautifulSoup
lowerCAmelCase_ : Tuple = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582'
}
def _lowerCamelCase ( lowercase : str = "dhaka" , lowercase : int = 5 ) -> int:
_a = min(lowercase , 50 ) # Prevent abuse!
_a = {
"q": query,
"tbm": "isch",
"hl": "en",
"ijn": "0",
}
_a = requests.get("https://www.google.com/search" , params=lowercase , headers=lowercase )
_a = BeautifulSoup(html.text , "html.parser" )
_a = "".join(
re.findall(r"AF_initDataCallback\(([^<]+)\);" , str(soup.select("script" ) ) ) )
_a = json.dumps(lowercase )
_a = json.loads(lowercase )
_a = re.findall(
r"\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\"," , lowercase , )
if not matched_google_image_data:
return 0
_a = re.sub(
r"\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]" , "" , str(lowercase ) , )
_a = re.findall(
r"(?:'|,),\[\"(https:|http.*?)\",\d+,\d+\]" , lowercase , )
for index, fixed_full_res_image in enumerate(lowercase ):
if index >= max_images:
return index
_a = bytes(lowercase , "ascii" ).decode(
"unicode-escape" )
_a = bytes(lowercase , "ascii" ).decode(
"unicode-escape" )
_a = urllib.request.build_opener()
_a = [
(
"User-Agent",
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
" (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582",
)
]
urllib.request.install_opener(lowercase )
_a = F'query_{query.replace(" " , "_" )}'
if not os.path.exists(lowercase ):
os.makedirs(lowercase )
urllib.request.urlretrieve( # noqa: S310
lowercase , F'{path_name}/original_size_img_{index}.jpg' )
return index
if __name__ == "__main__":
try:
lowerCAmelCase_ : str = download_images_from_google_query(sys.argv[1])
print(f"""{image_count} images were downloaded to disk.""")
except IndexError:
print('Please provide a search term.')
raise
| 346 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
lowerCAmelCase_ : List[str] = TypeVar('T')
lowerCAmelCase_ : Dict = TypeVar('U')
class __SCREAMING_SNAKE_CASE (Generic[T, U] ):
"""simple docstring"""
def __init__( self : Union[str, Any] , __a : T | None , __a : U | None ):
_a = key
_a = val
_a = None
_a = None
def __repr__( self : Any ):
return (
f'Node: key: {self.key}, val: {self.val}, '
f'has next: {bool(self.next )}, has prev: {bool(self.prev )}'
)
class __SCREAMING_SNAKE_CASE (Generic[T, U] ):
"""simple docstring"""
def __init__( self : Dict ):
_a = DoubleLinkedListNode(__a , __a )
_a = DoubleLinkedListNode(__a , __a )
_a , _a = self.rear, self.head
def __repr__( self : str ):
_a = ["DoubleLinkedList"]
_a = self.head
while node.next is not None:
rep.append(str(__a ) )
_a = node.next
rep.append(str(self.rear ) )
return ",\n ".join(__a )
def UpperCamelCase__ ( self : int , __a : DoubleLinkedListNode[T, U] ):
_a = self.rear.prev
# All nodes other than self.head are guaranteed to have non-None previous
assert previous is not None
_a = node
_a = previous
_a = node
_a = self.rear
def UpperCamelCase__ ( self : Any , __a : DoubleLinkedListNode[T, U] ):
if node.prev is None or node.next is None:
return None
_a = node.next
_a = node.prev
_a = None
_a = None
return node
class __SCREAMING_SNAKE_CASE (Generic[T, U] ):
"""simple docstring"""
__a ={}
def __init__( self : Union[str, Any] , __a : int ):
_a = DoubleLinkedList()
_a = capacity
_a = 0
_a = 0
_a = 0
_a = {}
def __repr__( self : Optional[int] ):
return (
f'CacheInfo(hits={self.hits}, misses={self.miss}, '
f'capacity={self.capacity}, current size={self.num_keys})'
)
def __contains__( self : str , __a : T ):
return key in self.cache
def UpperCamelCase__ ( self : str , __a : T ):
# Note: pythonic interface would throw KeyError rather than return None
if key in self.cache:
self.hits += 1
_a = self.cache[key]
_a = self.list.remove(self.cache[key] )
assert node == value_node
# node is guaranteed not None because it is in self.cache
assert node is not None
self.list.add(__a )
return node.val
self.miss += 1
return None
def UpperCamelCase__ ( self : Tuple , __a : T , __a : U ):
if key not in self.cache:
if self.num_keys >= self.capacity:
# delete first node (oldest) when over capacity
_a = self.list.head.next
# guaranteed to have a non-None first node when num_keys > 0
# explain to type checker via assertions
assert first_node is not None
assert first_node.key is not None
assert (
self.list.remove(__a ) is not None
                ) # node guaranteed to be in list
del self.cache[first_node.key]
self.num_keys -= 1
_a = DoubleLinkedListNode(__a , __a )
self.list.add(self.cache[key] )
self.num_keys += 1
else:
# bump node to the end of the list, update value
_a = self.list.remove(self.cache[key] )
assert node is not None # node guaranteed to be in list
_a = value
self.list.add(__a )
@classmethod
    def UpperCamelCase__ ( cls : Tuple , size : int = 1_28 ):
        def cache_decorator_inner(func : Callable[[T], U] ) -> Callable[..., U]:
            def cache_decorator_wrapper(*args : T ) -> U:
                if func not in cls.decorator_function_to_instance_map:
                    cls.decorator_function_to_instance_map[func] = cls(size )
                result = cls.decorator_function_to_instance_map[func].get(args[0] )
                if result is None:
                    result = func(*args )
                    cls.decorator_function_to_instance_map[func].put(args[0] , result )
                return result
            def cache_info() -> "LRUCache[T, U]":
                return cls.decorator_function_to_instance_map[func]
            setattr(cache_decorator_wrapper , "cache_info" , cache_info ) # noqa: B010
            return cache_decorator_wrapper
        return cache_decorator_inner
if __name__ == "__main__":
import doctest
doctest.testmod()
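# Decorator usage sketch (illustrative; names de-obfuscated -- the classmethod
# above mirrors functools.lru_cache but keys the cache on the first positional
# argument only):
#
#     @LRUCache.decorator(100)
#     def fib(n):
#         return n if n < 2 else fib(n - 1) + fib(n - 2)
#
#     fib(30)
#     print(fib.cache_info())  # CacheInfo(hits=..., misses=..., capacity=100, ...)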
| 346 | 1 |
'''simple docstring'''
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase_ : str = '▁'
lowerCAmelCase_ : Optional[int] = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ , unittest.TestCase ):
"""simple docstring"""
__a =BertGenerationTokenizer
__a =False
__a =True
def UpperCamelCase__ ( self : Optional[Any] ):
super().setUp()
_a = BertGenerationTokenizer(__a , keep_accents=__a )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase__ ( self : Tuple ):
_a = "<s>"
_a = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__a ) , __a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__a ) , __a )
def UpperCamelCase__ ( self : List[str] ):
_a = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<unk>" )
self.assertEqual(vocab_keys[1] , "<s>" )
self.assertEqual(vocab_keys[-1] , "<pad>" )
self.assertEqual(len(__a ) , 10_02 )
def UpperCamelCase__ ( self : str ):
self.assertEqual(self.get_tokenizer().vocab_size , 10_00 )
def UpperCamelCase__ ( self : Tuple ):
_a = BertGenerationTokenizer(__a , keep_accents=__a )
_a = tokenizer.tokenize("This is a test" )
self.assertListEqual(__a , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__a ) , [2_85, 46, 10, 1_70, 3_82] , )
_a = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
__a , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
_a = tokenizer.convert_tokens_to_ids(__a )
self.assertListEqual(
__a , [8, 21, 84, 55, 24, 19, 7, 0, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
_a = tokenizer.convert_ids_to_tokens(__a )
self.assertListEqual(
__a , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
@cached_property
def UpperCamelCase__ ( self : Any ):
return BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" )
@slow
def UpperCamelCase__ ( self : List[str] ):
_a = "Hello World!"
_a = [1_85_36, 22_60, 1_01]
self.assertListEqual(__a , self.big_tokenizer.encode(__a ) )
@slow
def UpperCamelCase__ ( self : Optional[int] ):
_a = (
"This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
)
_a = [
8_71,
4_19,
3_58,
9_46,
9_91,
25_21,
4_52,
3_58,
13_57,
3_87,
77_51,
35_36,
1_12,
9_85,
4_56,
1_26,
8_65,
9_38,
54_00,
57_34,
4_58,
13_68,
4_67,
7_86,
24_62,
52_46,
11_59,
6_33,
8_65,
45_19,
4_57,
5_82,
8_52,
25_57,
4_27,
9_16,
5_08,
4_05,
3_43_24,
4_97,
3_91,
4_08,
1_13_42,
12_44,
3_85,
1_00,
9_38,
9_85,
4_56,
5_74,
3_62,
1_25_97,
32_00,
31_29,
11_72,
]
self.assertListEqual(__a , self.big_tokenizer.encode(__a ) )
@require_torch
@slow
def UpperCamelCase__ ( self : Tuple ):
import torch
from transformers import BertGenerationConfig, BertGenerationEncoder
# Build sequence
_a = list(self.big_tokenizer.get_vocab().keys() )[:10]
_a = " ".join(__a )
_a = self.big_tokenizer.encode_plus(__a , return_tensors="pt" , return_token_type_ids=__a )
_a = self.big_tokenizer.batch_encode_plus(
[sequence + " " + sequence] , return_tensors="pt" , return_token_type_ids=__a )
_a = BertGenerationConfig()
_a = BertGenerationEncoder(__a )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**__a )
model(**__a )
@slow
def UpperCamelCase__ ( self : Optional[int] ):
# fmt: off
_a = {"input_ids": [[3_92_86, 4_58, 3_63_35, 20_01, 4_56, 1_30_73, 1_32_66, 4_55, 1_13, 77_46, 17_41, 1_11_57, 3_91, 1_30_73, 1_32_66, 4_55, 1_13, 39_67, 3_54_12, 1_13, 49_36, 1_09, 38_70, 23_77, 1_13, 3_00_84, 4_57_20, 4_58, 1_34, 1_74_96, 1_12, 5_03, 1_16_72, 1_13, 1_18, 1_12, 56_65, 1_33_47, 3_86_87, 1_12, 14_96, 3_13_89, 1_12, 32_68, 4_72_64, 1_34, 9_62, 1_12, 1_63_77, 80_35, 2_31_30, 4_30, 1_21_69, 1_55_18, 2_85_92, 4_58, 1_46, 4_16_97, 1_09, 3_91, 1_21_69, 1_55_18, 1_66_89, 4_58, 1_46, 4_13_58, 1_09, 4_52, 7_26, 40_34, 1_11, 7_63, 3_54_12, 50_82, 3_88, 19_03, 1_11, 90_51, 3_91, 28_70, 4_89_18, 19_00, 11_23, 5_50, 9_98, 1_12, 95_86, 1_59_85, 4_55, 3_91, 4_10, 2_29_55, 3_76_36, 1_14], [4_48, 1_74_96, 4_19, 36_63, 3_85, 7_63, 1_13, 2_75_33, 28_70, 32_83, 1_30_43, 16_39, 2_47_13, 5_23, 6_56, 2_40_13, 1_85_50, 25_21, 5_17, 2_70_14, 2_12_44, 4_20, 12_12, 14_65, 3_91, 9_27, 48_33, 3_88, 5_78, 1_17_86, 1_14, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [4_84, 21_69, 76_87, 2_19_32, 1_81_46, 7_26, 3_63, 1_70_32, 33_91, 1_14, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__a , model_name="google/bert_for_seq_generation_L-24_bbc_encoder" , revision="c817d1fd1be2ffa69431227a1fe320544943d4db" , )
| 346 |
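# A minimal SentencePiece round trip with the sentencepiece library directly,
# mirroring what the tokenizer test above exercises. The model path is a
# placeholder assumption -- any trained .model file works.
import sentencepiece as spm

sp = spm.SentencePieceProcessor(model_file="fixtures/test_sentencepiece.model")
pieces = sp.encode("This is a test", out_type=str)  # e.g. ['▁This', '▁is', ...]
ids = sp.encode("This is a test", out_type=int)
print(sp.decode(ids))  # round trip is lossless for in-vocabulary text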
'''simple docstring'''
import re
from filelock import FileLock
try:
import nltk
lowerCAmelCase_ : Optional[int] = True
except (ImportError, ModuleNotFoundError):
lowerCAmelCase_ : Tuple = False
if NLTK_AVAILABLE:
with FileLock('.lock') as lock:
nltk.download('punkt', quiet=True)
def _lowerCamelCase ( lowercase : str ) -> str:
    _a = re.sub("<n>" , "" , lowercase ) # remove pegasus newline char (keep the result; re.sub does not modify in place)
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(_a ) )
| 346 | 1 |
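# A quick illustration of what the sentence splitter above produces, assuming
# nltk and its punkt data are installed; the sample text is made up.
import nltk

nltk.download("punkt", quiet=True)
text = "Pegasus marks newlines with <n>.<n>Strip the marker first. Then split into sentences."
cleaned = text.replace("<n>", " ")
print("\n".join(nltk.sent_tokenize(cleaned)))  # one sentence per line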
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
lowerCAmelCase_ : Any = 16
lowerCAmelCase_ : List[Any] = 32
def _lowerCamelCase ( lowercase : Accelerator , lowercase : int = 16 ) -> Optional[Any]:
_a = AutoTokenizer.from_pretrained("bert-base-cased" )
_a = load_dataset("glue" , "mrpc" )
def tokenize_function(lowercase : List[str] ):
# max_length=None => use the model max length (it's actually the default)
_a = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=lowercase , max_length=lowercase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
_a = datasets.map(
lowercase , batched=lowercase , remove_columns=["idx", "sentence1", "sentence2"] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
_a = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(lowercase : Dict ):
# On TPU it's best to pad everything to the same length or training will be very slow.
_a = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
_a = 16
elif accelerator.mixed_precision != "no":
_a = 8
else:
_a = None
return tokenizer.pad(
lowercase , padding="longest" , max_length=lowercase , pad_to_multiple_of=lowercase , return_tensors="pt" , )
# Instantiate dataloaders.
_a = DataLoader(
tokenized_datasets["train"] , shuffle=lowercase , collate_fn=lowercase , batch_size=lowercase )
_a = DataLoader(
tokenized_datasets["validation"] , shuffle=lowercase , collate_fn=lowercase , batch_size=lowercase )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
lowerCAmelCase_ : List[str] = mocked_dataloaders # noqa: F811
def _lowerCamelCase ( lowercase : Optional[Any] , lowercase : Union[str, Any] ) -> Optional[Any]:
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS" , lowercase ) == "1":
_a = 2
# Initialize accelerator
_a = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_a = config["lr"]
_a = int(config["num_epochs"] )
_a = int(config["seed"] )
_a = int(config["batch_size"] )
_a = evaluate.load("glue" , "mrpc" )
# If the batch size is too big we use gradient accumulation
_a = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
_a = batch_size // MAX_GPU_BATCH_SIZE
_a = MAX_GPU_BATCH_SIZE
set_seed(lowercase )
_a , _a = get_dataloaders(lowercase , lowercase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
_a = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=lowercase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
_a = model.to(accelerator.device )
# Instantiate optimizer
_a = AdamW(params=model.parameters() , lr=lowercase )
# Instantiate scheduler
_a = get_linear_schedule_with_warmup(
optimizer=lowercase , num_warmup_steps=100 , num_training_steps=(len(lowercase ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_a , _a , _a , _a , _a = accelerator.prepare(
lowercase , lowercase , lowercase , lowercase , lowercase )
# Now we train the model
for epoch in range(lowercase ):
model.train()
for step, batch in enumerate(lowercase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
_a = model(**lowercase )
_a = outputs.loss
_a = loss / gradient_accumulation_steps
accelerator.backward(lowercase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
_a = 0
for step, batch in enumerate(lowercase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
_a = model(**lowercase )
_a = outputs.logits.argmax(dim=-1 )
_a , _a = accelerator.gather((predictions, batch["labels"]) )
# New Code #
# First we check if it's a distributed system
if accelerator.use_distributed:
# Then see if we're on the last batch of our eval dataloader
if step == len(lowercase ) - 1:
# Last batch needs to be truncated on distributed systems as it contains additional samples
_a = predictions[: len(eval_dataloader.dataset ) - samples_seen]
_a = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
# Otherwise we add the number of samples seen
samples_seen += references.shape[0]
# All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
# accelerator.gather_for_metrics((predictions, batch["labels"]))
metric.add_batch(
predictions=lowercase , references=lowercase , )
_a = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'epoch {epoch}:' , lowercase )
def _lowerCamelCase ( ) -> List[str]:
_a = argparse.ArgumentParser(description="Simple example of training script." )
parser.add_argument(
"--mixed_precision" , type=lowercase , default=lowercase , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU." , )
parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." )
_a = parser.parse_args()
_a = {"lr": 2E-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
training_function(lowercase , lowercase )
if __name__ == "__main__":
main()
| 346 |
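# The core trick in the evaluation loop above: distributed samplers pad the
# dataset so every process sees the same number of samples, so the gathered
# predictions must be truncated on the last batch. A framework-free sketch of
# that bookkeeping (pure Python, illustrative names):
def trim_gathered(gathered: list, dataset_len: int, samples_seen: int, is_last: bool) -> tuple:
    if is_last:
        # drop the padded duplicates appended to fill out the final batch
        gathered = gathered[: dataset_len - samples_seen]
    return gathered, samples_seen + len(gathered)


seen = 0
batches = [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 0, 1]]  # last batch wraps around
for i, batch in enumerate(batches):
    batch, seen = trim_gathered(batch, dataset_len=10, samples_seen=seen, is_last=i == len(batches) - 1)
print(seen)  # 10 -- the two duplicated samples were dropped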
'''simple docstring'''
import requests
lowerCAmelCase_ : List[Any] = 'YOUR API KEY'
def _lowerCamelCase ( lowercase : str , lowercase : str = giphy_api_key ) -> list:
_a = "+".join(query.split() )
_a = F'https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}'
_a = requests.get(lowercase ).json()["data"]
return [gif["url"] for gif in gifs]
if __name__ == "__main__":
print('\n'.join(get_gifs('space ship')))
| 346 | 1 |
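# A slightly more defensive variant of the Giphy request above, adding a
# timeout and surfacing HTTP errors. `limit` is a documented search parameter,
# but treat the exact response shape as an assumption.
import requests


def search_gifs(query: str, api_key: str, limit: int = 5) -> list:
    params = {"q": query, "api_key": api_key, "limit": limit}
    response = requests.get("https://api.giphy.com/v1/gifs/search", params=params, timeout=10)
    response.raise_for_status()  # fail loudly on 4xx/5xx instead of a KeyError later
    return [gif["url"] for gif in response.json()["data"]]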
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
lowerCAmelCase_ : List[str] = logging.get_logger(__name__)
lowerCAmelCase_ : Tuple = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'adapter_layer': 'encoder.layers.*.adapter_layer',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
'pooling_layer.linear': 'projector',
'pooling_layer.projection': 'classifier',
}
lowerCAmelCase_ : Union[str, Any] = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
'projector',
'classifier',
]
def _lowerCamelCase ( lowercase : str ) -> Any:
_a = {}
with open(lowercase , "r" ) as file:
for line_number, line in enumerate(lowercase ):
_a = line.strip()
if line:
_a = line.split()
_a = line_number
_a = words[0]
_a = value
return result
def _lowerCamelCase ( lowercase : List[str] , lowercase : List[str] , lowercase : List[str] , lowercase : Tuple , lowercase : Tuple ) -> List[str]:
for attribute in key.split("." ):
_a = getattr(lowercase , lowercase )
_a = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(lowercase ):
_a = PARAM_MAPPING[full_name.split("." )[-1]]
_a = "param"
if weight_type is not None and weight_type != "param":
_a = getattr(lowercase , lowercase ).shape
elif weight_type is not None and weight_type == "param":
_a = hf_pointer
for attribute in hf_param_name.split("." ):
_a = getattr(lowercase , lowercase )
_a = shape_pointer.shape
# let's reduce dimension
_a = value[0]
else:
_a = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
F' {value.shape} for {full_name}' )
if weight_type == "weight":
_a = value
elif weight_type == "weight_g":
_a = value
elif weight_type == "weight_v":
_a = value
elif weight_type == "bias":
_a = value
elif weight_type == "param":
for attribute in hf_param_name.split("." ):
_a = getattr(lowercase , lowercase )
_a = value
else:
_a = value
logger.info(F'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def _lowerCamelCase ( lowercase : Optional[int] , lowercase : List[Any] , lowercase : Tuple , lowercase : str , lowercase : Dict ) -> Dict:
_a = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(lowercase ):
_a = PARAM_MAPPING[full_name.split("." )[-1]]
_a = "param"
if weight_type is not None and weight_type != "param":
_a = ".".join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
_a = ".".join([key, hf_param_name] )
else:
_a = key
_a = value if "lm_head" in full_key else value[0]
lowerCAmelCase_ : Optional[Any] = {
'W_a': 'linear_1.weight',
'W_b': 'linear_2.weight',
'b_a': 'linear_1.bias',
'b_b': 'linear_2.bias',
'ln_W': 'norm.weight',
'ln_b': 'norm.bias',
}
def _lowerCamelCase ( lowercase : int , lowercase : Dict , lowercase : Optional[Any]=None , lowercase : Optional[int]=None ) -> Optional[int]:
_a = False
for key, mapped_key in MAPPING.items():
_a = "wav2vec2." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
_a = True
if "*" in mapped_key:
_a = name.split(lowercase )[0].split("." )[-2]
_a = mapped_key.replace("*" , lowercase )
if "weight_g" in name:
_a = "weight_g"
elif "weight_v" in name:
_a = "weight_v"
elif "bias" in name:
_a = "bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
_a = "weight"
else:
_a = None
if hf_dict is not None:
rename_dict(lowercase , lowercase , lowercase , lowercase , lowercase )
else:
set_recursively(lowercase , lowercase , lowercase , lowercase , lowercase )
return is_used
return is_used
def _lowerCamelCase ( lowercase : Union[str, Any] , lowercase : str , lowercase : Any ) -> Optional[int]:
_a = []
_a = fairseq_model.state_dict()
_a = hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
_a = False
if "conv_layers" in name:
load_conv_layer(
lowercase , lowercase , lowercase , lowercase , hf_model.config.feat_extract_norm == "group" , )
_a = True
else:
_a = load_wavaveca_layer(lowercase , lowercase , lowercase )
if not is_used:
unused_weights.append(lowercase )
logger.warning(F'Unused weights: {unused_weights}' )
def _lowerCamelCase ( lowercase : Tuple , lowercase : Dict , lowercase : Any , lowercase : Optional[Any] , lowercase : Dict ) -> Optional[int]:
_a = full_name.split("conv_layers." )[-1]
_a = name.split("." )
_a = int(items[0] )
_a = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' )
_a = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' )
_a = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.' )
_a = value
logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.' )
_a = value
logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(lowercase )
@torch.no_grad()
def _lowerCamelCase ( lowercase : Optional[Any] , lowercase : Optional[int] , lowercase : Optional[Any]=None , lowercase : Optional[Any]=None , lowercase : Union[str, Any]=True , lowercase : Union[str, Any]=False ) -> Dict:
if config_path is not None:
_a = WavaVecaConfig.from_pretrained(lowercase )
else:
_a = WavaVecaConfig()
if is_seq_class:
_a = read_txt_into_dict(lowercase )
_a = idalabel
_a = WavaVecaForSequenceClassification(lowercase )
_a = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=lowercase , return_attention_mask=lowercase , )
feature_extractor.save_pretrained(lowercase )
elif is_finetuned:
if dict_path:
_a = Dictionary.load(lowercase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
_a = target_dict.pad_index
_a = target_dict.bos_index
_a = target_dict.eos_index
_a = len(target_dict.symbols )
_a = os.path.join(lowercase , "vocab.json" )
if not os.path.isdir(lowercase ):
logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(lowercase ) )
return
os.makedirs(lowercase , exist_ok=lowercase )
_a = target_dict.indices
# fairseq has the <pad> and <s> switched
_a = 0
_a = 1
with open(lowercase , "w" , encoding="utf-8" ) as vocab_handle:
json.dump(lowercase , lowercase )
_a = WavaVecaCTCTokenizer(
lowercase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=lowercase , )
_a = True if config.feat_extract_norm == "layer" else False
_a = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=lowercase , return_attention_mask=lowercase , )
_a = WavaVecaProcessor(feature_extractor=lowercase , tokenizer=lowercase )
processor.save_pretrained(lowercase )
_a = WavaVecaForCTC(lowercase )
else:
_a = WavaVecaForPreTraining(lowercase )
if is_finetuned or is_seq_class:
_a , _a , _a = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
else:
_a = argparse.Namespace(task="audio_pretraining" )
_a = fairseq.tasks.setup_task(lowercase )
_a , _a , _a = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=lowercase )
_a = model[0].eval()
recursively_load_weights(lowercase , lowercase , not is_finetuned )
hf_wavavec.save_pretrained(lowercase )
if __name__ == "__main__":
lowerCAmelCase_ : List[str] = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
parser.add_argument(
'--is_seq_class',
action='store_true',
help='Whether the model to convert is a fine-tuned sequence classification model or not',
)
lowerCAmelCase_ : List[Any] = parser.parse_args()
lowerCAmelCase_ : List[Any] = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
| 346 |
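# The conversion script's central idea is a key-mapping table where "*" stands
# for the layer index. A stripped-down sketch of that substitution (the mapping
# and names here are illustrative, not the full table above):
import re

DEMO_MAPPING = {"self_attn.k_proj": "encoder.layers.*.attention.k_proj"}


def rename(fairseq_key: str) -> str:
    for old, new in DEMO_MAPPING.items():
        if old in fairseq_key:
            match = re.search(r"layers\.(\d+)\.", fairseq_key)  # pull out the layer index
            layer = match.group(1) if match else "0"
            suffix = fairseq_key.rsplit(".", 1)[-1]  # "weight" / "bias"
            return new.replace("*", layer) + "." + suffix
    return fairseq_key


print(rename("encoder.layers.3.self_attn.k_proj.weight"))
# -> encoder.layers.3.attention.k_proj.weight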
'''simple docstring'''
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase_ : str = '▁'
lowerCAmelCase_ : Optional[int] = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ , unittest.TestCase ):
"""simple docstring"""
__a =BertGenerationTokenizer
__a =False
__a =True
def UpperCamelCase__ ( self : Optional[Any] ):
super().setUp()
_a = BertGenerationTokenizer(__a , keep_accents=__a )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase__ ( self : Tuple ):
_a = "<s>"
_a = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__a ) , __a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__a ) , __a )
def UpperCamelCase__ ( self : List[str] ):
_a = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<unk>" )
self.assertEqual(vocab_keys[1] , "<s>" )
self.assertEqual(vocab_keys[-1] , "<pad>" )
self.assertEqual(len(__a ) , 10_02 )
def UpperCamelCase__ ( self : str ):
self.assertEqual(self.get_tokenizer().vocab_size , 10_00 )
def UpperCamelCase__ ( self : Tuple ):
_a = BertGenerationTokenizer(__a , keep_accents=__a )
_a = tokenizer.tokenize("This is a test" )
self.assertListEqual(__a , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__a ) , [2_85, 46, 10, 1_70, 3_82] , )
_a = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
__a , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
_a = tokenizer.convert_tokens_to_ids(__a )
self.assertListEqual(
__a , [8, 21, 84, 55, 24, 19, 7, 0, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
_a = tokenizer.convert_ids_to_tokens(__a )
self.assertListEqual(
__a , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
@cached_property
def UpperCamelCase__ ( self : Any ):
return BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" )
@slow
def UpperCamelCase__ ( self : List[str] ):
_a = "Hello World!"
_a = [1_85_36, 22_60, 1_01]
self.assertListEqual(__a , self.big_tokenizer.encode(__a ) )
@slow
def UpperCamelCase__ ( self : Optional[int] ):
_a = (
"This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
)
_a = [
8_71,
4_19,
3_58,
9_46,
9_91,
25_21,
4_52,
3_58,
13_57,
3_87,
77_51,
35_36,
1_12,
9_85,
4_56,
1_26,
8_65,
9_38,
54_00,
57_34,
4_58,
13_68,
4_67,
7_86,
24_62,
52_46,
11_59,
6_33,
8_65,
45_19,
4_57,
5_82,
8_52,
25_57,
4_27,
9_16,
5_08,
4_05,
3_43_24,
4_97,
3_91,
4_08,
1_13_42,
12_44,
3_85,
1_00,
9_38,
9_85,
4_56,
5_74,
3_62,
1_25_97,
32_00,
31_29,
11_72,
]
self.assertListEqual(__a , self.big_tokenizer.encode(__a ) )
@require_torch
@slow
def UpperCamelCase__ ( self : Tuple ):
import torch
from transformers import BertGenerationConfig, BertGenerationEncoder
# Build sequence
_a = list(self.big_tokenizer.get_vocab().keys() )[:10]
_a = " ".join(__a )
_a = self.big_tokenizer.encode_plus(__a , return_tensors="pt" , return_token_type_ids=__a )
_a = self.big_tokenizer.batch_encode_plus(
[sequence + " " + sequence] , return_tensors="pt" , return_token_type_ids=__a )
_a = BertGenerationConfig()
_a = BertGenerationEncoder(__a )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**__a )
model(**__a )
@slow
def UpperCamelCase__ ( self : Optional[int] ):
# fmt: off
_a = {"input_ids": [[3_92_86, 4_58, 3_63_35, 20_01, 4_56, 1_30_73, 1_32_66, 4_55, 1_13, 77_46, 17_41, 1_11_57, 3_91, 1_30_73, 1_32_66, 4_55, 1_13, 39_67, 3_54_12, 1_13, 49_36, 1_09, 38_70, 23_77, 1_13, 3_00_84, 4_57_20, 4_58, 1_34, 1_74_96, 1_12, 5_03, 1_16_72, 1_13, 1_18, 1_12, 56_65, 1_33_47, 3_86_87, 1_12, 14_96, 3_13_89, 1_12, 32_68, 4_72_64, 1_34, 9_62, 1_12, 1_63_77, 80_35, 2_31_30, 4_30, 1_21_69, 1_55_18, 2_85_92, 4_58, 1_46, 4_16_97, 1_09, 3_91, 1_21_69, 1_55_18, 1_66_89, 4_58, 1_46, 4_13_58, 1_09, 4_52, 7_26, 40_34, 1_11, 7_63, 3_54_12, 50_82, 3_88, 19_03, 1_11, 90_51, 3_91, 28_70, 4_89_18, 19_00, 11_23, 5_50, 9_98, 1_12, 95_86, 1_59_85, 4_55, 3_91, 4_10, 2_29_55, 3_76_36, 1_14], [4_48, 1_74_96, 4_19, 36_63, 3_85, 7_63, 1_13, 2_75_33, 28_70, 32_83, 1_30_43, 16_39, 2_47_13, 5_23, 6_56, 2_40_13, 1_85_50, 25_21, 5_17, 2_70_14, 2_12_44, 4_20, 12_12, 14_65, 3_91, 9_27, 48_33, 3_88, 5_78, 1_17_86, 1_14, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [4_84, 21_69, 76_87, 2_19_32, 1_81_46, 7_26, 3_63, 1_70_32, 33_91, 1_14, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__a , model_name="google/bert_for_seq_generation_L-24_bbc_encoder" , revision="c817d1fd1be2ffa69431227a1fe320544943d4db" , )
| 346 | 1 |
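# The integration test above pins an exact repo revision so upstream vocab
# changes cannot silently break the expected token ids. The same pinning works
# for any from_pretrained call; the hash below is the one used in the test.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained(
    "google/bert_for_seq_generation_L-24_bbc_encoder",
    revision="c817d1fd1be2ffa69431227a1fe320544943d4db",
)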
'''simple docstring'''
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ , unittest.TestCase ):
"""simple docstring"""
__a =MgpstrTokenizer
__a =False
__a ={}
__a =False
def UpperCamelCase__ ( self : List[str] ):
super().setUp()
# fmt: off
_a = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
# fmt: on
_a = dict(zip(__a , range(len(__a ) ) ) )
_a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(__a ) + "\n" )
def UpperCamelCase__ ( self : Tuple , **__a : Tuple ):
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **__a )
def UpperCamelCase__ ( self : Tuple , __a : Any ):
_a = "tester"
_a = "tester"
return input_text, output_text
@unittest.skip("MGP-STR always lower cases letters." )
def UpperCamelCase__ ( self : Dict ):
pass
def UpperCamelCase__ ( self : List[str] ):
_a = self.get_tokenizers(do_lower_case=__a )
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
_a = "[SPECIAL_TOKEN]"
tokenizer.add_special_tokens({"cls_token": special_token} )
_a = tokenizer.encode([special_token] , add_special_tokens=__a )
self.assertEqual(len(__a ) , 1 )
_a = tokenizer.decode(__a , skip_special_tokens=__a )
self.assertTrue(special_token not in decoded )
def UpperCamelCase__ ( self : Union[str, Any] ):
_a = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
_a , _a = self.get_input_output_texts(__a )
_a = tokenizer.tokenize(__a )
_a = tokenizer.convert_tokens_to_ids(__a )
_a = tokenizer.encode(__a , add_special_tokens=__a )
self.assertListEqual(__a , __a )
_a = tokenizer.convert_ids_to_tokens(__a )
self.assertNotEqual(len(__a ) , 0 )
_a = tokenizer.decode(__a )
self.assertIsInstance(__a , __a )
self.assertEqual(text_a.replace(" " , "" ) , __a )
@unittest.skip("MGP-STR tokenizer only handles one sequence." )
def UpperCamelCase__ ( self : str ):
pass
@unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer" )
def UpperCamelCase__ ( self : Optional[int] ):
pass
| 346 |
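# A minimal sketch of the vocab round trip the fixture setup above performs:
# build a character-level vocabulary, dump it to JSON, and read it back.
import json
import os
import string
import tempfile

vocab = {tok: i for i, tok in enumerate(["[GO]", "[s]"] + list(string.digits + string.ascii_lowercase))}
with tempfile.TemporaryDirectory() as tmp:
    path = os.path.join(tmp, "vocab.json")
    with open(path, "w", encoding="utf-8") as fp:
        json.dump(vocab, fp)
    with open(path, encoding="utf-8") as fp:
        assert json.load(fp) == vocab  # token ids survive the round trip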
'''simple docstring'''
def _lowerCamelCase ( lowercase : int , lowercase : list ) -> Union[str, Any]:
_enforce_args(lowercase , lowercase )
if n == 0:
return 0
_a = float("-inf" )
for i in range(1 , n + 1 ):
_a = max(
lowercase , prices[i - 1] + naive_cut_rod_recursive(n - i , lowercase ) )
return max_revue
def _lowerCamelCase ( lowercase : int , lowercase : list ) -> Tuple:
_enforce_args(lowercase , lowercase )
_a = [float("-inf" ) for _ in range(n + 1 )]
return _top_down_cut_rod_recursive(lowercase , lowercase , lowercase )
def _lowerCamelCase ( lowercase : int , lowercase : list , lowercase : list ) -> List[str]:
if max_rev[n] >= 0:
return max_rev[n]
elif n == 0:
return 0
else:
_a = float("-inf" )
for i in range(1 , n + 1 ):
_a = max(
lowercase , prices[i - 1] + _top_down_cut_rod_recursive(n - i , lowercase , lowercase ) , )
_a = max_revenue
return max_rev[n]
def _lowerCamelCase ( lowercase : int , lowercase : list ) -> Any:
_enforce_args(lowercase , lowercase )
    # length(max_rev) = n + 1, to accommodate the revenue obtainable from a rod of
    # length 0.
_a = [float("-inf" ) for _ in range(n + 1 )]
_a = 0
for i in range(1 , n + 1 ):
_a = max_rev[i]
for j in range(1 , i + 1 ):
_a = max(lowercase , prices[j - 1] + max_rev[i - j] )
_a = max_revenue_i
return max_rev[n]
def _lowerCamelCase ( lowercase : int , lowercase : list ) -> Dict:
if n < 0:
_a = F'n must be greater than or equal to 0. Got n = {n}'
raise ValueError(lowercase )
if n > len(lowercase ):
_a = (
"Each integral piece of rod must have a corresponding price. "
F'Got n = {n} but length of prices = {len(lowercase )}'
)
raise ValueError(lowercase )
def _lowerCamelCase ( ) -> Any:
_a = [6, 10, 12, 15, 20, 23]
_a = len(lowercase )
# the best revenue comes from cutting the rod into 6 pieces, each
# of length 1 resulting in a revenue of 6 * 6 = 36.
_a = 36
_a = top_down_cut_rod(lowercase , lowercase )
_a = bottom_up_cut_rod(lowercase , lowercase )
_a = naive_cut_rod_recursive(lowercase , lowercase )
assert expected_max_revenue == max_rev_top_down
assert max_rev_top_down == max_rev_bottom_up
assert max_rev_bottom_up == max_rev_naive
if __name__ == "__main__":
main()
| 346 | 1 |
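# With readable names, the bottom-up recurrence above is just
# best[i] = max(prices[cut - 1] + best[i - cut]) over every first-cut length.
def cut_rod(prices: list, n: int) -> int:
    best = [0] * (n + 1)  # best[i] = max revenue from a rod of length i
    for length in range(1, n + 1):
        best[length] = max(prices[cut - 1] + best[length - cut] for cut in range(1, length + 1))
    return best[n]


assert cut_rod([6, 10, 12, 15, 20, 23], 6) == 36  # six unit pieces at 6 each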
'''simple docstring'''
import requests
lowerCAmelCase_ : Union[str, Any] = '' # <-- Put your OpenWeatherMap appid here!
lowerCAmelCase_ : Union[str, Any] = 'https://api.openweathermap.org/data/2.5/'
def _lowerCamelCase ( lowercase : str = "Chicago" , lowercase : str = APPID ) -> dict:
return requests.get(URL_BASE + "weather" , params=locals() ).json()
def _lowerCamelCase ( lowercase : str = "Kolkata, India" , lowercase : str = APPID ) -> dict:
return requests.get(URL_BASE + "forecast" , params=locals() ).json()
def _lowerCamelCase ( lowercase : float = 55.68 , lowercase : float = 12.57 , lowercase : str = APPID ) -> dict:
return requests.get(URL_BASE + "onecall" , params=locals() ).json()
if __name__ == "__main__":
from pprint import pprint
while True:
lowerCAmelCase_ : Tuple = input('Enter a location:').strip()
if location:
pprint(current_weather(location))
else:
break
| 346 |
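# OpenWeatherMap accepts extra query parameters, e.g. units="metric" for
# Celsius instead of Kelvin. A hedged usage sketch -- treat the response
# schema as an assumption and check the API docs.
import requests


def current_temperature(city: str, appid: str) -> float:
    params = {"q": city, "appid": appid, "units": "metric"}
    response = requests.get("https://api.openweathermap.org/data/2.5/weather", params=params, timeout=10)
    response.raise_for_status()
    return response.json()["main"]["temp"]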
'''simple docstring'''
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
lowerCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
@add_end_docstrings(lowerCamelCase_ )
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
def __init__( self : List[Any] , *__a : Optional[int] , **__a : List[str] ):
super().__init__(*__a , **__a )
self.check_model_type(__a )
def UpperCamelCase__ ( self : Optional[Any] , __a : Dict=None , __a : int=None , __a : Optional[Any]=None , **__a : List[Any] ):
_a , _a = {}, {}
if padding is not None:
_a = padding
if truncation is not None:
_a = truncation
if top_k is not None:
_a = top_k
return preprocess_params, {}, postprocess_params
def __call__( self : Union[str, Any] , __a : Union["Image.Image", str] , __a : str = None , **__a : Any ):
if isinstance(__a , (Image.Image, str) ) and isinstance(__a , __a ):
_a = {"image": image, "question": question}
else:
_a = image
_a = super().__call__(__a , **__a )
return results
def UpperCamelCase__ ( self : Tuple , __a : Tuple , __a : Optional[Any]=False , __a : List[Any]=False ):
_a = load_image(inputs["image"] )
_a = self.tokenizer(
inputs["question"] , return_tensors=self.framework , padding=__a , truncation=__a )
_a = self.image_processor(images=__a , return_tensors=self.framework )
model_inputs.update(__a )
return model_inputs
def UpperCamelCase__ ( self : List[Any] , __a : List[str] ):
_a = self.model(**__a )
return model_outputs
def UpperCamelCase__ ( self : int , __a : Optional[int] , __a : Dict=5 ):
if top_k > self.model.config.num_labels:
_a = self.model.config.num_labels
if self.framework == "pt":
_a = model_outputs.logits.sigmoid()[0]
_a , _a = probs.topk(__a )
else:
raise ValueError(f'Unsupported framework: {self.framework}' )
_a = scores.tolist()
_a = ids.tolist()
return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(__a , __a )]
| 346 | 1 |
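# In practice the pipeline class above is reached through pipeline(...). A
# sketch of typical usage; the checkpoint name is one commonly used VQA model
# and the image path is a placeholder.
from transformers import pipeline

vqa = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
answers = vqa(image="photo.png", question="What is in the picture?", top_k=2)
print(answers)  # [{"score": ..., "answer": ...}, ...]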
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
__a =['image_processor', 'tokenizer']
__a ='LayoutLMv2ImageProcessor'
__a =('LayoutXLMTokenizer', 'LayoutXLMTokenizerFast')
def __init__( self : Dict , __a : int=None , __a : List[Any]=None , **__a : str ):
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , __a , )
_a = kwargs.pop("feature_extractor" )
_a = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(__a , __a )
def __call__( self : Optional[int] , __a : Optional[Any] , __a : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , __a : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , __a : Union[List[List[int]], List[List[List[int]]]] = None , __a : Optional[Union[List[int], List[List[int]]]] = None , __a : bool = True , __a : Union[bool, str, PaddingStrategy] = False , __a : Union[bool, str, TruncationStrategy] = None , __a : Optional[int] = None , __a : int = 0 , __a : Optional[int] = None , __a : Optional[bool] = None , __a : Optional[bool] = None , __a : bool = False , __a : bool = False , __a : bool = False , __a : bool = False , __a : bool = True , __a : Optional[Union[str, TensorType]] = None , **__a : Optional[Any] , ):
# verify input
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
"You cannot provide bounding boxes "
"if you initialized the image processor with apply_ocr set to True." )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
"You cannot provide word labels if you initialized the image processor with apply_ocr set to True." )
if return_overflowing_tokens is True and return_offsets_mapping is False:
raise ValueError("You cannot return overflowing tokens without returning the offsets mapping." )
# first, apply the image processor
_a = self.image_processor(images=__a , return_tensors=__a )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(__a , __a ):
_a = [text] # add batch dimension (as the image processor always adds a batch dimension)
_a = features["words"]
_a = self.tokenizer(
text=text if text is not None else features["words"] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features["boxes"] , word_labels=__a , add_special_tokens=__a , padding=__a , truncation=__a , max_length=__a , stride=__a , pad_to_multiple_of=__a , return_token_type_ids=__a , return_attention_mask=__a , return_overflowing_tokens=__a , return_special_tokens_mask=__a , return_offsets_mapping=__a , return_length=__a , verbose=__a , return_tensors=__a , **__a , )
# add pixel values
_a = features.pop("pixel_values" )
if return_overflowing_tokens is True:
_a = self.get_overflowing_images(__a , encoded_inputs["overflow_to_sample_mapping"] )
_a = images
return encoded_inputs
def UpperCamelCase__ ( self : int , __a : List[Any] , __a : int ):
# in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
_a = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(__a ) != len(__a ):
raise ValueError(
"Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
f' {len(__a )} and {len(__a )}' )
return images_with_overflow
def UpperCamelCase__ ( self : Optional[Any] , *__a : Dict , **__a : Union[str, Any] ):
return self.tokenizer.batch_decode(*__a , **__a )
def UpperCamelCase__ ( self : Union[str, Any] , *__a : Optional[int] , **__a : Optional[Any] ):
return self.tokenizer.decode(*__a , **__a )
@property
def UpperCamelCase__ ( self : int ):
return ["input_ids", "bbox", "attention_mask", "image"]
@property
def UpperCamelCase__ ( self : List[Any] ):
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , __a , )
return self.image_processor_class
@property
def UpperCamelCase__ ( self : int ):
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , __a , )
return self.image_processor
| 346 |
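# Typical usage of the processor above with OCR disabled and caller-supplied
# words and boxes (LayoutLM-style 0-1000 normalized coordinates). Model name
# and file paths are assumptions.
from PIL import Image
from transformers import LayoutXLMProcessor

processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base", apply_ocr=False)
image = Image.open("invoice.png").convert("RGB")
words = ["Total", "42.00"]
boxes = [[100, 100, 200, 120], [210, 100, 300, 120]]
encoding = processor(image, words, boxes=boxes, return_tensors="pt")
print(encoding.keys())  # input_ids, bbox, attention_mask, image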
'''simple docstring'''
from random import randint, random
def _lowerCamelCase ( lowercase : int , lowercase : int , lowercase : int , lowercase : bool = False , lowercase : bool = False , lowercase : int = 5 , ) -> list:
_a = [[-1] * number_of_cells] # Create a highway without any car
_a = 0
_a = max(lowercase , 0 )
while i < number_of_cells:
_a = (
randint(0 , lowercase ) if random_speed else initial_speed
) # Place the cars
i += (
randint(1 , max_speed * 2 ) if random_frequency else frequency
) # Arbitrary number, may need tuning
return highway
def _lowerCamelCase ( lowercase : list , lowercase : int ) -> int:
_a = 0
_a = highway_now[car_index + 1 :]
for cell in range(len(lowercase ) ): # May need a better name for this
if cells[cell] != -1: # If the cell is not empty then
return distance # we have the distance we wanted
distance += 1
# Here if the car is near the end of the highway
return distance + get_distance(lowercase , -1 )
def _lowerCamelCase ( lowercase : list , lowercase : float , lowercase : int ) -> list:
_a = len(lowercase )
    # Before calculations, the highway is empty
_a = [-1] * number_of_cells
for car_index in range(lowercase ):
if highway_now[car_index] != -1:
# Add 1 to the current speed of the car and cap the speed
_a = min(highway_now[car_index] + 1 , lowercase )
# Number of empty cell before the next car
_a = get_distance(lowercase , lowercase ) - 1
# We can't have the car causing an accident
_a = min(next_highway[car_index] , lowercase )
if random() < probability:
# Randomly, a driver will slow down
_a = max(next_highway[car_index] - 1 , 0 )
return next_highway
def _lowerCamelCase ( lowercase : list , lowercase : int , lowercase : float , lowercase : int ) -> list:
_a = len(highway[0] )
for i in range(lowercase ):
_a = update(highway[i] , lowercase , lowercase )
_a = [-1] * number_of_cells
for car_index in range(lowercase ):
_a = next_speeds_calculated[car_index]
if speed != -1:
# Change the position based on the speed (with % to create the loop)
_a = (car_index + speed) % number_of_cells
# Commit the change of position
_a = speed
highway.append(lowercase )
return highway
if __name__ == "__main__":
import doctest
doctest.testmod()
| 346 | 1 |
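# The update rule above is the Nagel-Schreckenberg model: accelerate by one,
# brake to the free gap ahead, randomly dawdle, then move. One step with
# readable names (illustrative, not the exact helpers above):
import random


def nasch_step(road: list, v_max: int, p_slow: float) -> list:
    n = len(road)
    nxt = [-1] * n
    for i, v in enumerate(road):
        if v == -1:  # empty cell
            continue
        gap = next(d for d in range(1, n + 1) if road[(i + d) % n] != -1 or d == n) - 1
        v = min(v + 1, v_max, gap)  # accelerate, then brake to the free gap
        if v > 0 and random.random() < p_slow:  # random dawdling
            v -= 1
        nxt[(i + v) % n] = v  # move (circular road)
    return nxt


print(nasch_step([0, -1, -1, 1, -1, -1, -1, -1], v_max=5, p_slow=0.3))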
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
lowerCAmelCase_ : List[str] = TypeVar('T')
lowerCAmelCase_ : Dict = TypeVar('U')
class __SCREAMING_SNAKE_CASE (Generic[T, U] ):
"""simple docstring"""
def __init__( self : Union[str, Any] , __a : T | None , __a : U | None ):
_a = key
_a = val
_a = None
_a = None
def __repr__( self : Any ):
return (
f'Node: key: {self.key}, val: {self.val}, '
f'has next: {bool(self.next )}, has prev: {bool(self.prev )}'
)
class __SCREAMING_SNAKE_CASE (Generic[T, U] ):
"""simple docstring"""
def __init__( self : Dict ):
_a = DoubleLinkedListNode(__a , __a )
_a = DoubleLinkedListNode(__a , __a )
_a , _a = self.rear, self.head
def __repr__( self : str ):
_a = ["DoubleLinkedList"]
_a = self.head
while node.next is not None:
rep.append(str(__a ) )
_a = node.next
rep.append(str(self.rear ) )
return ",\n ".join(__a )
def UpperCamelCase__ ( self : int , __a : DoubleLinkedListNode[T, U] ):
_a = self.rear.prev
# All nodes other than self.head are guaranteed to have non-None previous
assert previous is not None
_a = node
_a = previous
_a = node
_a = self.rear
def UpperCamelCase__ ( self : Any , __a : DoubleLinkedListNode[T, U] ):
if node.prev is None or node.next is None:
return None
_a = node.next
_a = node.prev
_a = None
_a = None
return node
class __SCREAMING_SNAKE_CASE (Generic[T, U] ):
"""simple docstring"""
__a ={}
def __init__( self : Union[str, Any] , __a : int ):
_a = DoubleLinkedList()
_a = capacity
_a = 0
_a = 0
_a = 0
_a = {}
def __repr__( self : Optional[int] ):
return (
f'CacheInfo(hits={self.hits}, misses={self.miss}, '
f'capacity={self.capacity}, current size={self.num_keys})'
)
def __contains__( self : str , __a : T ):
return key in self.cache
def UpperCamelCase__ ( self : str , __a : T ):
# Note: pythonic interface would throw KeyError rather than return None
if key in self.cache:
self.hits += 1
_a = self.cache[key]
_a = self.list.remove(self.cache[key] )
assert node == value_node
# node is guaranteed not None because it is in self.cache
assert node is not None
self.list.add(__a )
return node.val
self.miss += 1
return None
def UpperCamelCase__ ( self : Tuple , __a : T , __a : U ):
if key not in self.cache:
if self.num_keys >= self.capacity:
# delete first node (oldest) when over capacity
_a = self.list.head.next
# guaranteed to have a non-None first node when num_keys > 0
# explain to type checker via assertions
assert first_node is not None
assert first_node.key is not None
assert (
self.list.remove(__a ) is not None
                ) # node guaranteed to be in list
del self.cache[first_node.key]
self.num_keys -= 1
_a = DoubleLinkedListNode(__a , __a )
self.list.add(self.cache[key] )
self.num_keys += 1
else:
# bump node to the end of the list, update value
_a = self.list.remove(self.cache[key] )
assert node is not None # node guaranteed to be in list
_a = value
self.list.add(__a )
@classmethod
def UpperCamelCase__ ( cls : Tuple , __a : int = 1_28 ):
def cache_decorator_inner(__a : Callable[[T], U] ) -> Callable[..., U]:
def cache_decorator_wrapper(*__a : T ) -> U:
if func not in cls.decorator_function_to_instance_map:
_a = LRUCache(__a )
_a = cls.decorator_function_to_instance_map[func].get(args[0] )
if result is None:
_a = func(*__a )
cls.decorator_function_to_instance_map[func].put(args[0] , __a )
return result
def cache_info() -> LRUCache[T, U]:
return cls.decorator_function_to_instance_map[func]
setattr(__a , "cache_info" , __a ) # noqa: B010
return cache_decorator_wrapper
return cache_decorator_inner
if __name__ == "__main__":
import doctest
doctest.testmod()
| 346 |
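# The classmethod decorator at the end of the block above mirrors the standard
# library; for comparison, functools.lru_cache provides the same memoization
# plus hit/miss statistics out of the box.
from functools import lru_cache


@lru_cache(maxsize=128)
def fib(n: int) -> int:
    return n if n < 2 else fib(n - 1) + fib(n - 2)


print(fib(40))  # fast: each subproblem is computed once
print(fib.cache_info())  # CacheInfo(hits=..., misses=..., maxsize=128, currsize=...)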
'''simple docstring'''
def _lowerCamelCase ( lowercase : int = 10 ) -> str:
if not isinstance(lowercase , lowercase ) or n < 0:
raise ValueError("Invalid input" )
_a = 10**n
_a = 2_8433 * (pow(2 , 783_0457 , lowercase )) + 1
return str(number % modulus )
if __name__ == "__main__":
from doctest import testmod
testmod()
print(f"""{solution(10) = }""")
| 346 | 1 |
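# The key step above is modular exponentiation: the last n digits of a number
# are its value mod 10**n, and three-argument pow computes 2**7830457 mod 10**n
# without ever materializing the full multi-million-digit number. In miniature:
mod = 10**10
print((28433 * pow(2, 7830457, mod) + 1) % mod)  # 8739992577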
'''simple docstring'''
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
lowerCAmelCase_ : int = False
if is_vision_available():
from PIL import Image
from transformers import PixaStructImageProcessor
class __SCREAMING_SNAKE_CASE (unittest.TestCase ):
"""simple docstring"""
def __init__( self : Optional[Any] , __a : int , __a : List[Any]=7 , __a : Optional[Any]=3 , __a : Union[str, Any]=18 , __a : Any=30 , __a : Any=4_00 , __a : Tuple=None , __a : Union[str, Any]=True , __a : int=True , __a : int=None , ):
_a = size if size is not None else {"height": 20, "width": 20}
_a = parent
_a = batch_size
_a = num_channels
_a = image_size
_a = min_resolution
_a = max_resolution
_a = size
_a = do_normalize
_a = do_convert_rgb
_a = [5_12, 10_24, 20_48, 40_96]
_a = patch_size if patch_size is not None else {"height": 16, "width": 16}
def UpperCamelCase__ ( self : List[Any] ):
return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}
def UpperCamelCase__ ( self : Optional[int] ):
_a = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
_a = Image.open(requests.get(__a , stream=__a ).raw ).convert("RGB" )
return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='`Pix2StructImageProcessor` requires `torch>=1.11.0`.' , )
@require_torch
@require_vision
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ , unittest.TestCase ):
"""simple docstring"""
__a =PixaStructImageProcessor if is_vision_available() else None
def UpperCamelCase__ ( self : str ):
_a = PixaStructImageProcessingTester(self )
@property
def UpperCamelCase__ ( self : Union[str, Any] ):
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase__ ( self : Optional[Any] ):
_a = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__a , "do_normalize" ) )
self.assertTrue(hasattr(__a , "do_convert_rgb" ) )
def UpperCamelCase__ ( self : Optional[Any] ):
_a = self.image_processor_tester.prepare_dummy_image()
_a = self.image_processing_class(**self.image_processor_dict )
_a = 20_48
_a = image_processor(__a , return_tensors="pt" , max_patches=__a )
self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0606 ) , atol=1e-3 , rtol=1e-3 ) )
def UpperCamelCase__ ( self : str ):
# Initialize image_processor
_a = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a )
for image in image_inputs:
self.assertIsInstance(__a , Image.Image )
# Test not batched input
_a = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
_a = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=__a ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
_a = image_processor(
__a , return_tensors="pt" , max_patches=__a ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def UpperCamelCase__ ( self : Tuple ):
# Initialize image_processor
_a = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a )
for image in image_inputs:
self.assertIsInstance(__a , Image.Image )
# Test not batched input
_a = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
_a = True
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
with self.assertRaises(__a ):
_a = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=__a ).flattened_patches
_a = "Hello"
_a = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=__a , header_text=__a ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
_a = image_processor(
__a , return_tensors="pt" , max_patches=__a , header_text=__a ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def UpperCamelCase__ ( self : Any ):
# Initialize image_processor
_a = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a , numpify=__a )
for image in image_inputs:
self.assertIsInstance(__a , np.ndarray )
_a = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
_a = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=__a ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
_a = image_processor(
__a , return_tensors="pt" , max_patches=__a ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def UpperCamelCase__ ( self : Union[str, Any] ):
# Initialize image_processor
_a = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a , torchify=__a )
for image in image_inputs:
self.assertIsInstance(__a , torch.Tensor )
# Test not batched input
_a = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
_a = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=__a ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
_a = image_processor(
__a , return_tensors="pt" , max_patches=__a ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='`Pix2StructImageProcessor` requires `torch>=1.11.0`.' , )
@require_torch
@require_vision
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ , unittest.TestCase ):
"""simple docstring"""
__a =PixaStructImageProcessor if is_vision_available() else None
def UpperCamelCase__ ( self : str ):
_a = PixaStructImageProcessingTester(self , num_channels=4 )
_a = 3
@property
def UpperCamelCase__ ( self : Optional[Any] ):
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase__ ( self : Union[str, Any] ):
_a = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__a , "do_normalize" ) )
self.assertTrue(hasattr(__a , "do_convert_rgb" ) )
def UpperCamelCase__ ( self : Optional[int] ):
# Initialize image_processor
_a = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a )
for image in image_inputs:
self.assertIsInstance(__a , Image.Image )
# Test not batched input
_a = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* (self.image_processor_tester.num_channels - 1)
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
_a = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=__a ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
_a = image_processor(
__a , return_tensors="pt" , max_patches=__a ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
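For reference, the expected_hidden_dim in these tests is the flattened patch width: each patch unrolls to patch_h * patch_w * channels values, plus two columns carrying the patch's row/column position. A minimal numpy sketch of that layout (the function name and details are illustrative, not the processor's internals):

import numpy as np

def flatten_patches_sketch(patches: np.ndarray) -> np.ndarray:
    # patches: (rows, cols, patch_h * patch_w * channels)
    rows, cols, dim = patches.shape
    row_ids = np.repeat(np.arange(1, rows + 1), cols)[:, None]
    col_ids = np.tile(np.arange(1, cols + 1), rows)[:, None]
    flat = patches.reshape(rows * cols, dim)
    # final width is patch_h * patch_w * channels + 2, matching expected_hidden_dim above
    return np.concatenate([row_ids, col_ids, flat], axis=1)

out = flatten_patches_sketch(np.zeros((3, 4, 16 * 16 * 3)))
assert out.shape == (12, 16 * 16 * 3 + 2)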
| 346 |
'''simple docstring'''
def solution(n: int = 600851475143) -> int:
    # Returns the largest prime factor of n (referenced names restored; the dump had obfuscated them).
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans)
if __name__ == "__main__":
print(f"""{solution() = }""")
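A quick brute-force cross-check of the largest-prime-factor routine above (the naive helper is illustrative, not part of the original file):

def largest_prime_factor_naive(n: int) -> int:
    factor, d = 1, 2
    while d * d <= n:
        while n % d == 0:
            factor, n = d, n // d
        d += 1
    return n if n > 1 else factor

assert largest_prime_factor_naive(13195) == 29  # Project Euler's worked example
assert largest_prime_factor_naive(600851475143) == solution()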
| 346 | 1 |
'''simple docstring'''
import inspect
import unittest
import numpy as np
from transformers import BeitConfig
from transformers.testing_utils import require_flax, require_vision, slow
from transformers.utils import cached_property, is_flax_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor
if is_flax_available():
import jax
from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class __SCREAMING_SNAKE_CASE (unittest.TestCase ):
"""simple docstring"""
def __init__( self : Optional[Any] , __a : Any , __a : int=1_00 , __a : Dict=13 , __a : Union[str, Any]=30 , __a : Any=2 , __a : Optional[Any]=3 , __a : Optional[Any]=True , __a : Optional[Any]=True , __a : List[Any]=32 , __a : Optional[int]=5 , __a : int=4 , __a : Any=37 , __a : Optional[int]="gelu" , __a : List[str]=0.1 , __a : List[str]=0.1 , __a : Dict=10 , __a : str=0.02 , __a : int=3 , ):
_a = parent
_a = vocab_size
_a = batch_size
_a = image_size
_a = patch_size
_a = num_channels
_a = is_training
_a = use_labels
_a = hidden_size
_a = num_hidden_layers
_a = num_attention_heads
_a = intermediate_size
_a = hidden_act
_a = hidden_dropout_prob
_a = attention_probs_dropout_prob
_a = type_sequence_label_size
_a = initializer_range
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_a = (image_size // patch_size) ** 2
_a = num_patches + 1
def UpperCamelCase__ ( self : int ):
_a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_a = None
if self.use_labels:
_a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_a = BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__a , initializer_range=self.initializer_range , )
return config, pixel_values, labels
def UpperCamelCase__ ( self : Dict , __a : Tuple , __a : str , __a : Dict ):
_a = FlaxBeitModel(config=__a )
_a = model(__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase__ ( self : Optional[Any] , __a : Optional[Any] , __a : List[Any] , __a : List[str] ):
_a = FlaxBeitForMaskedImageModeling(config=__a )
_a = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def UpperCamelCase__ ( self : int , __a : Any , __a : Any , __a : Optional[Any] ):
_a = self.type_sequence_label_size
_a = FlaxBeitForImageClassification(config=__a )
_a = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_a = 1
_a = FlaxBeitForImageClassification(__a )
_a = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_a = model(__a )
def UpperCamelCase__ ( self : Union[str, Any] ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ , unittest.TestCase ):
"""simple docstring"""
__a =(
(FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else ()
)
def UpperCamelCase__ ( self : Dict ):
_a = FlaxBeitModelTester(self )
_a = ConfigTester(self , config_class=__a , has_text_modality=__a , hidden_size=37 )
def UpperCamelCase__ ( self : Optional[int] ):
self.config_tester.run_common_tests()
def UpperCamelCase__ ( self : Dict ):
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a = model_class(__a )
_a = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_a = [*signature.parameters.keys()]
_a = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __a )
def UpperCamelCase__ ( self : Tuple ):
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_a = self._prepare_for_class(__a , __a )
_a = model_class(__a )
@jax.jit
def model_jitted(__a : Union[str, Any] , **__a : Optional[Any] ):
return model(pixel_values=__a , **__a )
with self.subTest("JIT Enabled" ):
_a = model_jitted(**__a ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
_a = model_jitted(**__a ).to_tuple()
self.assertEqual(len(__a ) , len(__a ) )
for jitted_output, output in zip(__a , __a ):
self.assertEqual(jitted_output.shape , output.shape )
def UpperCamelCase__ ( self : int ):
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def UpperCamelCase__ ( self : List[Any] ):
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__a )
def UpperCamelCase__ ( self : int ):
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__a )
@slow
def UpperCamelCase__ ( self : Optional[Any] ):
for model_class_name in self.all_model_classes:
_a = model_class_name.from_pretrained("microsoft/beit-base-patch16-224" )
_a = model(np.ones((1, 3, 2_24, 2_24) ) )
self.assertIsNotNone(__a )
def prepare_img():
    # this name is required by the call sites in the integration tests below
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_vision
@require_flax
class __SCREAMING_SNAKE_CASE (unittest.TestCase ):
"""simple docstring"""
@cached_property
def UpperCamelCase__ ( self : Union[str, Any] ):
return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224" ) if is_vision_available() else None
@slow
def UpperCamelCase__ ( self : str ):
_a = FlaxBeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k" )
_a = self.default_image_processor
_a = prepare_img()
_a = image_processor(images=__a , return_tensors="np" ).pixel_values
# prepare bool_masked_pos
_a = np.ones((1, 1_96) , dtype=__a )
# forward pass
_a = model(pixel_values=__a , bool_masked_pos=__a )
_a = outputs.logits
# verify the logits
_a = (1, 1_96, 81_92)
self.assertEqual(logits.shape , __a )
_a = np.array(
[[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]] )
self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3] , __a , atol=1e-2 ) )
@slow
def UpperCamelCase__ ( self : Tuple ):
_a = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224" )
_a = self.default_image_processor
_a = prepare_img()
_a = image_processor(images=__a , return_tensors="np" )
# forward pass
_a = model(**__a )
_a = outputs.logits
# verify the logits
_a = (1, 10_00)
self.assertEqual(logits.shape , __a )
_a = np.array([-1.2385, -1.0987, -1.0108] )
self.assertTrue(np.allclose(logits[0, :3] , __a , atol=1e-4 ) )
_a = 2_81
self.assertEqual(logits.argmax(-1 ).item() , __a )
@slow
def UpperCamelCase__ ( self : List[Any] ):
_a = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k" )
_a = self.default_image_processor
_a = prepare_img()
_a = image_processor(images=__a , return_tensors="np" )
# forward pass
_a = model(**__a )
_a = outputs.logits
# verify the logits
_a = (1, 2_18_41)
self.assertEqual(logits.shape , __a )
_a = np.array([1.6881, -0.2787, 0.5901] )
self.assertTrue(np.allclose(logits[0, :3] , __a , atol=1e-4 ) )
_a = 23_96
self.assertEqual(logits.argmax(-1 ).item() , __a )
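The JIT test earlier in this file relies on a standard Flax idiom: run the same callable jitted and eagerly, then compare output shapes. A self-contained version of that pattern with a toy function (purely illustrative):

import jax
import jax.numpy as jnp

def toy_forward(x):
    return jnp.tanh(x @ x.T), x.sum()

x = jnp.ones((4, 4))
jitted = jax.jit(toy_forward)
jit_out = jitted(x)
with jax.disable_jit():
    eager_out = toy_forward(x)
for a, b in zip(jit_out, eager_out):
    assert a.shape == b.shape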
| 346 |
'''simple docstring'''
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format='%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=os.environ.get('LOGLEVEL', 'INFO').upper(),
stream=sys.stdout,
)
logger = logging.getLogger(__name__)
model_dict = {"facebook/bart-base": BartForConditionalGeneration}
tokenizer_dict = {"facebook/bart-base": BartTokenizer}
def parse_args():
    parser = argparse.ArgumentParser(description="Export Bart model + Beam Search to ONNX graph.")
    parser.add_argument(
        "--validation_file", type=str, default=None, help="A csv or a json file containing the validation data."
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=5,
        help="The maximum total input sequence length after tokenization.",
    )
    parser.add_argument(
        "--num_beams",
        type=int,
        default=None,
        help=(
            "Number of beams to use for evaluation. This argument will be "
            "passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."
        ),
    )
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=True,
    )
    parser.add_argument(
        "--config_name",
        type=str,
        default=None,
        help="Pretrained config name or path if not the same as model_name",
    )
    parser.add_argument(
        "--device",
        type=str,
        default="cpu",
        help="Device where the model will be run",
    )
    parser.add_argument("--output_file_path", type=str, default=None, help="Where to store the final ONNX file.")
    args = parser.parse_args()
    return args
def load_model_tokenizer(model_name, device="cpu"):
    huggingface_model = model_dict[model_name].from_pretrained(model_name).to(device)
    tokenizer = tokenizer_dict[model_name].from_pretrained(model_name)
    if model_name in ["facebook/bart-base"]:
        huggingface_model.config.no_repeat_ngram_size = 0
        huggingface_model.config.forced_bos_token_id = None
        huggingface_model.config.min_length = 0
    return huggingface_model, tokenizer
def export_and_validate_model(model, tokenizer, onnx_file_path, num_beams, max_length):
    model.eval()
    ort_sess = None
    bart_script_model = torch.jit.script(BARTBeamSearchGenerator(model))
    with torch.no_grad():
        ARTICLE_TO_SUMMARIZE = "My friends are cool but they eat too many carbs."
        inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors="pt").to(model.device)
        summary_ids = model.generate(
            inputs["input_ids"],
            attention_mask=inputs["attention_mask"],
            num_beams=num_beams,
            max_length=max_length,
            early_stopping=True,
            decoder_start_token_id=model.config.decoder_start_token_id,
        )
        torch.onnx.export(
            bart_script_model,
            (
                inputs["input_ids"],
                inputs["attention_mask"],
                num_beams,
                max_length,
                model.config.decoder_start_token_id,
            ),
            onnx_file_path,
            opset_version=14,
            input_names=["input_ids", "attention_mask", "num_beams", "max_length", "decoder_start_token_id"],
            output_names=["output_ids"],
            dynamic_axes={
                "input_ids": {0: "batch", 1: "seq"},
                "output_ids": {0: "batch", 1: "seq_out"},
            },
            example_outputs=summary_ids,
        )
        logger.info("Model exported to {}".format(onnx_file_path))
        new_onnx_file_path = remove_dup_initializers(os.path.abspath(onnx_file_path))
        logger.info("Deduplicated and optimized model written to {}".format(new_onnx_file_path))
        ort_sess = onnxruntime.InferenceSession(new_onnx_file_path)
        ort_out = ort_sess.run(
            None,
            {
                "input_ids": inputs["input_ids"].cpu().numpy(),
                "attention_mask": inputs["attention_mask"].cpu().numpy(),
                "num_beams": np.array(num_beams),
                "max_length": np.array(max_length),
                "decoder_start_token_id": np.array(model.config.decoder_start_token_id),
            },
        )
        np.testing.assert_allclose(summary_ids.cpu().numpy(), ort_out[0], rtol=1e-3, atol=1e-3)
        logger.info("Model outputs from torch and ONNX Runtime are similar.")
        logger.info("Success.")
def main():
    args = parse_args()
    max_length = 5
    num_beams = 4
    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.setLevel(logging.INFO)
    transformers.utils.logging.set_verbosity_error()
    device = torch.device(args.device)
    model, tokenizer = load_model_tokenizer(args.model_name_or_path, device)
    if model.config.decoder_start_token_id is None:
        raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")
    model.to(device)
    if args.max_length:
        max_length = args.max_length
    if args.num_beams:
        num_beams = args.num_beams
    if args.output_file_path:
        output_name = args.output_file_path
    else:
        output_name = "BART.onnx"
    logger.info("Exporting model to ONNX")
    export_and_validate_model(model, tokenizer, output_name, num_beams, max_length)
if __name__ == "__main__":
main()
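Once exported, the graph can be driven straight from onnxruntime; a minimal sketch (the input names mirror the input_names passed to torch.onnx.export above; the hard-coded decoder start id is an assumption, read it from the model config in practice):

import numpy as np
import onnxruntime
from transformers import BartTokenizer

sess = onnxruntime.InferenceSession("BART.onnx")
tok = BartTokenizer.from_pretrained("facebook/bart-base")
enc = tok(["My friends are cool but they eat too many carbs."], return_tensors="np")
(output_ids,) = sess.run(
    None,
    {
        "input_ids": enc["input_ids"],
        "attention_mask": enc["attention_mask"],
        "num_beams": np.array(4),
        "max_length": np.array(64),
        "decoder_start_token_id": np.array(2),  # facebook/bart-base default; an assumption here
    },
)
print(tok.batch_decode(output_ids, skip_special_tokens=True))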
| 346 | 1 |
'''simple docstring'''
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.t5.modeling_t5 import T5Block, T5Config, T5LayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class SpectrogramNotesEncoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    """simple docstring"""
    @register_to_config
    def __init__(
        self,
        max_length: int,
        vocab_size: int,
        d_model: int,
        dropout_rate: float,
        num_layers: int,
        num_heads: int,
        d_kv: int,
        d_ff: int,
        feed_forward_proj: str,
        is_decoder: bool = False,
    ):
        super().__init__()
        self.token_embedder = nn.Embedding(vocab_size, d_model)
        self.position_encoding = nn.Embedding(max_length, d_model)
        self.position_encoding.weight.requires_grad = False
        self.dropout_pre = nn.Dropout(p=dropout_rate)
        t5config = T5Config(
            vocab_size=vocab_size,
            d_model=d_model,
            num_heads=num_heads,
            d_kv=d_kv,
            d_ff=d_ff,
            dropout_rate=dropout_rate,
            feed_forward_proj=feed_forward_proj,
            is_decoder=is_decoder,
            is_encoder_decoder=False,
        )
        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            lyr = T5Block(t5config)
            self.encoders.append(lyr)
        self.layer_norm = T5LayerNorm(d_model)
        self.dropout_post = nn.Dropout(p=dropout_rate)
    def forward(self, encoder_input_tokens, encoder_inputs_mask):
        x = self.token_embedder(encoder_input_tokens)
        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length, device=encoder_input_tokens.device)
        x += self.position_encoding(inputs_positions)
        x = self.dropout_pre(x)
        # invert the attention mask
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape)
        for lyr in self.encoders:
            x = lyr(x, extended_attention_mask)[0]
        x = self.layer_norm(x)
        return self.dropout_post(x), encoder_inputs_mask
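get_extended_attention_mask comes from ModuleUtilsMixin; it turns the (batch, seq) padding mask into the additive bias the T5 blocks consume. The core transform in isolation (a sketch, not the mixin's full code):

import torch

def extended_attention_mask_sketch(mask: torch.Tensor, dtype=torch.float32) -> torch.Tensor:
    # mask: (batch, seq) with 1 for real tokens, 0 for padding
    ext = mask[:, None, None, :].to(dtype)       # broadcast over heads and query positions
    return (1.0 - ext) * torch.finfo(dtype).min  # ~0 where attended, very negative where masked

print(extended_attention_mask_sketch(torch.tensor([[1, 1, 0]])))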
| 346 |
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def pytest_addoption(parser):
    # pytest discovers this hook by name; it must be called pytest_addoption
    from transformers.testing_utils import pytest_addoption_shared
    pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main
    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
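With this conftest in place, the shared option is available to the whole suite; an invocation that exercises both hooks might look like this (illustrative):

# python -m pytest tests/ --make-reports=my_run
# pytest_terminal_summary_main then writes report files tagged with the "my_run" id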
| 346 | 1 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageQuestionAnsweringTool(PipelineTool):
    """simple docstring"""
    default_checkpoint = "dandelin/vilt-b32-finetuned-vqa"
    description = (
        "This is a tool that answers a question about an image. It takes an input named `image` which should be the "
        "image containing the information, as well as a `question` which should be the question in English. It "
        "returns a text that is the answer to the question."
    )
    name = "image_qa"
    pre_processor_class = AutoProcessor
    model_class = AutoModelForVisualQuestionAnswering
    inputs = ["image", "text"]
    outputs = ["text"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)
    def encode(self, image: "Image", question: str):
        return self.pre_processor(image, question, return_tensors="pt")
    def forward(self, inputs):
        with torch.no_grad():
            return self.model(**inputs).logits
    def decode(self, outputs):
        idx = outputs.argmax(-1).item()
        return self.model.config.id2label[idx]
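Usage sketch for the tool above; a PipelineTool chains encode, forward, and decode when called (the image path is illustrative and the checkpoint downloads on first use):

from PIL import Image

tool = ImageQuestionAnsweringTool()
image = Image.open("photo.jpg")  # any local RGB image
print(tool(image, "What is on the table?"))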
| 346 |
'''simple docstring'''
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.t5.modeling_t5 import T5Block, T5Config, T5LayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class SpectrogramNotesEncoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    """simple docstring"""
    @register_to_config
    def __init__(
        self,
        max_length: int,
        vocab_size: int,
        d_model: int,
        dropout_rate: float,
        num_layers: int,
        num_heads: int,
        d_kv: int,
        d_ff: int,
        feed_forward_proj: str,
        is_decoder: bool = False,
    ):
        super().__init__()
        self.token_embedder = nn.Embedding(vocab_size, d_model)
        self.position_encoding = nn.Embedding(max_length, d_model)
        self.position_encoding.weight.requires_grad = False
        self.dropout_pre = nn.Dropout(p=dropout_rate)
        t5config = T5Config(
            vocab_size=vocab_size,
            d_model=d_model,
            num_heads=num_heads,
            d_kv=d_kv,
            d_ff=d_ff,
            dropout_rate=dropout_rate,
            feed_forward_proj=feed_forward_proj,
            is_decoder=is_decoder,
            is_encoder_decoder=False,
        )
        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            lyr = T5Block(t5config)
            self.encoders.append(lyr)
        self.layer_norm = T5LayerNorm(d_model)
        self.dropout_post = nn.Dropout(p=dropout_rate)
    def forward(self, encoder_input_tokens, encoder_inputs_mask):
        x = self.token_embedder(encoder_input_tokens)
        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length, device=encoder_input_tokens.device)
        x += self.position_encoding(inputs_positions)
        x = self.dropout_pre(x)
        # invert the attention mask
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape)
        for lyr in self.encoders:
            x = lyr(x, extended_attention_mask)[0]
        x = self.layer_norm(x)
        return self.dropout_post(x), encoder_inputs_mask
| 346 | 1 |
'''simple docstring'''
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
__a =(UnCLIPScheduler,)
def UpperCamelCase__ ( self : Optional[int] , **__a : List[Any] ):
        config = {
"num_train_timesteps": 10_00,
"variance_type": "fixed_small_log",
"clip_sample": True,
"clip_sample_range": 1.0,
"prediction_type": "epsilon",
}
config.update(**__a )
return config
def UpperCamelCase__ ( self : List[str] ):
for timesteps in [1, 5, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=__a )
def UpperCamelCase__ ( self : int ):
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=__a )
def UpperCamelCase__ ( self : Dict ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=__a )
def UpperCamelCase__ ( self : Dict ):
for clip_sample_range in [1, 5, 10, 20]:
self.check_over_configs(clip_sample_range=__a )
def UpperCamelCase__ ( self : List[str] ):
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=__a )
def UpperCamelCase__ ( self : Union[str, Any] ):
for time_step in [0, 5_00, 9_99]:
for prev_timestep in [None, 5, 1_00, 2_50, 5_00, 7_50]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=__a , prev_timestep=__a )
def UpperCamelCase__ ( self : str ):
_a = self.scheduler_classes[0]
_a = self.get_scheduler_config(variance_type="fixed_small_log" )
_a = scheduler_class(**__a )
        assert torch.sum(torch.abs(scheduler._get_variance(0) - 1.0000e-10)) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(4_87 ) - 0.0549625 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(9_99 ) - 0.9994987 ) ) < 1e-5
def UpperCamelCase__ ( self : Optional[int] ):
_a = self.scheduler_classes[0]
_a = self.get_scheduler_config(variance_type="learned_range" )
_a = scheduler_class(**__a )
_a = 0.5
assert scheduler._get_variance(1 , predicted_variance=__a ) - -10.1712790 < 1e-5
assert scheduler._get_variance(4_87 , predicted_variance=__a ) - -5.7998052 < 1e-5
assert scheduler._get_variance(9_99 , predicted_variance=__a ) - -0.0010011 < 1e-5
def UpperCamelCase__ ( self : Any ):
_a = self.scheduler_classes[0]
_a = self.get_scheduler_config()
_a = scheduler_class(**__a )
_a = scheduler.timesteps
_a = self.dummy_model()
_a = self.dummy_sample_deter
_a = torch.manual_seed(0 )
for i, t in enumerate(__a ):
# 1. predict noise residual
_a = model(__a , __a )
# 2. predict previous mean of sample x_t-1
_a = scheduler.step(__a , __a , __a , generator=__a ).prev_sample
_a = pred_prev_sample
_a = torch.sum(torch.abs(__a ) )
_a = torch.mean(torch.abs(__a ) )
assert abs(result_sum.item() - 252.2682495 ) < 1e-2
assert abs(result_mean.item() - 0.3284743 ) < 1e-3
def UpperCamelCase__ ( self : Any ):
_a = self.scheduler_classes[0]
_a = self.get_scheduler_config()
_a = scheduler_class(**__a )
scheduler.set_timesteps(25 )
_a = scheduler.timesteps
_a = self.dummy_model()
_a = self.dummy_sample_deter
_a = torch.manual_seed(0 )
for i, t in enumerate(__a ):
# 1. predict noise residual
_a = model(__a , __a )
if i + 1 == timesteps.shape[0]:
_a = None
else:
_a = timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
_a = scheduler.step(
__a , __a , __a , prev_timestep=__a , generator=__a ).prev_sample
_a = pred_prev_sample
_a = torch.sum(torch.abs(__a ) )
_a = torch.mean(torch.abs(__a ) )
assert abs(result_sum.item() - 258.2044983 ) < 1e-2
assert abs(result_mean.item() - 0.3362038 ) < 1e-3
def UpperCamelCase__ ( self : str ):
pass
def UpperCamelCase__ ( self : Dict ):
pass
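The second denoising test above reflects how UnCLIP is driven on a shortened schedule: strided timesteps are not consecutive, so each step is told its previous timestep explicitly. The pairing logic in isolation (illustrative):

import torch
from diffusers import UnCLIPScheduler

sched = UnCLIPScheduler(num_train_timesteps=1000)
sched.set_timesteps(25)
ts = sched.timesteps
pairs = [(int(t), None if i + 1 == len(ts) else int(ts[i + 1])) for i, t in enumerate(ts)]
print(pairs[:3], pairs[-1])  # each t is paired with the next (earlier) timestep; the last with None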
| 346 |
'''simple docstring'''
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params
logger = logging.getLogger(__name__)
def get_checkpoint_callback(output_dir, metric):
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    elif metric == "loss":
        exp = "{val_avg_loss:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function."
        )
    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=1,
        every_n_epochs=1,
    )
    return checkpoint_callback
def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )
class __SCREAMING_SNAKE_CASE (pl.Callback ):
"""simple docstring"""
def UpperCamelCase__ ( self : Tuple , __a : Optional[int] , __a : Any ):
_a = {f'lr_group_{i}': param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
pl_module.logger.log_metrics(__a )
@rank_zero_only
def UpperCamelCase__ ( self : Tuple , __a : pl.Trainer , __a : pl.LightningModule , __a : str , __a : Dict=True ):
logger.info(f'***** {type_path} results at step {trainer.global_step:05d} *****' )
_a = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]} )
# Log results
_a = Path(pl_module.hparams.output_dir )
if type_path == "test":
_a = od / "test_results.txt"
_a = od / "test_generations.txt"
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
_a = od / f'{type_path}_results/{trainer.global_step:05d}.txt'
_a = od / f'{type_path}_generations/{trainer.global_step:05d}.txt'
results_file.parent.mkdir(exist_ok=__a )
generations_file.parent.mkdir(exist_ok=__a )
with open(__a , "a+" ) as writer:
for key in sorted(__a ):
if key in ["log", "progress_bar", "preds"]:
continue
_a = metrics[key]
if isinstance(__a , torch.Tensor ):
_a = val.item()
_a = f'{key}: {val:.6f}\n'
writer.write(__a )
if not save_generations:
return
if "preds" in metrics:
_a = "\n".join(metrics["preds"] )
generations_file.open("w+" ).write(__a )
@rank_zero_only
def UpperCamelCase__ ( self : Any , __a : List[Any] , __a : Dict ):
try:
_a = pl_module.model.model.num_parameters()
except AttributeError:
_a = pl_module.model.num_parameters()
_a = count_trainable_parameters(__a )
# mp stands for million parameters
trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6} )
@rank_zero_only
def UpperCamelCase__ ( self : Union[str, Any] , __a : pl.Trainer , __a : pl.LightningModule ):
save_json(pl_module.metrics , pl_module.metrics_save_path )
return self._write_logs(__a , __a , "test" )
@rank_zero_only
def UpperCamelCase__ ( self : Optional[int] , __a : pl.Trainer , __a : str ):
save_json(pl_module.metrics , pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
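These helpers plug straight into a Lightning Trainer; a wiring sketch (the logging callback's class name is obfuscated above, so Seq2SeqLoggingCallback is an assumed restoration):

import pytorch_lightning as pl

callbacks = [
    get_checkpoint_callback(output_dir="outputs", metric="rouge2"),
    get_early_stopping_callback(metric="rouge2", patience=3),
    Seq2SeqLoggingCallback(),  # assumed name of the pl.Callback defined above
]
trainer = pl.Trainer(max_epochs=1, callbacks=callbacks)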
| 346 | 1 |
'''simple docstring'''
from manim import *
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
def UpperCamelCase__ ( self : Dict ):
_a = Rectangle(height=0.5 , width=0.5 )
_a = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
_a = [mem.copy() for i in range(6 )]
_a = [mem.copy() for i in range(6 )]
_a = VGroup(*__a ).arrange(__a , buff=0 )
_a = VGroup(*__a ).arrange(__a , buff=0 )
_a = VGroup(__a , __a ).arrange(__a , buff=0 )
_a = Text("CPU" , font_size=24 )
_a = Group(__a , __a ).arrange(__a , buff=0.5 , aligned_edge=__a )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__a )
_a = [mem.copy() for i in range(4 )]
_a = VGroup(*__a ).arrange(__a , buff=0 )
_a = Text("GPU" , font_size=24 )
_a = Group(__a , __a ).arrange(__a , buff=0.5 , aligned_edge=__a )
gpu.move_to([-1, -1, 0] )
self.add(__a )
_a = [mem.copy() for i in range(6 )]
_a = VGroup(*__a ).arrange(__a , buff=0 )
_a = Text("Model" , font_size=24 )
_a = Group(__a , __a ).arrange(__a , buff=0.5 , aligned_edge=__a )
model.move_to([3, -1.0, 0] )
self.add(__a )
_a = []
for i, rect in enumerate(__a ):
rect.set_stroke(__a )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
_a = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(__a , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__a )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=__a , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=__a , buff=0.0 )
self.add(__a )
cpu_targs.append(__a )
_a = [mem.copy() for i in range(6 )]
_a = VGroup(*__a ).arrange(__a , buff=0 )
_a = Text("Loaded Checkpoint" , font_size=24 )
_a = Group(__a , __a ).arrange(__a , aligned_edge=__a , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
_a = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_a = MarkupText(
f'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(__a , __a )
_a = MarkupText(
f'<span fgcolor=\'{BLUE}\'>●</span> Checkpoint' , font_size=18 , )
blue_text.next_to(__a , DOWN * 2.4 , aligned_edge=key_text.get_left() )
_a = MarkupText(
f'Next, a <i><span fgcolor="{BLUE}">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor="{BLUE}">single shard</span>.' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(__a ) , Write(__a ) )
self.play(Write(__a , run_time=1 ) , Create(__a , run_time=1 ) )
_a = []
_a = []
for i, rect in enumerate(__a ):
_a = fill.copy().set_fill(__a , opacity=0.7 )
target.move_to(__a )
first_animations.append(GrowFromCenter(__a , run_time=1 ) )
_a = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(__a , run_time=1.5 ) )
self.play(*__a )
self.play(*__a )
self.wait()
| 346 |
'''simple docstring'''
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse('9.1.0'):
lowerCAmelCase_ : str = {
'linear': PIL.Image.Resampling.BILINEAR,
'bilinear': PIL.Image.Resampling.BILINEAR,
'bicubic': PIL.Image.Resampling.BICUBIC,
'lanczos': PIL.Image.Resampling.LANCZOS,
'nearest': PIL.Image.Resampling.NEAREST,
}
else:
lowerCAmelCase_ : Union[str, Any] = {
'linear': PIL.Image.LINEAR,
'bilinear': PIL.Image.BILINEAR,
'bicubic': PIL.Image.BICUBIC,
'lanczos': PIL.Image.LANCZOS,
'nearest': PIL.Image.NEAREST,
}
def pt_to_pil(images):
    # maps a [-1, 1]-ranged torch batch to a list of PIL images
    images = (images / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    images = numpy_to_pil(images)
    return images
def numpy_to_pil(images):
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("uint8")
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]
    return pil_images
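Round-trip sketch: a batch of [-1, 1]-ranged tensors in, PIL images out (uses the two helpers above):

import torch

batch = torch.rand(2, 3, 64, 64) * 2 - 1  # stand-in for model output in [-1, 1]
pil_images = pt_to_pil(batch)
print(len(pil_images), pil_images[0].size)  # 2 images of size (64, 64)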
| 346 | 1 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class __SCREAMING_SNAKE_CASE (unittest.TestCase ):
"""simple docstring"""
@property
def UpperCamelCase__ ( self : Optional[Any] ):
torch.manual_seed(0 )
_a = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , )
return model
def UpperCamelCase__ ( self : Union[str, Any] ):
_a = self.dummy_uncond_unet
_a = PNDMScheduler()
_a = PNDMPipeline(unet=__a , scheduler=__a )
pndm.to(__a )
pndm.set_progress_bar_config(disable=__a )
_a = torch.manual_seed(0 )
_a = pndm(generator=__a , num_inference_steps=20 , output_type="numpy" ).images
_a = torch.manual_seed(0 )
_a = pndm(generator=__a , num_inference_steps=20 , output_type="numpy" , return_dict=__a )[0]
_a = image[0, -3:, -3:, -1]
_a = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_a = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class __SCREAMING_SNAKE_CASE (unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase__ ( self : List[Any] ):
_a = "google/ddpm-cifar10-32"
_a = UNetaDModel.from_pretrained(__a )
_a = PNDMScheduler()
_a = PNDMPipeline(unet=__a , scheduler=__a )
pndm.to(__a )
pndm.set_progress_bar_config(disable=__a )
_a = torch.manual_seed(0 )
_a = pndm(generator=__a , output_type="numpy" ).images
_a = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_a = np.array([0.1564, 0.14645, 0.1406, 0.14715, 0.12425, 0.14045, 0.13115, 0.12175, 0.125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 346 |
'''simple docstring'''
import contextlib
import csv
import json
import os
import sqlitea
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope="session" )
def dataset():
    n = 10
    features = datasets.Features(
        {
            "tokens": datasets.Sequence(datasets.Value("string")),
            "labels": datasets.Sequence(datasets.ClassLabel(names=["negative", "positive"])),
            "answers": datasets.Sequence(
                {
                    "text": datasets.Value("string"),
                    "answer_start": datasets.Value("int32"),
                }
            ),
            "id": datasets.Value("int64"),
        }
    )
    dataset = datasets.Dataset.from_dict(
        {
            "tokens": [["foo"] * 5] * n,
            "labels": [[1] * 5] * n,
            "answers": [{"answer_start": [97], "text": ["1976"]}] * 10,
            "id": list(range(n)),
        },
        features=features,
    )
    return dataset
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Tuple , lowercase : int ) -> Optional[Any]:
_a = str(tmp_path_factory.mktemp("data" ) / "file.arrow" )
dataset.map(cache_file_name=lowercase )
return filename
# FILE_CONTENT + files
lowerCAmelCase_ : Union[str, Any] = '\\n Text data.\n Second line of data.'
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : List[str] ) -> List[Any]:
_a = tmp_path_factory.mktemp("data" ) / "file.txt"
_a = FILE_CONTENT
with open(lowercase , "w" ) as f:
f.write(lowercase )
return filename
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : str ) -> str:
import bza
_a = tmp_path_factory.mktemp("data" ) / "file.txt.bz2"
_a = bytes(lowercase , "utf-8" )
with bza.open(lowercase , "wb" ) as f:
f.write(lowercase )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : List[str] ) -> Optional[Any]:
import gzip
_a = str(tmp_path_factory.mktemp("data" ) / "file.txt.gz" )
_a = bytes(lowercase , "utf-8" )
with gzip.open(lowercase , "wb" ) as f:
f.write(lowercase )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Union[str, Any] ) -> Union[str, Any]:
if datasets.config.LZ4_AVAILABLE:
import lza.frame
_a = tmp_path_factory.mktemp("data" ) / "file.txt.lz4"
_a = bytes(lowercase , "utf-8" )
with lza.frame.open(lowercase , "wb" ) as f:
f.write(lowercase )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Any , lowercase : Tuple ) -> Optional[Any]:
if datasets.config.PY7ZR_AVAILABLE:
import pyazr
_a = tmp_path_factory.mktemp("data" ) / "file.txt.7z"
with pyazr.SevenZipFile(lowercase , "w" ) as archive:
archive.write(lowercase , arcname=os.path.basename(lowercase ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Dict , lowercase : Optional[Any] ) -> Dict:
import tarfile
_a = tmp_path_factory.mktemp("data" ) / "file.txt.tar"
with tarfile.TarFile(lowercase , "w" ) as f:
f.add(lowercase , arcname=os.path.basename(lowercase ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Any ) -> Union[str, Any]:
import lzma
_a = tmp_path_factory.mktemp("data" ) / "file.txt.xz"
_a = bytes(lowercase , "utf-8" )
with lzma.open(lowercase , "wb" ) as f:
f.write(lowercase )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : int , lowercase : Any ) -> Union[str, Any]:
import zipfile
_a = tmp_path_factory.mktemp("data" ) / "file.txt.zip"
with zipfile.ZipFile(lowercase , "w" ) as f:
f.write(lowercase , arcname=os.path.basename(lowercase ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Dict ) -> List[str]:
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
_a = tmp_path_factory.mktemp("data" ) / "file.txt.zst"
_a = bytes(lowercase , "utf-8" )
with zstd.open(lowercase , "wb" ) as f:
f.write(lowercase )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : List[str] ) -> Union[str, Any]:
_a = tmp_path_factory.mktemp("data" ) / "file.xml"
_a = textwrap.dedent(
"\\n <?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n <tmx version=\"1.4\">\n <header segtype=\"sentence\" srclang=\"ca\" />\n <body>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>" )
with open(lowercase , "w" ) as f:
f.write(lowercase )
return filename
lowerCAmelCase_ : Optional[int] = [
{'col_1': '0', 'col_2': 0, 'col_3': 0.0},
{'col_1': '1', 'col_2': 1, 'col_3': 1.0},
{'col_1': '2', 'col_2': 2, 'col_3': 2.0},
{'col_1': '3', 'col_2': 3, 'col_3': 3.0},
]
lowerCAmelCase_ : List[Any] = [
{'col_1': '4', 'col_2': 4, 'col_3': 4.0},
{'col_1': '5', 'col_2': 5, 'col_3': 5.0},
]
lowerCAmelCase_ : Dict = {
'col_1': ['0', '1', '2', '3'],
'col_2': [0, 1, 2, 3],
'col_3': [0.0, 1.0, 2.0, 3.0],
}
lowerCAmelCase_ : Dict = [
{'col_3': 0.0, 'col_1': '0', 'col_2': 0},
{'col_3': 1.0, 'col_1': '1', 'col_2': 1},
]
lowerCAmelCase_ : List[Any] = [
{'col_1': 's0', 'col_2': 0, 'col_3': 0.0},
{'col_1': 's1', 'col_2': 1, 'col_3': 1.0},
{'col_1': 's2', 'col_2': 2, 'col_3': 2.0},
{'col_1': 's3', 'col_2': 3, 'col_3': 3.0},
]
@pytest.fixture(scope="session" )
def _lowerCamelCase ( ) -> List[str]:
return DATA_DICT_OF_LISTS
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Union[str, Any] ) -> str:
_a = datasets.Dataset.from_dict(lowercase )
_a = str(tmp_path_factory.mktemp("data" ) / "dataset.arrow" )
dataset.map(cache_file_name=lowercase )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Dict ) -> Dict:
_a = str(tmp_path_factory.mktemp("data" ) / "dataset.sqlite" )
with contextlib.closing(sqlitea.connect(lowercase ) ) as con:
_a = con.cursor()
cur.execute("CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)" )
for item in DATA:
cur.execute("INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)" , tuple(item.values() ) )
con.commit()
return path
@pytest.fixture(scope="session" )
def csv_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.csv")
    with open(path, "w", newline="") as f:
        writer = csv.DictWriter(f, fieldnames=["col_1", "col_2", "col_3"])
        writer.writeheader()
        for item in DATA:
            writer.writerow(item)
    return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : int ) -> Optional[Any]:
_a = str(tmp_path_factory.mktemp("data" ) / "dataset2.csv" )
with open(lowercase , "w" , newline="" ) as f:
_a = csv.DictWriter(lowercase , fieldnames=["col_1", "col_2", "col_3"] )
writer.writeheader()
for item in DATA:
writer.writerow(lowercase )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Union[str, Any] , lowercase : Union[str, Any] ) -> int:
import bza
_a = tmp_path_factory.mktemp("data" ) / "dataset.csv.bz2"
with open(lowercase , "rb" ) as f:
_a = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
with bza.open(lowercase , "wb" ) as f:
f.write(lowercase )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Optional[int] , lowercase : Any , lowercase : Any ) -> List[str]:
_a = tmp_path_factory.mktemp("data" ) / "dataset.csv.zip"
with zipfile.ZipFile(lowercase , "w" ) as f:
f.write(lowercase , arcname=os.path.basename(lowercase ) )
f.write(lowercase , arcname=os.path.basename(lowercase ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Dict , lowercase : Any , lowercase : List[Any] ) -> Dict:
_a = tmp_path_factory.mktemp("data" ) / "dataset.csv.zip"
with zipfile.ZipFile(lowercase , "w" ) as f:
f.write(lowercase , arcname=os.path.basename(csv_path.replace(".csv" , ".CSV" ) ) )
f.write(lowercase , arcname=os.path.basename(csva_path.replace(".csv" , ".CSV" ) ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Any , lowercase : Optional[Any] , lowercase : int ) -> int:
_a = tmp_path_factory.mktemp("data" ) / "dataset_with_dir.csv.zip"
with zipfile.ZipFile(lowercase , "w" ) as f:
f.write(lowercase , arcname=os.path.join("main_dir" , os.path.basename(lowercase ) ) )
f.write(lowercase , arcname=os.path.join("main_dir" , os.path.basename(lowercase ) ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : List[Any] ) -> Union[str, Any]:
_a = str(tmp_path_factory.mktemp("data" ) / "dataset.parquet" )
_a = pa.schema(
{
"col_1": pa.string(),
"col_2": pa.intaa(),
"col_3": pa.floataa(),
} )
with open(lowercase , "wb" ) as f:
_a = pq.ParquetWriter(lowercase , schema=lowercase )
_a = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(lowercase ) )] for k in DATA[0]} , schema=lowercase )
writer.write_table(lowercase )
writer.close()
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : str ) -> Union[str, Any]:
_a = str(tmp_path_factory.mktemp("data" ) / "dataset.json" )
_a = {"data": DATA}
with open(lowercase , "w" ) as f:
json.dump(lowercase , lowercase )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : int ) -> Union[str, Any]:
_a = str(tmp_path_factory.mktemp("data" ) / "dataset.json" )
_a = {"data": DATA_DICT_OF_LISTS}
with open(lowercase , "w" ) as f:
json.dump(lowercase , lowercase )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Optional[int] ) -> str:
_a = str(tmp_path_factory.mktemp("data" ) / "dataset.jsonl" )
with open(lowercase , "w" ) as f:
for item in DATA:
f.write(json.dumps(lowercase ) + "\n" )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : int ) -> List[str]:
_a = str(tmp_path_factory.mktemp("data" ) / "dataset2.jsonl" )
with open(lowercase , "w" ) as f:
for item in DATA:
f.write(json.dumps(lowercase ) + "\n" )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Optional[Any] ) -> Optional[Any]:
_a = str(tmp_path_factory.mktemp("data" ) / "dataset_312.jsonl" )
with open(lowercase , "w" ) as f:
for item in DATA_312:
f.write(json.dumps(lowercase ) + "\n" )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : str ) -> int:
_a = str(tmp_path_factory.mktemp("data" ) / "dataset-str.jsonl" )
with open(lowercase , "w" ) as f:
for item in DATA_STR:
f.write(json.dumps(lowercase ) + "\n" )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : List[str] , lowercase : Dict ) -> Tuple:
import gzip
_a = str(tmp_path_factory.mktemp("data" ) / "dataset.txt.gz" )
with open(lowercase , "rb" ) as orig_file:
with gzip.open(lowercase , "wb" ) as zipped_file:
zipped_file.writelines(lowercase )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Union[str, Any] , lowercase : List[Any] ) -> List[Any]:
import gzip
_a = str(tmp_path_factory.mktemp("data" ) / "dataset.jsonl.gz" )
with open(lowercase , "rb" ) as orig_file:
with gzip.open(lowercase , "wb" ) as zipped_file:
zipped_file.writelines(lowercase )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Optional[int] , lowercase : List[Any] , lowercase : int ) -> str:
_a = tmp_path_factory.mktemp("data" ) / "dataset.jsonl.zip"
with zipfile.ZipFile(lowercase , "w" ) as f:
f.write(lowercase , arcname=os.path.basename(lowercase ) )
f.write(lowercase , arcname=os.path.basename(lowercase ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Union[str, Any] , lowercase : Optional[int] , lowercase : int , lowercase : List[Any] ) -> Optional[int]:
_a = tmp_path_factory.mktemp("data" ) / "dataset_nested.jsonl.zip"
with zipfile.ZipFile(lowercase , "w" ) as f:
f.write(lowercase , arcname=os.path.join("nested" , os.path.basename(lowercase ) ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Optional[int] , lowercase : List[str] , lowercase : str ) -> Optional[Any]:
_a = tmp_path_factory.mktemp("data" ) / "dataset_with_dir.jsonl.zip"
with zipfile.ZipFile(lowercase , "w" ) as f:
f.write(lowercase , arcname=os.path.join("main_dir" , os.path.basename(lowercase ) ) )
f.write(lowercase , arcname=os.path.join("main_dir" , os.path.basename(lowercase ) ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Tuple , lowercase : Any , lowercase : Optional[int] ) -> int:
_a = tmp_path_factory.mktemp("data" ) / "dataset.jsonl.tar"
with tarfile.TarFile(lowercase , "w" ) as f:
f.add(lowercase , arcname=os.path.basename(lowercase ) )
f.add(lowercase , arcname=os.path.basename(lowercase ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : str , lowercase : List[str] , lowercase : Union[str, Any] , lowercase : Union[str, Any] ) -> Optional[Any]:
_a = tmp_path_factory.mktemp("data" ) / "dataset_nested.jsonl.tar"
with tarfile.TarFile(lowercase , "w" ) as f:
f.add(lowercase , arcname=os.path.join("nested" , os.path.basename(lowercase ) ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : int ) -> str:
_a = ["0", "1", "2", "3"]
_a = str(tmp_path_factory.mktemp("data" ) / "dataset.txt" )
with open(lowercase , "w" ) as f:
for item in data:
f.write(item + "\n" )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : str ) -> Dict:
_a = ["0", "1", "2", "3"]
_a = str(tmp_path_factory.mktemp("data" ) / "dataset2.txt" )
with open(lowercase , "w" ) as f:
for item in data:
f.write(item + "\n" )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Union[str, Any] ) -> Dict:
_a = ["0", "1", "2", "3"]
_a = tmp_path_factory.mktemp("data" ) / "dataset.abc"
with open(lowercase , "w" ) as f:
for item in data:
f.write(item + "\n" )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Any , lowercase : Union[str, Any] , lowercase : Any ) -> Optional[Any]:
_a = tmp_path_factory.mktemp("data" ) / "dataset.text.zip"
with zipfile.ZipFile(lowercase , "w" ) as f:
f.write(lowercase , arcname=os.path.basename(lowercase ) )
f.write(lowercase , arcname=os.path.basename(lowercase ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Dict , lowercase : List[str] , lowercase : List[str] ) -> Union[str, Any]:
_a = tmp_path_factory.mktemp("data" ) / "dataset_with_dir.text.zip"
with zipfile.ZipFile(lowercase , "w" ) as f:
f.write(lowercase , arcname=os.path.join("main_dir" , os.path.basename(lowercase ) ) )
f.write(lowercase , arcname=os.path.join("main_dir" , os.path.basename(lowercase ) ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Tuple , lowercase : int , lowercase : str ) -> int:
_a = tmp_path_factory.mktemp("data" ) / "dataset.ext.zip"
with zipfile.ZipFile(lowercase , "w" ) as f:
f.write(lowercase , arcname=os.path.basename("unsupported.ext" ) )
f.write(lowercase , arcname=os.path.basename("unsupported_2.ext" ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : List[Any] ) -> Any:
_a = "\n".join(["First", "Second\u2029with Unicode new line", "Third"] )
_a = str(tmp_path_factory.mktemp("data" ) / "dataset_with_unicode_new_lines.txt" )
with open(lowercase , "w" , encoding="utf-8" ) as f:
f.write(lowercase )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( ) -> Optional[Any]:
return os.path.join("tests" , "features" , "data" , "test_image_rgb.jpg" )
@pytest.fixture(scope="session" )
def _lowerCamelCase ( ) -> Optional[int]:
return os.path.join("tests" , "features" , "data" , "test_audio_44100.wav" )
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Any , lowercase : str ) -> Dict:
_a = tmp_path_factory.mktemp("data" ) / "dataset.img.zip"
with zipfile.ZipFile(lowercase , "w" ) as f:
f.write(lowercase , arcname=os.path.basename(lowercase ) )
f.write(lowercase , arcname=os.path.basename(lowercase ).replace(".jpg" , "2.jpg" ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : str ) -> str:
_a = tmp_path_factory.mktemp("data_dir" )
(data_dir / "subdir").mkdir()
with open(data_dir / "subdir" / "train.txt" , "w" ) as f:
f.write("foo\n" * 10 )
with open(data_dir / "subdir" / "test.txt" , "w" ) as f:
f.write("bar\n" * 10 )
# hidden file
with open(data_dir / "subdir" / ".test.txt" , "w" ) as f:
f.write("bar\n" * 10 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / ".subdir" / "train.txt" , "w" ) as f:
f.write("foo\n" * 10 )
with open(data_dir / ".subdir" / "test.txt" , "w" ) as f:
f.write("bar\n" * 10 )
return data_dir
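These are ordinary session-scoped pytest fixtures: a test names one as a parameter and receives the materialized path. For example, using the restored csv_path fixture (illustrative test; most other fixture names in this dump remain obfuscated):

import datasets

def test_csv_roundtrip(csv_path):  # pytest injects the fixture defined above
    ds = datasets.load_dataset("csv", data_files=csv_path, split="train")
    assert ds.column_names == ["col_1", "col_2", "col_3"]
    assert len(ds) == 4  # the four rows in DATA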
| 346 | 1 |
'''simple docstring'''
from __future__ import annotations
def max_sum_in_array(array: list[int], k: int) -> int:
    # returns the maximum sum of any k consecutive elements of `array`
    if len(array) < k or k < 0:
        raise ValueError("Invalid Input")
    max_sum = current_sum = sum(array[:k])
    for i in range(len(array) - k):
        current_sum = current_sum - array[i] + array[i + k]
        max_sum = max(max_sum, current_sum)
    return max_sum
if __name__ == "__main__":
from doctest import testmod
from random import randint
testmod()
    array = [randint(-1000, 1000) for i in range(100)]
    k = randint(0, 110)
    print(f"The maximum sum of {k} consecutive elements is {max_sum_in_array(array, k)}")
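The loop above is the classic O(n) sliding window: subtract the element that leaves the window, add the one that enters. A worked check against brute force:

values = [1, 4, 2, 10, 23, 3, 1, 0, 20]
brute = max(sum(values[i : i + 4]) for i in range(len(values) - 4 + 1))
assert brute == max_sum_in_array(values, 4) == 39  # window [4, 2, 10, 23]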
| 346 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
__a =['image_processor', 'tokenizer']
__a ='LayoutLMv2ImageProcessor'
__a =('LayoutXLMTokenizer', 'LayoutXLMTokenizerFast')
def __init__( self : Dict , __a : int=None , __a : List[Any]=None , **__a : str ):
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , __a , )
_a = kwargs.pop("feature_extractor" )
_a = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(__a , __a )
def __call__( self : Optional[int] , __a : Optional[Any] , __a : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , __a : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , __a : Union[List[List[int]], List[List[List[int]]]] = None , __a : Optional[Union[List[int], List[List[int]]]] = None , __a : bool = True , __a : Union[bool, str, PaddingStrategy] = False , __a : Union[bool, str, TruncationStrategy] = None , __a : Optional[int] = None , __a : int = 0 , __a : Optional[int] = None , __a : Optional[bool] = None , __a : Optional[bool] = None , __a : bool = False , __a : bool = False , __a : bool = False , __a : bool = False , __a : bool = True , __a : Optional[Union[str, TensorType]] = None , **__a : Optional[Any] , ):
# verify input
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
"You cannot provide bounding boxes "
"if you initialized the image processor with apply_ocr set to True." )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
"You cannot provide word labels if you initialized the image processor with apply_ocr set to True." )
if return_overflowing_tokens is True and return_offsets_mapping is False:
raise ValueError("You cannot return overflowing tokens without returning the offsets mapping." )
# first, apply the image processor
_a = self.image_processor(images=__a , return_tensors=__a )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(__a , __a ):
_a = [text] # add batch dimension (as the image processor always adds a batch dimension)
_a = features["words"]
_a = self.tokenizer(
text=text if text is not None else features["words"] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features["boxes"] , word_labels=__a , add_special_tokens=__a , padding=__a , truncation=__a , max_length=__a , stride=__a , pad_to_multiple_of=__a , return_token_type_ids=__a , return_attention_mask=__a , return_overflowing_tokens=__a , return_special_tokens_mask=__a , return_offsets_mapping=__a , return_length=__a , verbose=__a , return_tensors=__a , **__a , )
# add pixel values
_a = features.pop("pixel_values" )
if return_overflowing_tokens is True:
_a = self.get_overflowing_images(__a , encoded_inputs["overflow_to_sample_mapping"] )
_a = images
return encoded_inputs
def get_overflowing_images( self , images , overflow_to_sample_mapping ):
# in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
images_with_overflow = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(images_with_overflow ) != len(overflow_to_sample_mapping ):
raise ValueError(
"Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
f' {len(images_with_overflow )} and {len(overflow_to_sample_mapping )}' )
return images_with_overflow
def batch_decode( self , *args , **kwargs ):
return self.tokenizer.batch_decode(*args , **kwargs )
def decode( self , *args , **kwargs ):
return self.tokenizer.decode(*args , **kwargs )
@property
def model_input_names( self ):
return ["input_ids", "bbox", "attention_mask", "image"]
@property
def feature_extractor_class( self ):
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , FutureWarning , )
return self.image_processor_class
@property
def feature_extractor( self ):
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , FutureWarning , )
return self.image_processor
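# Usage sketch for the processor above, under its upstream name LayoutXLMProcessor
# (apply_ocr=True needs pytesseract installed; the checkpoint name is illustrative):
#
#   from transformers import LayoutXLMProcessor
#   from PIL import Image
#   processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")
#   encoding = processor(Image.open("page.png").convert("RGB"), return_tensors="pt")
#   print(encoding.keys())   # input_ids, bbox, attention_mask, image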
| 346 | 1 |
'''simple docstring'''
from collections import defaultdict
from math import ceil, sqrt
def solution( t_limit : int = 100_0000 , n_limit : int = 10 ) -> int:
count = defaultdict(int )
for outer_width in range(3 , (t_limit // 4) + 2 ):
if outer_width * outer_width > t_limit:
hole_width_lower_bound = max(
ceil(sqrt(outer_width * outer_width - t_limit ) ) , 1 )
else:
hole_width_lower_bound = 1
# the hole width must have the same parity as the outer width
hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
for hole_width in range(hole_width_lower_bound , outer_width - 1 , 2 ):
count[outer_width * outer_width - hole_width * hole_width] += 1
return sum(1 for n in count.values() if 1 <= n <= n_limit )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 346 |
'''simple docstring'''
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase_ : Dict = logging.get_logger(__name__)
lowerCAmelCase_ : int = '▁'
lowerCAmelCase_ : Optional[Any] = {
'vocab_file': 'vocab.json',
'spm_file': 'sentencepiece.bpe.model',
}
lowerCAmelCase_ : Optional[int] = {
'vocab_file': {
'facebook/s2t-small-librispeech-asr': (
'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json'
),
},
'spm_file': {
'facebook/s2t-small-librispeech-asr': (
'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model'
)
},
}
lowerCAmelCase_ : List[str] = {
'facebook/s2t-small-librispeech-asr': 10_24,
}
lowerCAmelCase_ : List[Any] = ['pt', 'fr', 'ru', 'nl', 'ro', 'it', 'es', 'de']
lowerCAmelCase_ : Union[str, Any] = {'mustc': MUSTC_LANGS}
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
__a =VOCAB_FILES_NAMES
__a =PRETRAINED_VOCAB_FILES_MAP
__a =MAX_MODEL_INPUT_SIZES
__a =['input_ids', 'attention_mask']
__a =[]
def __init__( self : Optional[Any] , __a : Optional[Any] , __a : Any , __a : Any="<s>" , __a : List[str]="</s>" , __a : str="<pad>" , __a : List[str]="<unk>" , __a : Union[str, Any]=False , __a : Any=False , __a : List[str]=None , __a : Optional[int]=None , __a : Optional[Dict[str, Any]] = None , **__a : int , ):
_a = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__a , eos_token=__a , unk_token=__a , pad_token=__a , do_upper_case=__a , do_lower_case=__a , tgt_lang=__a , lang_codes=__a , sp_model_kwargs=self.sp_model_kwargs , **__a , )
_a = do_upper_case
_a = do_lower_case
_a = load_json(__a )
_a = {v: k for k, v in self.encoder.items()}
_a = spm_file
_a = load_spm(__a , self.sp_model_kwargs )
if lang_codes is not None:
_a = lang_codes
_a = LANGUAGES[lang_codes]
_a = [f'<lang:{lang}>' for lang in self.langs]
_a = {lang: self.sp_model.PieceToId(f'<lang:{lang}>' ) for lang in self.langs}
_a = self.lang_tokens
_a = tgt_lang if tgt_lang is not None else self.langs[0]
self.set_tgt_lang_special_tokens(self._tgt_lang )
else:
_a = {}
@property
def UpperCamelCase__ ( self : str ):
return len(self.encoder )
@property
def UpperCamelCase__ ( self : str ):
return self._tgt_lang
@tgt_lang.setter
def UpperCamelCase__ ( self : Optional[int] , __a : Any ):
_a = new_tgt_lang
self.set_tgt_lang_special_tokens(__a )
def UpperCamelCase__ ( self : List[Any] , __a : str ):
_a = self.lang_code_to_id[tgt_lang]
_a = [lang_code_id]
def UpperCamelCase__ ( self : Dict , __a : str ):
return self.sp_model.encode(__a , out_type=__a )
def UpperCamelCase__ ( self : List[str] , __a : Any ):
return self.encoder.get(__a , self.encoder[self.unk_token] )
def UpperCamelCase__ ( self : str , __a : int ):
return self.decoder.get(__a , self.unk_token )
def UpperCamelCase__ ( self : str , __a : List[str] ):
_a = []
_a = ""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
_a = self.sp_model.decode(__a )
out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
_a = []
else:
current_sub_tokens.append(__a )
_a = self.sp_model.decode(__a )
out_string += decoded.upper() if self.do_upper_case else decoded
return out_string.strip()
def UpperCamelCase__ ( self : int , __a : Any , __a : int=None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id]
def UpperCamelCase__ ( self : Any , __a : List[int] , __a : Optional[List[int]] = None , __a : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__a , token_ids_a=__a , already_has_special_tokens=__a )
_a = [1] * len(self.prefix_tokens )
_a = [1]
if token_ids_a is None:
return prefix_ones + ([0] * len(__a )) + suffix_ones
return prefix_ones + ([0] * len(__a )) + ([0] * len(__a )) + suffix_ones
def UpperCamelCase__ ( self : Union[str, Any] ):
_a = self.encoder.copy()
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Union[str, Any] ):
_a = self.__dict__.copy()
_a = None
return state
def __setstate__( self : str , __a : Dict ):
_a = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
_a = {}
_a = load_spm(self.spm_file , self.sp_model_kwargs )
def UpperCamelCase__ ( self : List[str] , __a : str , __a : Optional[str] = None ):
_a = Path(__a )
assert save_dir.is_dir(), f'{save_directory} should be a directory'
_a = save_dir / (
(filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
)
_a = save_dir / (
(filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
)
save_json(self.encoder , __a )
if os.path.abspath(self.spm_file ) != os.path.abspath(__a ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , __a )
elif not os.path.isfile(self.spm_file ):
with open(__a , "wb" ) as fi:
_a = self.sp_model.serialized_model_proto()
fi.write(__a )
return (str(__a ), str(__a ))
def load_spm( path : str , sp_model_kwargs : Dict[str, Any] ) -> sentencepiece.SentencePieceProcessor:
spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs )
spm.Load(str(path ) )
return spm
def load_json( path : str ) -> Union[Dict, List]:
with open(path , "r" ) as f:
return json.load(f )
def save_json( data , path : str ) -> None:
with open(path , "w" ) as f:
json.dump(data , f , indent=2 )
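# Minimal usage sketch for this tokenizer under its public transformers name
# (assumes the Hub checkpoint from the maps above is reachable):
#
#   from transformers import Speech2TextTokenizer
#   tok = Speech2TextTokenizer.from_pretrained("facebook/s2t-small-librispeech-asr")
#   ids = tok("hello world").input_ids      # sentencepiece pieces -> vocab ids + </s>
#   print(tok.decode(ids, skip_special_tokens=True))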
| 346 | 1 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase_ : Optional[Any] = logging.get_logger(__name__)
lowerCAmelCase_ : Tuple = torch.device('cpu')
def prepare_img() -> Image.Image:
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
im = Image.open(requests.get(url , stream=True ).raw )
return im
def get_expected_output( swiftformer_name ):
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.1_7_0_3E0_0, 2.1_1_0_7E0_0, -2.0_8_1_1E0_0, 8.8_6_8_5E-0_1, 2.4_3_6_0E-0_1] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.9_6_3_6E-0_1, 2.3_4_7_8E-0_1, -1.6_9_6_3E0_0, -1.7_3_8_1E0_0, -8.6_3_3_7E-0_1] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.2_7_6_8E-0_1, -4.7_4_2_9E-0_1, -1.0_8_9_7E0_0, -1.0_2_4_8E0_0, 3.5_5_2_3E-0_2] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.5_3_3_0E-0_1, 2.4_2_1_1E-0_1, -6.0_1_8_5E-0_1, -8.2_7_8_9E-0_1, -6.0_4_4_6E-0_2] )
def rename_key( dct , old , new ):
val = dct.pop(old )
dct[new] = val
def create_rename_keys( state_dict ):
rename_keys = []
for k in state_dict.keys():
_a = k
if ".pwconv" in k:
_a = k_new.replace(".pwconv" , ".point_wise_conv" )
if ".dwconv" in k:
_a = k_new.replace(".dwconv" , ".depth_wise_conv" )
if ".Proj." in k:
_a = k_new.replace(".Proj." , ".proj." )
if "patch_embed" in k_new:
_a = k_new.replace("patch_embed" , "swiftformer.patch_embed.patch_embedding" )
if "network" in k_new:
_a = k_new.split("." )
if ls[2].isdigit():
_a = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." + ".".join(ls[3:] )
else:
_a = k_new.replace("network" , "swiftformer.encoder.network" )
rename_keys.append((k, k_new) )
return rename_keys
@torch.no_grad()
def convert_swiftformer_checkpoint( swiftformer_name , pytorch_dump_folder_path , original_ckpt ):
config = SwiftFormerConfig()
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
config.num_labels = 1000
repo_id = "huggingface/label-files"
filename = "imagenet-1k-id2label.json"
id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset" ) , "r" ) )
id2label = {int(k ): v for k, v in id2label.items()}
config.id2label = id2label
config.label2id = {v: k for k, v in id2label.items()}
# size of the architecture
if swiftformer_name == "swiftformer_xs":
config.depths = [3, 3, 6, 4]
config.embed_dims = [48, 56, 112, 220]
elif swiftformer_name == "swiftformer_s":
config.depths = [3, 3, 9, 6]
config.embed_dims = [48, 64, 168, 224]
elif swiftformer_name == "swiftformer_l1":
config.depths = [4, 3, 10, 5]
config.embed_dims = [48, 96, 192, 384]
elif swiftformer_name == "swiftformer_l3":
config.depths = [4, 4, 12, 6]
config.embed_dims = [64, 128, 320, 512]
# load state_dict of original model, remove and rename some keys
if original_ckpt:
if original_ckpt.startswith("https" ):
checkpoint = torch.hub.load_state_dict_from_url(original_ckpt , map_location="cpu" , check_hash=True )
else:
checkpoint = torch.load(original_ckpt , map_location="cpu" )
state_dict = checkpoint
rename_keys = create_rename_keys(state_dict )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(state_dict , rename_key_src , rename_key_dest )
# load HuggingFace model
hf_model = SwiftFormerForImageClassification(config ).eval()
hf_model.load_state_dict(state_dict )
# prepare test inputs
image = prepare_img()
processor = ViTImageProcessor.from_pretrained("preprocessor_config" )
inputs = processor(images=image , return_tensors="pt" )
# compare outputs from both models
timm_logits = get_expected_output(swiftformer_name )
hf_logits = hf_model(inputs["pixel_values"] ).logits
assert hf_logits.shape == torch.Size([1, 1000] )
assert torch.allclose(hf_logits[0, 0:5] , timm_logits , atol=1e-3 )
Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
print(f'Saving model {swiftformer_name} to {pytorch_dump_folder_path}' )
hf_model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
lowerCAmelCase_ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--swiftformer_name',
default='swiftformer_xs',
choices=['swiftformer_xs', 'swiftformer_s', 'swiftformer_l1', 'swiftformer_l3'],
type=str,
help='Name of the SwiftFormer model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='./converted_outputs/',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--original_ckpt', default=None, type=str, help='Path to the original model checkpoint.')
lowerCAmelCase_ : Optional[Any] = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
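# Illustrative shell invocation of the converter above (paths are made up):
#
#   python convert_swiftformer_original_to_hf.py \
#       --swiftformer_name swiftformer_xs \
#       --pytorch_dump_folder_path ./converted_outputs/ \
#       --original_ckpt /path/to/swiftformer_xs.pth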
| 346 |
'''simple docstring'''
from manim import *
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
def UpperCamelCase__ ( self : Dict ):
_a = Rectangle(height=0.5 , width=0.5 )
_a = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
_a = [mem.copy() for i in range(6 )]
_a = [mem.copy() for i in range(6 )]
_a = VGroup(*__a ).arrange(__a , buff=0 )
_a = VGroup(*__a ).arrange(__a , buff=0 )
_a = VGroup(__a , __a ).arrange(__a , buff=0 )
_a = Text("CPU" , font_size=24 )
_a = Group(__a , __a ).arrange(__a , buff=0.5 , aligned_edge=__a )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__a )
_a = [mem.copy() for i in range(4 )]
_a = VGroup(*__a ).arrange(__a , buff=0 )
_a = Text("GPU" , font_size=24 )
_a = Group(__a , __a ).arrange(__a , buff=0.5 , aligned_edge=__a )
gpu.move_to([-1, -1, 0] )
self.add(__a )
_a = [mem.copy() for i in range(6 )]
_a = VGroup(*__a ).arrange(__a , buff=0 )
_a = Text("Model" , font_size=24 )
_a = Group(__a , __a ).arrange(__a , buff=0.5 , aligned_edge=__a )
model.move_to([3, -1.0, 0] )
self.add(__a )
_a = []
for i, rect in enumerate(__a ):
rect.set_stroke(__a )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
_a = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(__a , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__a )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=__a , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=__a , buff=0.0 )
self.add(__a )
cpu_targs.append(__a )
_a = [mem.copy() for i in range(6 )]
_a = VGroup(*__a ).arrange(__a , buff=0 )
_a = Text("Loaded Checkpoint" , font_size=24 )
_a = Group(__a , __a ).arrange(__a , aligned_edge=__a , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
_a = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_a = MarkupText(
f'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(__a , __a )
_a = MarkupText(
f'<span fgcolor=\'{BLUE}\'>●</span> Checkpoint' , font_size=18 , )
blue_text.next_to(__a , DOWN * 2.4 , aligned_edge=key_text.get_left() )
_a = MarkupText(
f'Next, a <i><span fgcolor="{BLUE}">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor="{BLUE}">single shard</span>.' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(__a ) , Write(__a ) )
self.play(Write(__a , run_time=1 ) , Create(__a , run_time=1 ) )
_a = []
_a = []
for i, rect in enumerate(__a ):
_a = fill.copy().set_fill(__a , opacity=0.7 )
target.move_to(__a )
first_animations.append(GrowFromCenter(__a , run_time=1 ) )
_a = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(__a , run_time=1.5 ) )
self.play(*__a )
self.play(*__a )
self.wait()
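# To render the scene above with Manim Community (command is illustrative; the
# class name in this dump is the obfuscated placeholder):
#
#   manim -pql this_file.py __SCREAMING_SNAKE_CASE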
| 346 | 1 |
'''simple docstring'''
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
_has_s3fs = importlib.util.find_spec('s3fs') is not None
if _has_s3fs:
from .s3filesystem import S3FileSystem # noqa: F401
COMPRESSION_FILESYSTEMS: List[compression.BaseCompressedFileFileSystem] = [
compression.Bz2FileSystem,
compression.GzipFileSystem,
compression.Lz4FileSystem,
compression.XzFileSystem,
compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(f"""A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.""")
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def extract_path_from_uri( dataset_path : str ) -> str:
if "://" in dataset_path:
dataset_path = dataset_path.split("://" )[1]
return dataset_path
def is_remote_filesystem( fs : fsspec.AbstractFileSystem ) -> bool:
if fs is not None and fs.protocol != "file":
return True
else:
return False
def rename( fs : fsspec.AbstractFileSystem , src : str , dst : str ) -> None:
is_local = not is_remote_filesystem(fs )
if is_local:
# LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
shutil.move(fs._strip_protocol(src ) , fs._strip_protocol(dst ) )
else:
fs.mv(src , dst , recursive=True )
def _reset_fsspec_lock() -> None:
if hasattr(fsspec.asyn , "reset_lock" ):
# for future fsspec>2022.05.0
fsspec.asyn.reset_lock()
else:
fsspec.asyn.iothread[0] = None
fsspec.asyn.loop[0] = None
fsspec.asyn.lock = threading.Lock()
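# Small usage sketch for the helpers above (the in-memory fsspec filesystem is
# used purely for illustration):
#
#   assert extract_path_from_uri("s3://my-bucket/data") == "my-bucket/data"
#   assert extract_path_from_uri("/local/path") == "/local/path"
#   assert is_remote_filesystem(fsspec.filesystem("memory"))  # protocol != "file"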
| 346 |
'''simple docstring'''
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
lowerCAmelCase_ : Tuple = WebClient(token=os.environ['CI_SLACK_BOT_TOKEN'])
def handle_test_results( test_results ):
expressions = test_results.split(" " )
failed = 0
success = 0
# When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
# When it is too long, those signs are not present.
time_spent = expressions[-2] if "=" in expressions[-1] else expressions[-1]
for i, expression in enumerate(expressions ):
if "failed" in expression:
failed += int(expressions[i - 1] )
if "passed" in expression:
success += int(expressions[i - 1] )
return failed, success, time_spent
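# Worked example for handle_test_results above (a sketch): a pytest tail such
# as "== 2 failed, 98 passed in 0:01:30 ==" parses to (2, 98, "0:01:30"):
#
#   failed, success, time_spent = handle_test_results("== 2 failed, 98 passed in 0:01:30 ==")
#   assert (failed, success, time_spent) == (2, 98, "0:01:30")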
def extract_first_line_failure( failures_short_lines ):
failures = {}
failure = None
in_error = False
for line in failures_short_lines.split("\n" ):
if re.search(r"_ \[doctest\]" , line ):
in_error = True
failure = line.split(" " )[2]
elif in_error and not line.split(" " )[0].isdigit():
failures[failure] = line
in_error = False
return failures
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self , title : str , doc_test_results : Dict ):
self.title = title
self._time_spent = doc_test_results["time_spent"].split("," )[0]
self.n_success = doc_test_results["success"]
self.n_failures = doc_test_results["failures"]
self.n_tests = self.n_success + self.n_failures
# Failures and success of the modeling tests
self.doc_test_results = doc_test_results
@property
def time( self ):
time_spent = [self._time_spent]
total_secs = 0
for time in time_spent:
time_parts = time.split(":" )
# Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
if len(time_parts ) == 1:
time_parts = [0, 0, time_parts[0]]
hours , minutes , seconds = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
total_secs += hours * 36_00 + minutes * 60 + seconds
hours , minutes , seconds = total_secs // 36_00, (total_secs % 36_00) // 60, total_secs % 60
return f'{int(hours )}h{int(minutes )}m{int(seconds )}s'
@property
def header( self ):
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
def no_failures( self ):
return {
"type": "section",
"text": {
"type": "plain_text",
"text": f'🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.',
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
@property
def failures( self ):
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
f'There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in'
f' {self.time}.'
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
@property
def category_failures( self ):
line_length = 40
category_failures = {k: v["failed"] for k, v in doc_test_results.items() if isinstance(v , dict )}
report = ""
for category, failures in category_failures.items():
if len(failures ) == 0:
continue
if report != "":
report += "\n\n"
report += f'*{category} failures*:'.ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
report += "`"
report += "`\n`".join(failures )
report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": f'The following examples had failures:\n\n\n{report}\n',
},
}
@property
def payload( self ):
blocks = [self.header]
if self.n_failures > 0:
blocks.append(self.failures )
if self.n_failures > 0:
blocks.extend([self.category_failures] )
if self.n_failures == 0:
blocks.append(self.no_failures )
return json.dumps(blocks )
@staticmethod
def error_out():
payload = [
{
"type": "section",
"text": {
"type": "plain_text",
"text": "There was an issue running the tests.",
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
]
print("Sending the following payload" )
print(json.dumps({"blocks": json.loads(__a )} ) )
client.chat_postMessage(
channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"] , text="There was an issue running the tests." , blocks=__a , )
def post( self ):
print("Sending the following payload" )
print(json.dumps({"blocks": json.loads(self.payload )} ) )
text = f'{self.n_failures} failures out of {self.n_tests} tests,' if self.n_failures else "All tests passed."
self.thread_ts = client.chat_postMessage(
channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"] , blocks=self.payload , text=text , )
def get_reply_blocks( self , job_name , job_link , failures , text ):
failures_text = ""
for key, value in failures.items():
value = value[:2_00] + " [Truncated]" if len(value ) > 2_50 else value
failures_text += f'*{key}*\n_{value}_\n\n'
title = job_name
content = {"type": "section", "text": {"type": "mrkdwn", "text": text}}
if job_link is not None:
content["accessory"] = {
"type": "button",
"text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True},
"url": job_link,
}
return [
{"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
content,
{"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
]
def post_reply( self ):
if self.thread_ts is None:
raise ValueError("Can only post reply if a post has been made." )
job_link = self.doc_test_results.pop("job_link" )
self.doc_test_results.pop("failures" )
self.doc_test_results.pop("success" )
self.doc_test_results.pop("time_spent" )
sorted_dict = sorted(self.doc_test_results.items() , key=lambda t : t[0] )
for job, job_result in sorted_dict:
if len(job_result["failures"] ):
text = f'*Num failures* :{len(job_result["failed"] )} \n'
failures = job_result["failures"]
blocks = self.get_reply_blocks(job , job_link , failures , text=text )
print("Sending the following reply" )
print(json.dumps({"blocks": blocks} ) )
client.chat_postMessage(
channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"] , text=f'Results for {job}' , blocks=blocks , thread_ts=self.thread_ts["ts"] , )
time.sleep(1 )
def get_job_links() -> Dict:
run_id = os.environ["GITHUB_RUN_ID"]
url = f'https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100'
result = requests.get(url ).json()
jobs = {}
try:
jobs.update({job["name"]: job["html_url"] for job in result["jobs"]} )
pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100 )
for i in range(pages_to_iterate_over ):
result = requests.get(url + f'&page={i + 2}' ).json()
jobs.update({job["name"]: job["html_url"] for job in result["jobs"]} )
return jobs
except Exception as e:
print("Unknown error, could not fetch links." , e )
return {}
def retrieve_artifact( name : str ) -> Dict:
_artifact = {}
if os.path.exists(name ):
files = os.listdir(name )
for file in files:
try:
with open(os.path.join(name , file ) , encoding="utf-8" ) as f:
_artifact[file.split("." )[0]] = f.read()
except UnicodeDecodeError as e:
raise ValueError(f'Could not open {os.path.join(name , file )}.' ) from e
return _artifact
def retrieve_available_artifacts() -> Dict:
class Artifact:
"""simple docstring"""
def __init__( self , name : str ):
self.name = name
self.paths = []
def __str__( self ):
return self.name
def add_path( self , path : str ):
self.paths.append({"name": self.name, "path": path} )
_available_artifacts = {}
directories = filter(os.path.isdir , os.listdir() )
for directory in directories:
artifact_name = directory
if artifact_name not in _available_artifacts:
_available_artifacts[artifact_name] = Artifact(artifact_name )
_available_artifacts[artifact_name].add_path(directory )
return _available_artifacts
if __name__ == "__main__":
github_actions_job_links = get_job_links()
available_artifacts = retrieve_available_artifacts()
docs = collections.OrderedDict(
[
('*.py', 'API Examples'),
('*.md', 'MD Examples'),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
doc_test_results = {
v: {
'failed': [],
'failures': {},
}
for v in docs.values()
}
# Link to the GitHub Action job
doc_test_results['job_link'] = github_actions_job_links.get('run_doctests')
artifact_path = available_artifacts['doc_tests_gpu_test_reports'].paths[0]
artifact = retrieve_artifact(artifact_path['name'])
if "stats" in artifact:
failed, success, time_spent = handle_test_results(artifact['stats'])
doc_test_results['failures'] = failed
doc_test_results['success'] = success
doc_test_results['time_spent'] = time_spent[1:-1] + ', '
all_failures = extract_first_line_failure(artifact['failures_short'])
for line in artifact["summary_short"].split('\n'):
if re.search('FAILED', line):
line = line.replace('FAILED ', '')
line = line.split()[0].replace('\n', '')
if "::" in line:
file_path, test = line.split('::')
else:
file_path, test = line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
category = docs[file_regex]
doc_test_results[category]["failed"].append(test)
failure = all_failures[test] if test in all_failures else 'N/A'
doc_test_results[category]["failures"][test] = failure
break
message = Message('🤗 Results of the doc tests.', doc_test_results)
message.post()
message.post_reply()
| 346 | 1 |
'''simple docstring'''
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
def __init__( self , df : pyspark.sql.DataFrame , split : Optional[NamedSplit] = None , features : Optional[Features] = None , streaming : bool = True , cache_dir : str = None , keep_in_memory : bool = False , working_dir : str = None , load_from_cache_file : bool = True , file_format : str = "arrow" , **kwargs , ):
super().__init__(
split=split , features=features , cache_dir=cache_dir , keep_in_memory=keep_in_memory , streaming=streaming , **kwargs , )
self._load_from_cache_file = load_from_cache_file
self._file_format = file_format
self.builder = Spark(
df=df , features=features , cache_dir=cache_dir , working_dir=working_dir , **kwargs , )
def read( self ):
if self.streaming:
return self.builder.as_streaming_dataset(split=self.split )
download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
self.builder.download_and_prepare(
download_mode=download_mode , file_format=self._file_format , )
return self.builder.as_dataset(split=self.split )
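# Usage sketch under the upstream name SparkDatasetReader (the Spark session
# and data are illustrative):
#
#   from pyspark.sql import SparkSession
#   spark = SparkSession.builder.getOrCreate()
#   df = spark.createDataFrame([{"text": "hello"}, {"text": "world"}])
#   ds = SparkDatasetReader(df, cache_dir="/tmp/hf_cache").read()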
| 346 |
'''simple docstring'''
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main() -> None:
parser = HfArgumentParser(TensorFlowBenchmarkArguments )
benchmark_args = parser.parse_args_into_dataclasses()[0]
benchmark = TensorFlowBenchmark(args=benchmark_args )
try:
benchmark_args = parser.parse_args_into_dataclasses()[0]
except ValueError as e:
arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
begin_error_msg = " ".join(str(e ).split(" " )[:-1] )
full_error_msg = ""
depreciated_args = eval(str(e ).split(" " )[-1] )
wrong_args = []
for arg in depreciated_args:
# arg[2:] removes '--'
if arg[2:] in TensorFlowBenchmark.deprecated_args:
# arg[5:] removes '--no_'
full_error_msg += arg_error_msg.format(arg[5:] )
else:
wrong_args.append(arg )
if len(wrong_args ) > 0:
full_error_msg = full_error_msg + begin_error_msg + str(wrong_args )
raise ValueError(full_error_msg )
benchmark.run()
if __name__ == "__main__":
main()
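# Illustrative invocation (flag names follow TensorFlowBenchmarkArguments; the
# exact flag set is an assumption of this sketch):
#
#   python run_benchmark_tf.py --models bert-base-uncased --batch_sizes 8 --sequence_lengths 128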
| 346 | 1 |
'''simple docstring'''
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
lowerCAmelCase_ : Union[str, Any] = logging.getLogger()
@unittest.skip('Temporarily disable the doc tests.' )
@require_torch
@require_tf
@slow
class __SCREAMING_SNAKE_CASE (unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase__ ( self : Union[str, Any] , __a : Path , __a : Union[str, None] = None , __a : Union[List[str], None] = None , __a : Union[str, List[str], None] = None , __a : bool = True , ):
_a = [file for file in os.listdir(__a ) if os.path.isfile(os.path.join(__a , __a ) )]
if identifier is not None:
_a = [file for file in files if identifier in file]
if n_identifier is not None:
if isinstance(__a , __a ):
for n_ in n_identifier:
_a = [file for file in files if n_ not in file]
else:
_a = [file for file in files if n_identifier not in file]
_a = ignore_files or []
ignore_files.append("__init__.py" )
_a = [file for file in files if file not in ignore_files]
for file in files:
# Open all files
print("Testing" , __a )
if only_modules:
_a = file.split("." )[0]
try:
_a = getattr(__a , __a )
_a = doctest.DocTestSuite(__a )
_a = unittest.TextTestRunner().run(__a )
self.assertIs(len(result.failures ) , 0 )
except AttributeError:
logger.info(f'{module_identifier} is not a module.' )
else:
_a = doctest.testfile(str(".." / directory / file ) , optionflags=doctest.ELLIPSIS )
self.assertIs(result.failed , 0 )
def UpperCamelCase__ ( self : Optional[Any] ):
_a = Path("src/transformers" )
_a = "modeling"
_a = [
"modeling_ctrl.py",
"modeling_tf_ctrl.py",
]
self.analyze_directory(__a , identifier=__a , ignore_files=__a )
def UpperCamelCase__ ( self : str ):
_a = Path("src/transformers" )
_a = "tokenization"
self.analyze_directory(__a , identifier=__a )
def UpperCamelCase__ ( self : Union[str, Any] ):
_a = Path("src/transformers" )
_a = "configuration"
self.analyze_directory(__a , identifier=__a )
def UpperCamelCase__ ( self : Optional[Any] ):
_a = Path("src/transformers" )
_a = ["configuration", "modeling", "tokenization"]
self.analyze_directory(__a , n_identifier=__a )
def UpperCamelCase__ ( self : List[Any] ):
_a = Path("docs/source" )
_a = ["favicon.ico"]
self.analyze_directory(__a , ignore_files=__a , only_modules=__a )
| 346 |
'''simple docstring'''
import logging
import os
import threading
import time
try:
import warnings
except ImportError:
lowerCAmelCase_ : Union[str, Any] = None
try:
import msvcrt
except ImportError:
lowerCAmelCase_ : Tuple = None
try:
import fcntl
except ImportError:
lowerCAmelCase_ : Optional[int] = None
# Backward compatibility
# ------------------------------------------------
try:
TimeoutError
except NameError:
lowerCAmelCase_ : Any = OSError
# Data
# ------------------------------------------------
lowerCAmelCase_ : Tuple = [
'Timeout',
'BaseFileLock',
'WindowsFileLock',
'UnixFileLock',
'SoftFileLock',
'FileLock',
]
lowerCAmelCase_ : Optional[int] = '3.0.12'
lowerCAmelCase_ : Tuple = None
def _lowerCamelCase ( ) -> Optional[int]:
global _logger
_a = _logger or logging.getLogger(__name__ )
return _logger
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
def __init__( self : Dict , __a : Optional[Any] ):
_a = lock_file
return None
def __str__( self : Any ):
_a = f'The file lock \'{self.lock_file}\' could not be acquired.'
return temp
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self : List[Any] , __a : Optional[int] ):
_a = lock
return None
def __enter__( self : str ):
return self.lock
def __exit__( self : List[Any] , __a : List[Any] , __a : Union[str, Any] , __a : Dict ):
self.lock.release()
return None
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self : Union[str, Any] , __a : Union[str, Any] , __a : Optional[int]=-1 , __a : Tuple=None ):
_a = max_filename_length if max_filename_length is not None else 2_55
# Hash the filename if it's too long
_a = self.hash_filename_if_too_long(__a , __a )
# The path to the lock file.
_a = lock_file
# The file descriptor for the *_lock_file* as it is returned by the
# os.open() function.
# This file lock is only NOT None, if the object currently holds the
# lock.
_a = None
# The default timeout value.
_a = timeout
# We use this lock primarily for the lock counter.
_a = threading.Lock()
# The lock counter is used for implementing the nested locking
# mechanism. Whenever the lock is acquired, the counter is increased and
# the lock is only released, when this value is 0 again.
_a = 0
return None
@property
def UpperCamelCase__ ( self : Optional[Any] ):
return self._lock_file
@property
def UpperCamelCase__ ( self : List[Any] ):
return self._timeout
@timeout.setter
def UpperCamelCase__ ( self : int , __a : List[Any] ):
_a = float(__a )
return None
def UpperCamelCase__ ( self : Dict ):
raise NotImplementedError()
def UpperCamelCase__ ( self : str ):
raise NotImplementedError()
@property
def UpperCamelCase__ ( self : Optional[Any] ):
return self._lock_file_fd is not None
def UpperCamelCase__ ( self : int , __a : int=None , __a : Tuple=0.05 ):
# Use the default timeout, if no timeout is provided.
if timeout is None:
_a = self.timeout
# Increment the number right at the beginning.
# We can still undo it, if something fails.
with self._thread_lock:
self._lock_counter += 1
_a = id(self )
_a = self._lock_file
_a = time.time()
try:
while True:
with self._thread_lock:
if not self.is_locked:
logger().debug(f'Attempting to acquire lock {lock_id} on {lock_filename}' )
self._acquire()
if self.is_locked:
logger().debug(f'Lock {lock_id} acquired on {lock_filename}' )
break
elif timeout >= 0 and time.time() - start_time > timeout:
logger().debug(f'Timeout on acquiring lock {lock_id} on {lock_filename}' )
raise Timeout(self._lock_file )
else:
logger().debug(
f'Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...' )
time.sleep(__a )
except: # noqa
# Something did go wrong, so decrement the counter.
with self._thread_lock:
_a = max(0 , self._lock_counter - 1 )
raise
return _Acquire_ReturnProxy(lock=self )
def UpperCamelCase__ ( self : Union[str, Any] , __a : int=False ):
with self._thread_lock:
if self.is_locked:
self._lock_counter -= 1
if self._lock_counter == 0 or force:
_a = id(self )
_a = self._lock_file
logger().debug(f'Attempting to release lock {lock_id} on {lock_filename}' )
self._release()
_a = 0
logger().debug(f'Lock {lock_id} released on {lock_filename}' )
return None
def __enter__( self : List[Any] ):
self.acquire()
return self
def __exit__( self : str , __a : str , __a : Dict , __a : Dict ):
self.release()
return None
def __del__( self : int ):
self.release(force=__a )
return None
def UpperCamelCase__ ( self : Tuple , __a : str , __a : int ):
_a = os.path.basename(__a )
if len(__a ) > max_length and max_length > 0:
_a = os.path.dirname(__a )
_a = str(hash(__a ) )
_a = filename[: max_length - len(__a ) - 8] + "..." + hashed_filename + ".lock"
return os.path.join(__a , __a )
else:
return path
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
def __init__( self : int , __a : str , __a : List[Any]=-1 , __a : List[Any]=None ):
from .file_utils import relative_to_absolute_path
super().__init__(__a , timeout=__a , max_filename_length=__a )
_a = "\\\\?\\" + relative_to_absolute_path(self.lock_file )
def UpperCamelCase__ ( self : int ):
_a = os.O_RDWR | os.O_CREAT | os.O_TRUNC
try:
_a = os.open(self._lock_file , __a )
except OSError:
pass
else:
try:
msvcrt.locking(__a , msvcrt.LK_NBLCK , 1 )
except OSError:
os.close(__a )
else:
_a = fd
return None
def UpperCamelCase__ ( self : Optional[Any] ):
_a = self._lock_file_fd
_a = None
msvcrt.locking(__a , msvcrt.LK_UNLCK , 1 )
os.close(__a )
try:
os.remove(self._lock_file )
# Probably another instance of the application
# that acquired the file lock.
except OSError:
pass
return None
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
def __init__( self : List[str] , __a : Optional[Any] , __a : Union[str, Any]=-1 , __a : int=None ):
_a = os.statvfs(os.path.dirname(__a ) ).f_namemax
super().__init__(__a , timeout=__a , max_filename_length=__a )
def UpperCamelCase__ ( self : Any ):
_a = os.O_RDWR | os.O_CREAT | os.O_TRUNC
_a = os.open(self._lock_file , __a )
try:
fcntl.flock(__a , fcntl.LOCK_EX | fcntl.LOCK_NB )
except OSError:
os.close(__a )
else:
_a = fd
return None
def UpperCamelCase__ ( self : Tuple ):
# Do not remove the lockfile:
#
# https://github.com/benediktschmitt/py-filelock/issues/31
# https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition
_a = self._lock_file_fd
_a = None
fcntl.flock(__a , fcntl.LOCK_UN )
os.close(__a )
return None
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
def UpperCamelCase__ ( self : Union[str, Any] ):
_a = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
try:
_a = os.open(self._lock_file , __a )
except OSError:
pass
else:
_a = fd
return None
def UpperCamelCase__ ( self : Union[str, Any] ):
os.close(self._lock_file_fd )
_a = None
try:
os.remove(self._lock_file )
# The file is already deleted and that's what we want.
except OSError:
pass
return None
lowerCAmelCase_ : str = None
if msvcrt:
lowerCAmelCase_ : List[str] = WindowsFileLock
elif fcntl:
lowerCAmelCase_ : List[str] = UnixFileLock
else:
lowerCAmelCase_ : int = SoftFileLock
if warnings is not None:
warnings.warn('only soft file lock is available')
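# Usage sketch for the FileLock alias chosen above, shown with the upstream
# `filelock` API (the lock is re-entrant per object and released on exit):
#
#   lock = FileLock("hello.txt.lock", timeout=5)
#   with lock:
#       with open("hello.txt", "a") as f:
#           f.write("guarded write\n")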
| 346 | 1 |
'''simple docstring'''
from abc import ABC, abstractmethod
from typing import List, Optional
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
def __init__( self : Tuple ):
# test for the above condition
self.test()
def UpperCamelCase__ ( self : List[Any] ):
_a = 0
_a = False
while not completed:
if counter == 1:
self.reset()
_a = self.advance()
if not self.does_advance(__a ):
raise Exception(
"Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true." )
_a , _a , _a = self.update(__a )
counter += 1
if counter > 10_000:
raise Exception("update() does not fulfill the constraint." )
if self.remaining() != 0:
raise Exception("Custom Constraint is not defined correctly." )
@abstractmethod
def UpperCamelCase__ ( self : List[str] ):
raise NotImplementedError(
f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' )
@abstractmethod
def UpperCamelCase__ ( self : Optional[Any] , __a : int ):
raise NotImplementedError(
f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' )
@abstractmethod
def UpperCamelCase__ ( self : Tuple , __a : int ):
raise NotImplementedError(
f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' )
@abstractmethod
def UpperCamelCase__ ( self : List[Any] ):
raise NotImplementedError(
f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' )
@abstractmethod
def UpperCamelCase__ ( self : Tuple ):
raise NotImplementedError(
f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' )
@abstractmethod
def UpperCamelCase__ ( self : Optional[Any] , __a : Tuple=False ):
raise NotImplementedError(
f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' )
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
def __init__( self : Union[str, Any] , __a : List[int] ):
super(__a , self ).__init__()
if not isinstance(__a , __a ) or len(__a ) == 0:
raise ValueError(f'`token_ids` has to be a non-empty list, but is {token_ids}.' )
if any((not isinstance(__a , __a ) or token_id < 0) for token_id in token_ids ):
raise ValueError(f'Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.' )
_a = token_ids
_a = len(self.token_ids )
_a = -1 # the index of the currently fulfilled step
_a = False
def UpperCamelCase__ ( self : Union[str, Any] ):
if self.completed:
return None
return self.token_ids[self.fulfilled_idx + 1]
def UpperCamelCase__ ( self : Optional[int] , __a : int ):
if not isinstance(__a , __a ):
raise ValueError(f'`token_id` has to be an `int`, but is {token_id} of type {type(__a )}' )
if self.completed:
return False
return token_id == self.token_ids[self.fulfilled_idx + 1]
def UpperCamelCase__ ( self : Optional[int] , __a : int ):
if not isinstance(__a , __a ):
raise ValueError(f'`token_id` has to be an `int`, but is {token_id} of type {type(__a )}' )
_a = False
_a = False
_a = False
if self.does_advance(__a ):
self.fulfilled_idx += 1
_a = True
if self.fulfilled_idx == (self.seqlen - 1):
_a = True
_a = completed
else:
# failed to make progress.
_a = True
self.reset()
return stepped, completed, reset
def UpperCamelCase__ ( self : Union[str, Any] ):
_a = False
_a = 0
def UpperCamelCase__ ( self : int ):
return self.seqlen - (self.fulfilled_idx + 1)
def UpperCamelCase__ ( self : Tuple , __a : Tuple=False ):
_a = PhrasalConstraint(self.token_ids )
if stateful:
_a = self.seqlen
_a = self.fulfilled_idx
_a = self.completed
return new_constraint
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self : int , __a : List[List[int]] , __a : int=True ):
_a = max([len(__a ) for one in nested_token_ids] )
_a = {}
for token_ids in nested_token_ids:
_a = root
for tidx, token_id in enumerate(__a ):
if token_id not in level:
_a = {}
_a = level[token_id]
if no_subsets and self.has_subsets(__a , __a ):
raise ValueError(
"Each list in `nested_token_ids` can't be a complete subset of another list, but is"
f' {nested_token_ids}.' )
_a = root
def UpperCamelCase__ ( self : int , __a : Optional[Any] ):
_a = self.trie
for current_token in current_seq:
_a = start[current_token]
_a = list(start.keys() )
return next_tokens
def UpperCamelCase__ ( self : Optional[int] , __a : Tuple ):
_a = self.next_tokens(__a )
return len(__a ) == 0
def UpperCamelCase__ ( self : Tuple , __a : Tuple ):
_a = list(root.values() )
if len(__a ) == 0:
return 1
else:
return sum([self.count_leaves(__a ) for nn in next_nodes] )
def UpperCamelCase__ ( self : Dict , __a : List[str] , __a : List[Any] ):
_a = self.count_leaves(__a )
return len(__a ) != leaf_count
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
def __init__( self : str , __a : List[List[int]] ):
super(__a , self ).__init__()
if not isinstance(__a , __a ) or len(__a ) == 0:
raise ValueError(f'`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.' )
if any(not isinstance(__a , __a ) for token_ids in nested_token_ids ):
raise ValueError(f'`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.' )
if any(
any((not isinstance(__a , __a ) or token_id < 0) for token_id in token_ids )
for token_ids in nested_token_ids ):
raise ValueError(
f'Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.' )
_a = DisjunctiveTrie(__a )
_a = nested_token_ids
_a = self.trie.max_height
_a = []
_a = False
def UpperCamelCase__ ( self : List[Any] ):
_a = self.trie.next_tokens(self.current_seq )
if len(__a ) == 0:
return None
else:
return token_list
def UpperCamelCase__ ( self : Dict , __a : int ):
if not isinstance(__a , __a ):
raise ValueError(f'`token_id` is supposed to be type `int`, but is {token_id} of type {type(__a )}' )
_a = self.trie.next_tokens(self.current_seq )
return token_id in next_tokens
def UpperCamelCase__ ( self : Tuple , __a : int ):
if not isinstance(__a , __a ):
raise ValueError(f'`token_id` is supposed to be type `int`, but is {token_id} of type {type(__a )}' )
_a = False
_a = False
_a = False
if self.does_advance(__a ):
self.current_seq.append(__a )
_a = True
else:
_a = True
self.reset()
_a = self.trie.reached_leaf(self.current_seq )
_a = completed
return stepped, completed, reset
def UpperCamelCase__ ( self : List[Any] ):
_a = False
_a = []
def UpperCamelCase__ ( self : Optional[int] ):
if self.completed:
# since this can be completed without reaching max height
return 0
else:
return self.seqlen - len(self.current_seq )
def UpperCamelCase__ ( self : Any , __a : Union[str, Any]=False ):
_a = DisjunctiveConstraint(self.token_ids )
if stateful:
_a = self.seqlen
_a = self.current_seq
_a = self.completed
return new_constraint
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self : Optional[Any] , __a : List[Constraint] ):
_a = constraints
# max # of steps required to fulfill a given constraint
_a = max([c.seqlen for c in constraints] )
_a = len(__a )
_a = False
self.init_state()
def UpperCamelCase__ ( self : List[Any] ):
_a = []
_a = None
_a = [constraint.copy(stateful=__a ) for constraint in self.constraints]
def UpperCamelCase__ ( self : Dict ):
_a = 0
if self.inprogress_constraint:
# extra points for having a constraint mid-fulfilled
add += self.max_seqlen - self.inprogress_constraint.remaining()
return (len(self.complete_constraints ) * self.max_seqlen) + add
def UpperCamelCase__ ( self : Optional[int] ):
_a = []
if self.inprogress_constraint is None:
for constraint in self.pending_constraints: # "pending" == "unfulfilled yet"
_a = constraint.advance()
if isinstance(__a , __a ):
token_list.append(__a )
elif isinstance(__a , __a ):
token_list.extend(__a )
else:
_a = self.inprogress_constraint.advance()
if isinstance(__a , __a ):
token_list.append(__a )
elif isinstance(__a , __a ):
token_list.extend(__a )
if len(__a ) == 0:
return None
else:
return token_list
def UpperCamelCase__ ( self : Optional[int] , __a : Optional[List[int]] ):
self.init_state()
if token_ids is not None:
for token in token_ids:
# completes or steps **one** constraint
_a , _a = self.add(__a )
# the entire list of constraints are fulfilled
if self.completed:
break
def UpperCamelCase__ ( self : List[Any] , __a : int ):
if not isinstance(__a , __a ):
raise ValueError(f'`token_id` should be an `int`, but is `{token_id}`.' )
_a , _a = False, False
if self.completed:
_a = True
_a = False
return complete, stepped
if self.inprogress_constraint is not None:
# In the middle of fulfilling a constraint. If the `token_id` *does* makes an incremental progress to current
# job, simply update the state
_a , _a , _a = self.inprogress_constraint.update(__a )
if reset:
# 1. If the next token breaks the progress, then we must restart.
# e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
# But that doesn't mean we self.init_state(), since we only reset the state for this particular
# constraint, not the full list of constraints.
self.pending_constraints.append(self.inprogress_constraint.copy(stateful=__a ) )
_a = None
if complete:
# 2. If the next token completes the constraint, move it to completed list, set
# inprogress to None. If there are no pending constraints either, then this full list of constraints
# is complete.
self.complete_constraints.append(self.inprogress_constraint )
_a = None
if len(self.pending_constraints ) == 0:
# we're done!
_a = True
else:
# Not in the middle of fulfilling a constraint. So does this `token_id` helps us step towards any of our list
# of constraints?
for cidx, pending_constraint in enumerate(self.pending_constraints ):
if pending_constraint.does_advance(__a ):
_a , _a , _a = pending_constraint.update(__a )
if not stepped:
raise Exception(
"`constraint.update(token_id)` is not yielding incremental progress, "
"even though `constraint.does_advance(token_id)` is true." )
if complete:
self.complete_constraints.append(__a )
_a = None
if not complete and stepped:
_a = pending_constraint
if complete or stepped:
# If we made any progress at all, then it's at least not a "pending constraint".
_a = (
self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
)
if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None:
# If there's no longer any pending after this and no inprogress either, then we must be
# complete.
_a = True
break # prevent accidentally stepping through multiple constraints with just one token.
return complete, stepped
def copy( self , stateful=True ):
new_state = ConstraintListState(self.constraints ) # we never mutate the objects in self.constraints
# throughout this process, so they are still in their initialization state.
if stateful:
new_state.complete_constraints = [
constraint.copy(stateful=True ) for constraint in self.complete_constraints
]
if self.inprogress_constraint is not None:
new_state.inprogress_constraint = self.inprogress_constraint.copy(stateful=True )
new_state.pending_constraints = [constraint.copy() for constraint in self.pending_constraints]
return new_state
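# Illustrative sketch of the intended PhrasalConstraint protocol (class names
# as in transformers.generation; token ids are made up):
#
#   from transformers import PhrasalConstraint
#   c = PhrasalConstraint([5, 9, 2])
#   assert c.advance() == 5                  # next forced token
#   stepped, completed, reset = c.update(5)  # fulfilled_idx -> 0
#   c.update(9); c.update(2)
#   assert c.completed and c.remaining() == 0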
| 346 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
__a =42 # [batch_size x 3]
__a =42 # [batch_size x 3]
__a =42 # [batch_size x 3]
__a =42 # [batch_size x 3]
__a =42
__a =42
__a =42
__a =42
__a =42
def UpperCamelCase__ ( self : str ):
assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2
def UpperCamelCase__ ( self : List[str] ):
return torch.from_numpy(np.array([self.width, self.height] , dtype=np.floataa ) )
def UpperCamelCase__ ( self : Union[str, Any] ):
return torch.from_numpy(np.array([self.x_fov, self.y_fov] , dtype=np.floataa ) )
def UpperCamelCase__ ( self : Union[str, Any] ):
_a = torch.arange(self.height * self.width )
_a = torch.stack(
[
pixel_indices % self.width,
torch.div(__a , self.width , rounding_mode="trunc" ),
] , axis=1 , )
return coords
@property
def UpperCamelCase__ ( self : List[Any] ):
_a , *_a = self.shape
_a = int(np.prod(__a ) )
_a = self.get_image_coords()
_a = torch.broadcast_to(coords.unsqueeze(0 ) , [batch_size * inner_batch_size, *coords.shape] )
_a = self.get_camera_rays(__a )
_a = rays.view(__a , inner_batch_size * self.height * self.width , 2 , 3 )
return rays
def UpperCamelCase__ ( self : Dict , __a : torch.Tensor ):
_a , *_a , _a = coords.shape
assert n_coords == 2
assert batch_size == self.origin.shape[0]
_a = coords.view(__a , -1 , 2 )
_a = self.resolution()
_a = self.fov()
_a = (flat.float() / (res - 1)) * 2 - 1
_a = fracs * torch.tan(fov / 2 )
_a = fracs.view(__a , -1 , 2 )
_a = (
self.z.view(__a , 1 , 3 )
+ self.x.view(__a , 1 , 3 ) * fracs[:, :, :1]
+ self.y.view(__a , 1 , 3 ) * fracs[:, :, 1:]
)
_a = directions / directions.norm(dim=-1 , keepdim=__a )
_a = torch.stack(
[
torch.broadcast_to(self.origin.view(__a , 1 , 3 ) , [batch_size, directions.shape[1], 3] ),
directions,
] , dim=2 , )
return rays.view(__a , *__a , 2 , 3 )
def UpperCamelCase__ ( self : Dict , __a : int , __a : int ):
assert width * self.height == height * self.width, "The aspect ratio should not change."
return DifferentiableProjectiveCamera(
origin=self.origin , x=self.x , y=self.y , z=self.z , width=__a , height=__a , x_fov=self.x_fov , y_fov=self.y_fov , )
def create_pan_cameras( size : int ) -> DifferentiableProjectiveCamera:
origins = []
xs = []
ys = []
zs = []
for theta in np.linspace(0 , 2 * np.pi , num=20 ):
z = np.array([np.sin(theta ), np.cos(theta ), -0.5] )
z /= np.sqrt(np.sum(z**2 ) )
origin = -z * 4
x = np.array([np.cos(theta ), -np.sin(theta ), 0.0] )
y = np.cross(z , x )
origins.append(origin )
xs.append(x )
ys.append(y )
zs.append(z )
return DifferentiableProjectiveCamera(
origin=torch.from_numpy(np.stack(origins , axis=0 ) ).float() , x=torch.from_numpy(np.stack(xs , axis=0 ) ).float() , y=torch.from_numpy(np.stack(ys , axis=0 ) ).float() , z=torch.from_numpy(np.stack(zs , axis=0 ) ).float() , width=size , height=size , x_fov=0.7 , y_fov=0.7 , shape=(1, len(xs )) , )
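# Quick shape check for create_pan_cameras above (the camera_rays property name
# follows the upstream shap-e port; treat it as an assumption here):
#
#   cameras = create_pan_cameras(64)
#   rays = cameras.camera_rays      # the batched-rays property on the dataclass above
#   print(rays.shape)               # torch.Size([1, 20 * 64 * 64, 2, 3])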
| 346 | 1 |
'''simple docstring'''
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
__a =(PNDMScheduler,)
__a =(('num_inference_steps', 50),)
def UpperCamelCase__ ( self : Tuple , **__a : Tuple ):
_a = {
"num_train_timesteps": 10_00,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
}
config.update(**__a )
return config
def UpperCamelCase__ ( self : Tuple , __a : Union[str, Any]=0 , **__a : Any ):
_a = dict(self.forward_default_kwargs )
_a = kwargs.pop("num_inference_steps" , __a )
_a = self.dummy_sample
_a = 0.1 * sample
_a = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
_a = self.get_scheduler_config(**__a )
_a = scheduler_class(**__a )
scheduler.set_timesteps(__a )
# copy over dummy past residuals
_a = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__a )
_a = scheduler_class.from_pretrained(__a )
new_scheduler.set_timesteps(__a )
# copy over dummy past residuals
_a = dummy_past_residuals[:]
_a = scheduler.step_prk(__a , __a , __a , **__a ).prev_sample
_a = new_scheduler.step_prk(__a , __a , __a , **__a ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
_a = scheduler.step_plms(__a , __a , __a , **__a ).prev_sample
_a = new_scheduler.step_plms(__a , __a , __a , **__a ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def UpperCamelCase__ ( self : Optional[int] ):
pass
def UpperCamelCase__ ( self : str , __a : Optional[Any]=0 , **__a : Optional[Any] ):
_a = dict(self.forward_default_kwargs )
_a = kwargs.pop("num_inference_steps" , __a )
_a = self.dummy_sample
_a = 0.1 * sample
_a = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
_a = self.get_scheduler_config()
_a = scheduler_class(**__a )
scheduler.set_timesteps(__a )
# copy over dummy past residuals (must be after setting timesteps)
_a = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__a )
_a = scheduler_class.from_pretrained(__a )
# copy over dummy past residuals
new_scheduler.set_timesteps(__a )
# copy over dummy past residual (must be after setting timesteps)
_a = dummy_past_residuals[:]
_a = scheduler.step_prk(__a , __a , __a , **__a ).prev_sample
_a = new_scheduler.step_prk(__a , __a , __a , **__a ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
_a = scheduler.step_plms(__a , __a , __a , **__a ).prev_sample
_a = new_scheduler.step_plms(__a , __a , __a , **__a ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def UpperCamelCase__ ( self : Optional[int] , **__a : Tuple ):
_a = self.scheduler_classes[0]
_a = self.get_scheduler_config(**__a )
_a = scheduler_class(**__a )
_a = 10
_a = self.dummy_model()
_a = self.dummy_sample_deter
scheduler.set_timesteps(__a )
for i, t in enumerate(scheduler.prk_timesteps ):
_a = model(__a , __a )
_a = scheduler.step_prk(__a , __a , __a ).prev_sample
for i, t in enumerate(scheduler.plms_timesteps ):
_a = model(__a , __a )
_a = scheduler.step_plms(__a , __a , __a ).prev_sample
return sample
def UpperCamelCase__ ( self : Union[str, Any] ):
_a = dict(self.forward_default_kwargs )
_a = kwargs.pop("num_inference_steps" , __a )
for scheduler_class in self.scheduler_classes:
_a = self.get_scheduler_config()
_a = scheduler_class(**__a )
_a = self.dummy_sample
_a = 0.1 * sample
if num_inference_steps is not None and hasattr(__a , "set_timesteps" ):
scheduler.set_timesteps(__a )
elif num_inference_steps is not None and not hasattr(__a , "set_timesteps" ):
_a = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
_a = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
_a = dummy_past_residuals[:]
_a = scheduler.step_prk(__a , 0 , __a , **__a ).prev_sample
_a = scheduler.step_prk(__a , 1 , __a , **__a ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
_a = scheduler.step_plms(__a , 0 , __a , **__a ).prev_sample
_a = scheduler.step_plms(__a , 1 , __a , **__a ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def UpperCamelCase__ ( self : List[Any] ):
for timesteps in [1_00, 10_00]:
self.check_over_configs(num_train_timesteps=__a )
def UpperCamelCase__ ( self : str ):
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=__a )
_a = self.scheduler_classes[0]
_a = self.get_scheduler_config(steps_offset=1 )
_a = scheduler_class(**__a )
scheduler.set_timesteps(10 )
assert torch.equal(
scheduler.timesteps , torch.LongTensor(
[9_01, 8_51, 8_51, 8_01, 8_01, 7_51, 7_51, 7_01, 7_01, 6_51, 6_51, 6_01, 6_01, 5_01, 4_01, 3_01, 2_01, 1_01, 1] ) , )
def UpperCamelCase__ ( self : List[str] ):
for beta_start, beta_end in zip([0.0001, 0.001] , [0.002, 0.02] ):
self.check_over_configs(beta_start=__a , beta_end=__a )
def UpperCamelCase__ ( self : Tuple ):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=__a )
def UpperCamelCase__ ( self : int ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__a )
def UpperCamelCase__ ( self : Tuple ):
for t in [1, 5, 10]:
self.check_over_forward(time_step=__a )
def UpperCamelCase__ ( self : Optional[Any] ):
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 1_00] ):
self.check_over_forward(num_inference_steps=__a )
def UpperCamelCase__ ( self : Optional[Any] ):
        # an earlier version of set_timesteps() raised an error when indexing alphas with num_inference_steps as a power of 3
_a = 27
for scheduler_class in self.scheduler_classes:
_a = self.dummy_sample
_a = 0.1 * sample
_a = self.get_scheduler_config()
_a = scheduler_class(**__a )
scheduler.set_timesteps(__a )
        # before the power-of-3 fix this would error on the first step, so we only need to run two steps
for i, t in enumerate(scheduler.prk_timesteps[:2] ):
_a = scheduler.step_prk(__a , __a , __a ).prev_sample
def UpperCamelCase__ ( self : Optional[Any] ):
with self.assertRaises(__a ):
_a = self.scheduler_classes[0]
_a = self.get_scheduler_config()
_a = scheduler_class(**__a )
scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample
def UpperCamelCase__ ( self : Union[str, Any] ):
_a = self.full_loop()
_a = torch.sum(torch.abs(__a ) )
_a = torch.mean(torch.abs(__a ) )
assert abs(result_sum.item() - 198.1318 ) < 1e-2
assert abs(result_mean.item() - 0.2580 ) < 1e-3
def UpperCamelCase__ ( self : Dict ):
_a = self.full_loop(prediction_type="v_prediction" )
_a = torch.sum(torch.abs(__a ) )
_a = torch.mean(torch.abs(__a ) )
assert abs(result_sum.item() - 67.3986 ) < 1e-2
assert abs(result_mean.item() - 0.0878 ) < 1e-3
def UpperCamelCase__ ( self : Optional[int] ):
# We specify different beta, so that the first alpha is 0.99
_a = self.full_loop(set_alpha_to_one=__a , beta_start=0.01 )
_a = torch.sum(torch.abs(__a ) )
_a = torch.mean(torch.abs(__a ) )
assert abs(result_sum.item() - 230.0399 ) < 1e-2
assert abs(result_mean.item() - 0.2995 ) < 1e-3
def UpperCamelCase__ ( self : Any ):
# We specify different beta, so that the first alpha is 0.99
_a = self.full_loop(set_alpha_to_one=__a , beta_start=0.01 )
_a = torch.sum(torch.abs(__a ) )
_a = torch.mean(torch.abs(__a ) )
assert abs(result_sum.item() - 186.9482 ) < 1e-2
assert abs(result_mean.item() - 0.2434 ) < 1e-3
| 346 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
lowerCAmelCase_ : List[str] = TypeVar('T')
lowerCAmelCase_ : Dict = TypeVar('U')
class __SCREAMING_SNAKE_CASE (Generic[T, U] ):
"""simple docstring"""
def __init__( self : Union[str, Any] , __a : T | None , __a : U | None ):
_a = key
_a = val
_a = None
_a = None
def __repr__( self : Any ):
return (
f'Node: key: {self.key}, val: {self.val}, '
f'has next: {bool(self.next )}, has prev: {bool(self.prev )}'
)
class __SCREAMING_SNAKE_CASE (Generic[T, U] ):
"""simple docstring"""
def __init__( self : Dict ):
_a = DoubleLinkedListNode(__a , __a )
_a = DoubleLinkedListNode(__a , __a )
_a , _a = self.rear, self.head
def __repr__( self : str ):
_a = ["DoubleLinkedList"]
_a = self.head
while node.next is not None:
rep.append(str(__a ) )
_a = node.next
rep.append(str(self.rear ) )
return ",\n ".join(__a )
def UpperCamelCase__ ( self : int , __a : DoubleLinkedListNode[T, U] ):
_a = self.rear.prev
# All nodes other than self.head are guaranteed to have non-None previous
assert previous is not None
_a = node
_a = previous
_a = node
_a = self.rear
def UpperCamelCase__ ( self : Any , __a : DoubleLinkedListNode[T, U] ):
if node.prev is None or node.next is None:
return None
_a = node.next
_a = node.prev
_a = None
_a = None
return node
class __SCREAMING_SNAKE_CASE (Generic[T, U] ):
"""simple docstring"""
__a ={}
def __init__( self : Union[str, Any] , __a : int ):
_a = DoubleLinkedList()
_a = capacity
_a = 0
_a = 0
_a = 0
_a = {}
def __repr__( self : Optional[int] ):
return (
f'CacheInfo(hits={self.hits}, misses={self.miss}, '
f'capacity={self.capacity}, current size={self.num_keys})'
)
def __contains__( self : str , __a : T ):
return key in self.cache
def UpperCamelCase__ ( self : str , __a : T ):
# Note: pythonic interface would throw KeyError rather than return None
if key in self.cache:
self.hits += 1
_a = self.cache[key]
_a = self.list.remove(self.cache[key] )
assert node == value_node
# node is guaranteed not None because it is in self.cache
assert node is not None
self.list.add(__a )
return node.val
self.miss += 1
return None
def UpperCamelCase__ ( self : Tuple , __a : T , __a : U ):
if key not in self.cache:
if self.num_keys >= self.capacity:
# delete first node (oldest) when over capacity
_a = self.list.head.next
# guaranteed to have a non-None first node when num_keys > 0
# explain to type checker via assertions
assert first_node is not None
assert first_node.key is not None
                assert (
                    self.list.remove(first_node ) is not None
                ) # node guaranteed to be in list
del self.cache[first_node.key]
self.num_keys -= 1
_a = DoubleLinkedListNode(__a , __a )
self.list.add(self.cache[key] )
self.num_keys += 1
else:
# bump node to the end of the list, update value
_a = self.list.remove(self.cache[key] )
assert node is not None # node guaranteed to be in list
_a = value
self.list.add(__a )
@classmethod
def UpperCamelCase__ ( cls : Tuple , __a : int = 1_28 ):
def cache_decorator_inner(__a : Callable[[T], U] ) -> Callable[..., U]:
def cache_decorator_wrapper(*__a : T ) -> U:
if func not in cls.decorator_function_to_instance_map:
_a = LRUCache(__a )
_a = cls.decorator_function_to_instance_map[func].get(args[0] )
if result is None:
_a = func(*__a )
cls.decorator_function_to_instance_map[func].put(args[0] , __a )
return result
def cache_info() -> LRUCache[T, U]:
return cls.decorator_function_to_instance_map[func]
setattr(__a , "cache_info" , __a ) # noqa: B010
return cache_decorator_wrapper
return cache_decorator_inner
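# A minimal, self-contained sketch (not part of the original class) of the same
# least-recently-used policy, implemented with collections.OrderedDict instead of
# the doubly linked list above; all names here are illustrative.
from collections import OrderedDict

def _lru_sketch(capacity : int ):
    _cache : OrderedDict = OrderedDict()
    def get(key ):
        if key not in _cache:
            return None
        _cache.move_to_end(key ) # bump to most-recently-used on every hit
        return _cache[key]
    def put(key , val ):
        if key in _cache:
            _cache.move_to_end(key )
        _cache[key] = val
        if len(_cache ) > capacity:
            _cache.popitem(last=False ) # evict the least-recently-used entry
    return get, put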
if __name__ == "__main__":
import doctest
doctest.testmod()
| 346 | 1 |
'''simple docstring'''
from math import factorial
def _lowerCamelCase ( lowercase : int = 20 ) -> int:
    _a = 2 * lowercase # middle entry of odd rows starting at row 3 is the solution for n = 1,
    # 2, 3,...
    _a_half = _a // 2
    return int(factorial(_a ) / (factorial(_a_half ) * factorial(_a - _a_half )) )
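# e.g. for lowercase = 4 this returns the central binomial coefficient
# C(8, 4) = 8! / (4! * 4!) = 70.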
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(20))
else:
try:
lowerCAmelCase_ : Optional[Any] = int(sys.argv[1])
print(solution(n))
except ValueError:
print('Invalid entry - please enter a number.')
| 346 |
'''simple docstring'''
import re
from filelock import FileLock
try:
import nltk
lowerCAmelCase_ : Optional[int] = True
except (ImportError, ModuleNotFoundError):
lowerCAmelCase_ : Tuple = False
if NLTK_AVAILABLE:
with FileLock('.lock') as lock:
nltk.download('punkt', quiet=True)
def _lowerCamelCase ( lowercase : str ) -> str:
re.sub("<n>" , "" , lowercase ) # remove pegasus newline char
assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
return "\n".join(nltk.sent_tokenize(lowercase ) )
| 346 | 1 |
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def _lowerCamelCase ( lowercase : Optional[int] ) -> Optional[Any]:
if "cls_token" in name:
_a = name.replace("cls_token" , "vit.embeddings.cls_token" )
if "mask_token" in name:
_a = name.replace("mask_token" , "decoder.mask_token" )
if "decoder_pos_embed" in name:
_a = name.replace("decoder_pos_embed" , "decoder.decoder_pos_embed" )
if "pos_embed" in name and "decoder" not in name:
_a = name.replace("pos_embed" , "vit.embeddings.position_embeddings" )
if "patch_embed.proj" in name:
_a = name.replace("patch_embed.proj" , "vit.embeddings.patch_embeddings.projection" )
if "patch_embed.norm" in name:
_a = name.replace("patch_embed.norm" , "vit.embeddings.norm" )
if "decoder_blocks" in name:
_a = name.replace("decoder_blocks" , "decoder.decoder_layers" )
if "blocks" in name:
_a = name.replace("blocks" , "vit.encoder.layer" )
if "attn.proj" in name:
_a = name.replace("attn.proj" , "attention.output.dense" )
if "attn" in name:
_a = name.replace("attn" , "attention.self" )
if "norm1" in name:
_a = name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
_a = name.replace("norm2" , "layernorm_after" )
if "mlp.fc1" in name:
_a = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
_a = name.replace("mlp.fc2" , "output.dense" )
if "decoder_embed" in name:
_a = name.replace("decoder_embed" , "decoder.decoder_embed" )
if "decoder_norm" in name:
_a = name.replace("decoder_norm" , "decoder.decoder_norm" )
if "decoder_pred" in name:
_a = name.replace("decoder_pred" , "decoder.decoder_pred" )
if "norm.weight" in name and "decoder" not in name:
_a = name.replace("norm.weight" , "vit.layernorm.weight" )
if "norm.bias" in name and "decoder" not in name:
_a = name.replace("norm.bias" , "vit.layernorm.bias" )
return name
def _lowerCamelCase ( lowercase : Tuple , lowercase : List[str] ) -> Union[str, Any]:
for key in orig_state_dict.copy().keys():
_a = orig_state_dict.pop(lowercase )
if "qkv" in key:
_a = key.split("." )
_a = int(key_split[1] )
if "decoder_blocks" in key:
_a = config.decoder_hidden_size
_a = "decoder.decoder_layers."
if "weight" in key:
_a = val[:dim, :]
_a = val[dim : dim * 2, :]
_a = val[-dim:, :]
elif "bias" in key:
_a = val[:dim]
_a = val[dim : dim * 2]
_a = val[-dim:]
else:
_a = config.hidden_size
_a = "vit.encoder.layer."
if "weight" in key:
_a = val[:dim, :]
_a = val[dim : dim * 2, :]
_a = val[-dim:, :]
elif "bias" in key:
_a = val[:dim]
_a = val[dim : dim * 2]
_a = val[-dim:]
else:
_a = val
return orig_state_dict
def _lowerCamelCase ( lowercase : str , lowercase : Dict ) -> int:
_a = ViTMAEConfig()
if "large" in checkpoint_url:
_a = 1024
_a = 4096
_a = 24
_a = 16
elif "huge" in checkpoint_url:
_a = 14
_a = 1280
_a = 5120
_a = 32
_a = 16
_a = ViTMAEForPreTraining(lowercase )
_a = torch.hub.load_state_dict_from_url(lowercase , map_location="cpu" )["model"]
_a = ViTMAEImageProcessor(size=config.image_size )
_a = convert_state_dict(lowercase , lowercase )
model.load_state_dict(lowercase )
model.eval()
_a = "https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg"
_a = Image.open(requests.get(lowercase , stream=lowercase ).raw )
_a = image_processor(images=lowercase , return_tensors="pt" )
# forward pass
torch.manual_seed(2 )
_a = model(**lowercase )
_a = outputs.logits
if "large" in checkpoint_url:
_a = torch.tensor(
[[-0.73_09, -0.71_28, -1.01_69], [-1.01_61, -0.90_58, -1.18_78], [-1.04_78, -0.94_11, -1.19_11]] )
elif "huge" in checkpoint_url:
_a = torch.tensor(
[[-1.15_99, -0.91_99, -1.22_21], [-1.19_52, -0.92_69, -1.23_07], [-1.21_43, -0.93_37, -1.22_62]] )
else:
_a = torch.tensor(
[[-0.91_92, -0.84_81, -1.12_59], [-1.13_49, -1.00_34, -1.25_99], [-1.17_57, -1.04_29, -1.27_26]] )
# verify logits
assert torch.allclose(logits[0, :3, :3] , lowercase , atol=1E-4 )
print(F'Saving model to {pytorch_dump_folder_path}' )
model.save_pretrained(lowercase )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(lowercase )
if __name__ == "__main__":
lowerCAmelCase_ : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth',
type=str,
help='URL of the checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
lowerCAmelCase_ : List[str] = parser.parse_args()
convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
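    # Example invocation (script and output folder names are illustrative):
    # python convert_vit_mae_to_pytorch.py \
    #     --checkpoint_url https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth \
    #     --pytorch_dump_folder_path ./vit-mae-base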
| 346 |
'''simple docstring'''
import requests
lowerCAmelCase_ : List[Any] = 'YOUR API KEY'
def _lowerCamelCase ( lowercase : str , lowercase : str = giphy_api_key ) -> list:
_a = "+".join(query.split() )
_a = F'https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}'
_a = requests.get(lowercase ).json()["data"]
return [gif["url"] for gif in gifs]
if __name__ == "__main__":
print('\n'.join(get_gifs('space ship')))
| 346 | 1 |
'''simple docstring'''
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_tensorflow_text_available():
from transformers.models.bert import TFBertTokenizer
lowerCAmelCase_ : Dict = ['bert-base-uncased', 'bert-base-cased']
lowerCAmelCase_ : Dict = 'hf-internal-testing/tiny-bert-tf-only'
if is_tf_available():
class __SCREAMING_SNAKE_CASE (tf.keras.Model ):
"""simple docstring"""
def __init__( self : Optional[int] , __a : int ):
super().__init__()
_a = tokenizer
_a = AutoConfig.from_pretrained(__a )
_a = TFAutoModel.from_config(__a )
def UpperCamelCase__ ( self : str , __a : Optional[Any] ):
_a = self.tokenizer(__a )
_a = self.bert(**__a )
return out["pooler_output"]
@require_tf
@require_tensorflow_text
class __SCREAMING_SNAKE_CASE (unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase__ ( self : Optional[int] ):
super().setUp()
_a = [
BertTokenizer.from_pretrained(__a ) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
] # repeat for when fast_bert_tokenizer=false
_a = [TFBertTokenizer.from_pretrained(__a ) for checkpoint in TOKENIZER_CHECKPOINTS] + [
TFBertTokenizer.from_pretrained(__a , use_fast_bert_tokenizer=__a )
for checkpoint in TOKENIZER_CHECKPOINTS
]
assert len(self.tokenizers ) == len(self.tf_tokenizers )
_a = [
"This is a straightforward English test sentence.",
"This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
"Now we're going to add some Chinese: 一 二 三 一二三",
"And some much more rare Chinese: 齉 堃 齉堃",
"Je vais aussi écrire en français pour tester les accents",
"Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
]
_a = list(zip(self.test_sentences , self.test_sentences[::-1] ) )
def UpperCamelCase__ ( self : int ):
for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ):
for test_inputs in (self.test_sentences, self.paired_sentences):
_a = tokenizer(__a , return_tensors="tf" , padding="longest" )
_a = tf_tokenizer(__a )
for key in python_outputs.keys():
self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape ) )
self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key] , tf.intaa ) == tf_outputs[key] ) )
@slow
def UpperCamelCase__ ( self : Optional[int] ):
for tf_tokenizer in self.tf_tokenizers:
_a = tf_tokenizer(self.paired_sentences )
_a = tf_tokenizer(
text=[sentence[0] for sentence in self.paired_sentences] , text_pair=[sentence[1] for sentence in self.paired_sentences] , )
for key in merged_outputs.keys():
self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key] , tf.intaa ) == separated_outputs[key] ) )
@slow
def UpperCamelCase__ ( self : Optional[int] ):
for tf_tokenizer in self.tf_tokenizers:
_a = tf.function(__a )
for test_inputs in (self.test_sentences, self.paired_sentences):
_a = tf.constant(__a )
_a = compiled_tokenizer(__a )
_a = tf_tokenizer(__a )
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
def UpperCamelCase__ ( self : Union[str, Any] ):
for tf_tokenizer in self.tf_tokenizers:
_a = ModelToSave(tokenizer=__a )
_a = tf.convert_to_tensor(self.test_sentences )
_a = model(__a ) # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
_a = Path(__a ) / "saved.model"
model.save(__a )
_a = tf.keras.models.load_model(__a )
_a = loaded_model(__a )
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output ) ) , 1e-5 )
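# A minimal in-graph tokenization sketch (not part of the test class), assuming the
# TFBertTokenizer API exercised above; the checkpoint name is illustrative:
#   tf_tokenizer = TFBertTokenizer.from_pretrained("bert-base-uncased")
#   outputs = tf_tokenizer(tf.constant(["This is a test"]))
# `outputs` is a dict of int tensors usable inside tf.function or a saved Keras
# model, which is exactly what the compilation and saving tests above verify.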
| 346 |
'''simple docstring'''
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase_ : str = '▁'
lowerCAmelCase_ : Optional[int] = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ , unittest.TestCase ):
"""simple docstring"""
__a =BertGenerationTokenizer
__a =False
__a =True
def UpperCamelCase__ ( self : Optional[Any] ):
super().setUp()
_a = BertGenerationTokenizer(__a , keep_accents=__a )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase__ ( self : Tuple ):
_a = "<s>"
_a = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__a ) , __a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__a ) , __a )
def UpperCamelCase__ ( self : List[str] ):
_a = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<unk>" )
self.assertEqual(vocab_keys[1] , "<s>" )
self.assertEqual(vocab_keys[-1] , "<pad>" )
self.assertEqual(len(__a ) , 10_02 )
def UpperCamelCase__ ( self : str ):
self.assertEqual(self.get_tokenizer().vocab_size , 10_00 )
def UpperCamelCase__ ( self : Tuple ):
_a = BertGenerationTokenizer(__a , keep_accents=__a )
_a = tokenizer.tokenize("This is a test" )
self.assertListEqual(__a , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__a ) , [2_85, 46, 10, 1_70, 3_82] , )
_a = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
__a , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
_a = tokenizer.convert_tokens_to_ids(__a )
self.assertListEqual(
__a , [8, 21, 84, 55, 24, 19, 7, 0, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
_a = tokenizer.convert_ids_to_tokens(__a )
self.assertListEqual(
__a , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
@cached_property
def UpperCamelCase__ ( self : Any ):
return BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" )
@slow
def UpperCamelCase__ ( self : List[str] ):
_a = "Hello World!"
_a = [1_85_36, 22_60, 1_01]
self.assertListEqual(__a , self.big_tokenizer.encode(__a ) )
@slow
def UpperCamelCase__ ( self : Optional[int] ):
_a = (
"This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
)
_a = [
8_71,
4_19,
3_58,
9_46,
9_91,
25_21,
4_52,
3_58,
13_57,
3_87,
77_51,
35_36,
1_12,
9_85,
4_56,
1_26,
8_65,
9_38,
54_00,
57_34,
4_58,
13_68,
4_67,
7_86,
24_62,
52_46,
11_59,
6_33,
8_65,
45_19,
4_57,
5_82,
8_52,
25_57,
4_27,
9_16,
5_08,
4_05,
3_43_24,
4_97,
3_91,
4_08,
1_13_42,
12_44,
3_85,
1_00,
9_38,
9_85,
4_56,
5_74,
3_62,
1_25_97,
32_00,
31_29,
11_72,
]
self.assertListEqual(__a , self.big_tokenizer.encode(__a ) )
@require_torch
@slow
def UpperCamelCase__ ( self : Tuple ):
import torch
from transformers import BertGenerationConfig, BertGenerationEncoder
# Build sequence
_a = list(self.big_tokenizer.get_vocab().keys() )[:10]
_a = " ".join(__a )
_a = self.big_tokenizer.encode_plus(__a , return_tensors="pt" , return_token_type_ids=__a )
_a = self.big_tokenizer.batch_encode_plus(
[sequence + " " + sequence] , return_tensors="pt" , return_token_type_ids=__a )
_a = BertGenerationConfig()
_a = BertGenerationEncoder(__a )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**__a )
model(**__a )
@slow
def UpperCamelCase__ ( self : Optional[int] ):
# fmt: off
_a = {"input_ids": [[3_92_86, 4_58, 3_63_35, 20_01, 4_56, 1_30_73, 1_32_66, 4_55, 1_13, 77_46, 17_41, 1_11_57, 3_91, 1_30_73, 1_32_66, 4_55, 1_13, 39_67, 3_54_12, 1_13, 49_36, 1_09, 38_70, 23_77, 1_13, 3_00_84, 4_57_20, 4_58, 1_34, 1_74_96, 1_12, 5_03, 1_16_72, 1_13, 1_18, 1_12, 56_65, 1_33_47, 3_86_87, 1_12, 14_96, 3_13_89, 1_12, 32_68, 4_72_64, 1_34, 9_62, 1_12, 1_63_77, 80_35, 2_31_30, 4_30, 1_21_69, 1_55_18, 2_85_92, 4_58, 1_46, 4_16_97, 1_09, 3_91, 1_21_69, 1_55_18, 1_66_89, 4_58, 1_46, 4_13_58, 1_09, 4_52, 7_26, 40_34, 1_11, 7_63, 3_54_12, 50_82, 3_88, 19_03, 1_11, 90_51, 3_91, 28_70, 4_89_18, 19_00, 11_23, 5_50, 9_98, 1_12, 95_86, 1_59_85, 4_55, 3_91, 4_10, 2_29_55, 3_76_36, 1_14], [4_48, 1_74_96, 4_19, 36_63, 3_85, 7_63, 1_13, 2_75_33, 28_70, 32_83, 1_30_43, 16_39, 2_47_13, 5_23, 6_56, 2_40_13, 1_85_50, 25_21, 5_17, 2_70_14, 2_12_44, 4_20, 12_12, 14_65, 3_91, 9_27, 48_33, 3_88, 5_78, 1_17_86, 1_14, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [4_84, 21_69, 76_87, 2_19_32, 1_81_46, 7_26, 3_63, 1_70_32, 33_91, 1_14, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__a , model_name="google/bert_for_seq_generation_L-24_bbc_encoder" , revision="c817d1fd1be2ffa69431227a1fe320544943d4db" , )
| 346 | 1 |
'''simple docstring'''
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
lowerCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
@add_end_docstrings(lowerCamelCase_ )
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
def __init__( self : List[Any] , *__a : Optional[int] , **__a : List[str] ):
super().__init__(*__a , **__a )
self.check_model_type(__a )
def UpperCamelCase__ ( self : Optional[Any] , __a : Dict=None , __a : int=None , __a : Optional[Any]=None , **__a : List[Any] ):
_a , _a = {}, {}
if padding is not None:
_a = padding
if truncation is not None:
_a = truncation
if top_k is not None:
_a = top_k
return preprocess_params, {}, postprocess_params
def __call__( self : Union[str, Any] , __a : Union["Image.Image", str] , __a : str = None , **__a : Any ):
if isinstance(__a , (Image.Image, str) ) and isinstance(__a , __a ):
_a = {"image": image, "question": question}
else:
_a = image
_a = super().__call__(__a , **__a )
return results
def UpperCamelCase__ ( self : Tuple , __a : Tuple , __a : Optional[Any]=False , __a : List[Any]=False ):
_a = load_image(inputs["image"] )
_a = self.tokenizer(
inputs["question"] , return_tensors=self.framework , padding=__a , truncation=__a )
_a = self.image_processor(images=__a , return_tensors=self.framework )
model_inputs.update(__a )
return model_inputs
def UpperCamelCase__ ( self : List[Any] , __a : List[str] ):
_a = self.model(**__a )
return model_outputs
def UpperCamelCase__ ( self : int , __a : Optional[int] , __a : Dict=5 ):
if top_k > self.model.config.num_labels:
_a = self.model.config.num_labels
if self.framework == "pt":
_a = model_outputs.logits.sigmoid()[0]
_a , _a = probs.topk(__a )
else:
raise ValueError(f'Unsupported framework: {self.framework}' )
_a = scores.tolist()
_a = ids.tolist()
return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(__a , __a )]
| 346 |
'''simple docstring'''
def _lowerCamelCase ( lowercase : int , lowercase : list ) -> Union[str, Any]:
_enforce_args(lowercase , lowercase )
if n == 0:
return 0
_a = float("-inf" )
for i in range(1 , n + 1 ):
_a = max(
lowercase , prices[i - 1] + naive_cut_rod_recursive(n - i , lowercase ) )
return max_revue
def _lowerCamelCase ( lowercase : int , lowercase : list ) -> Tuple:
_enforce_args(lowercase , lowercase )
_a = [float("-inf" ) for _ in range(n + 1 )]
return _top_down_cut_rod_recursive(lowercase , lowercase , lowercase )
def _lowerCamelCase ( lowercase : int , lowercase : list , lowercase : list ) -> List[str]:
if max_rev[n] >= 0:
return max_rev[n]
elif n == 0:
return 0
else:
_a = float("-inf" )
for i in range(1 , n + 1 ):
_a = max(
lowercase , prices[i - 1] + _top_down_cut_rod_recursive(n - i , lowercase , lowercase ) , )
_a = max_revenue
return max_rev[n]
def _lowerCamelCase ( lowercase : int , lowercase : list ) -> Any:
_enforce_args(lowercase , lowercase )
    # length(max_rev) = n + 1, to accommodate the revenue obtainable from a rod of
    # length 0.
_a = [float("-inf" ) for _ in range(n + 1 )]
_a = 0
for i in range(1 , n + 1 ):
_a = max_rev[i]
for j in range(1 , i + 1 ):
_a = max(lowercase , prices[j - 1] + max_rev[i - j] )
_a = max_revenue_i
return max_rev[n]
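# The bottom-up recurrence used above: max_rev[i] = max over j in [1, i] of
# prices[j - 1] + max_rev[i - j], i.e. the best first cut of length j plus the
# optimal revenue for the remaining rod of length i - j.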
def _lowerCamelCase ( lowercase : int , lowercase : list ) -> Dict:
if n < 0:
_a = F'n must be greater than or equal to 0. Got n = {n}'
raise ValueError(lowercase )
if n > len(lowercase ):
_a = (
"Each integral piece of rod must have a corresponding price. "
F'Got n = {n} but length of prices = {len(lowercase )}'
)
raise ValueError(lowercase )
def _lowerCamelCase ( ) -> Any:
_a = [6, 10, 12, 15, 20, 23]
_a = len(lowercase )
# the best revenue comes from cutting the rod into 6 pieces, each
# of length 1 resulting in a revenue of 6 * 6 = 36.
_a = 36
_a = top_down_cut_rod(lowercase , lowercase )
_a = bottom_up_cut_rod(lowercase , lowercase )
_a = naive_cut_rod_recursive(lowercase , lowercase )
assert expected_max_revenue == max_rev_top_down
assert max_rev_top_down == max_rev_bottom_up
assert max_rev_bottom_up == max_rev_naive
if __name__ == "__main__":
main()
| 346 | 1 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
lowerCAmelCase_ : Union[str, Any] = logging.getLogger(__name__)
@dataclass
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
__a =42
__a =42
__a =42
@dataclass
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
__a =42
__a =42
__a =None
__a =None
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
__a ='train'
__a ='dev'
__a ='test'
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
@staticmethod
def UpperCamelCase__ ( __a : str , __a : Union[Split, str] ):
raise NotImplementedError
@staticmethod
def UpperCamelCase__ ( __a : str ):
raise NotImplementedError
@staticmethod
def UpperCamelCase__ ( __a : List[InputExample] , __a : List[str] , __a : int , __a : PreTrainedTokenizer , __a : List[str]=False , __a : Dict="[CLS]" , __a : Tuple=1 , __a : Union[str, Any]="[SEP]" , __a : List[Any]=False , __a : List[str]=False , __a : Dict=0 , __a : List[Any]=0 , __a : Optional[Any]=-1_00 , __a : List[str]=0 , __a : Optional[Any]=True , ):
_a = {label: i for i, label in enumerate(__a )}
_a = []
for ex_index, example in enumerate(__a ):
if ex_index % 1_00_00 == 0:
logger.info("Writing example %d of %d" , __a , len(__a ) )
_a = []
_a = []
for word, label in zip(example.words , example.labels ):
_a = tokenizer.tokenize(__a )
                # bert-base-multilingual-cased sometimes outputs "nothing" ([]) when calling tokenize with just a space.
if len(__a ) > 0:
tokens.extend(__a )
# Use the real label id for the first token of the word, and padding ids for the remaining tokens
label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(__a ) - 1) )
# Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
_a = tokenizer.num_special_tokens_to_add()
if len(__a ) > max_seq_length - special_tokens_count:
_a = tokens[: (max_seq_length - special_tokens_count)]
_a = label_ids[: (max_seq_length - special_tokens_count)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens += [sep_token]
label_ids += [pad_token_label_id]
if sep_token_extra:
# roberta uses an extra separator b/w pairs of sentences
tokens += [sep_token]
label_ids += [pad_token_label_id]
_a = [sequence_a_segment_id] * len(__a )
if cls_token_at_end:
tokens += [cls_token]
label_ids += [pad_token_label_id]
segment_ids += [cls_token_segment_id]
else:
_a = [cls_token] + tokens
_a = [pad_token_label_id] + label_ids
_a = [cls_token_segment_id] + segment_ids
_a = tokenizer.convert_tokens_to_ids(__a )
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
_a = [1 if mask_padding_with_zero else 0] * len(__a )
# Zero-pad up to the sequence length.
_a = max_seq_length - len(__a )
if pad_on_left:
_a = ([pad_token] * padding_length) + input_ids
_a = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
_a = ([pad_token_segment_id] * padding_length) + segment_ids
_a = ([pad_token_label_id] * padding_length) + label_ids
else:
input_ids += [pad_token] * padding_length
input_mask += [0 if mask_padding_with_zero else 1] * padding_length
segment_ids += [pad_token_segment_id] * padding_length
label_ids += [pad_token_label_id] * padding_length
assert len(__a ) == max_seq_length
assert len(__a ) == max_seq_length
assert len(__a ) == max_seq_length
assert len(__a ) == max_seq_length
if ex_index < 5:
logger.info("*** Example ***" )
logger.info("guid: %s" , example.guid )
logger.info("tokens: %s" , " ".join([str(__a ) for x in tokens] ) )
logger.info("input_ids: %s" , " ".join([str(__a ) for x in input_ids] ) )
logger.info("input_mask: %s" , " ".join([str(__a ) for x in input_mask] ) )
logger.info("segment_ids: %s" , " ".join([str(__a ) for x in segment_ids] ) )
logger.info("label_ids: %s" , " ".join([str(__a ) for x in label_ids] ) )
if "token_type_ids" not in tokenizer.model_input_names:
_a = None
features.append(
InputFeatures(
input_ids=__a , attention_mask=__a , token_type_ids=__a , label_ids=__a ) )
return features
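# Example of the padding produced above, assuming max_seq_length = 8 and 5 real
# tokens: input_ids gains 3 pad ids and attention_mask becomes
# [1, 1, 1, 1, 1, 0, 0, 0] (mirrored on the left when pad_on_left is set).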
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
__a =42
__a =nn.CrossEntropyLoss().ignore_index
def __init__( self : Optional[int] , __a : TokenClassificationTask , __a : str , __a : PreTrainedTokenizer , __a : List[str] , __a : str , __a : Optional[int] = None , __a : Any=False , __a : Split = Split.train , ):
# Load data features from cache or dataset file
_a = os.path.join(
__a , "cached_{}_{}_{}".format(mode.value , tokenizer.__class__.__name__ , str(__a ) ) , )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
_a = cached_features_file + ".lock"
with FileLock(__a ):
if os.path.exists(__a ) and not overwrite_cache:
logger.info(f'Loading features from cached file {cached_features_file}' )
_a = torch.load(__a )
else:
logger.info(f'Creating features from dataset file at {data_dir}' )
_a = token_classification_task.read_examples_from_file(__a , __a )
# TODO clean up all this to leverage built-in features of tokenizers
_a = token_classification_task.convert_examples_to_features(
__a , __a , __a , __a , cls_token_at_end=bool(model_type in ["xlnet"] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ["xlnet"] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=__a , pad_on_left=bool(tokenizer.padding_side == "left" ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info(f'Saving features into cached file {cached_features_file}' )
torch.save(self.features , __a )
def __len__( self : Tuple ):
return len(self.features )
def __getitem__( self : Any , __a : Optional[int] ):
return self.features[i]
if is_tf_available():
import tensorflow as tf
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
__a =42
__a =-100
def __init__( self : Tuple , __a : TokenClassificationTask , __a : str , __a : PreTrainedTokenizer , __a : List[str] , __a : str , __a : Optional[int] = None , __a : Dict=False , __a : Split = Split.train , ):
_a = token_classification_task.read_examples_from_file(__a , __a )
# TODO clean up all this to leverage built-in features of tokenizers
_a = token_classification_task.convert_examples_to_features(
__a , __a , __a , __a , cls_token_at_end=bool(model_type in ["xlnet"] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ["xlnet"] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=__a , pad_on_left=bool(tokenizer.padding_side == "left" ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
def gen():
for ex in self.features:
if ex.token_type_ids is None:
yield (
{"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
ex.label_ids,
)
else:
yield (
{
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label_ids,
)
if "token_type_ids" not in tokenizer.model_input_names:
_a = tf.data.Dataset.from_generator(
__a , ({"input_ids": tf.intaa, "attention_mask": tf.intaa}, tf.intaa) , (
{"input_ids": tf.TensorShape([None] ), "attention_mask": tf.TensorShape([None] )},
tf.TensorShape([None] ),
) , )
else:
_a = tf.data.Dataset.from_generator(
__a , ({"input_ids": tf.intaa, "attention_mask": tf.intaa, "token_type_ids": tf.intaa}, tf.intaa) , (
{
"input_ids": tf.TensorShape([None] ),
"attention_mask": tf.TensorShape([None] ),
"token_type_ids": tf.TensorShape([None] ),
},
tf.TensorShape([None] ),
) , )
def UpperCamelCase__ ( self : Dict ):
_a = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features ) ) )
return self.dataset
def __len__( self : Union[str, Any] ):
return len(self.features )
def __getitem__( self : Any , __a : Optional[Any] ):
return self.features[i]
| 346 |
'''simple docstring'''
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
lowerCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
@add_end_docstrings(lowerCamelCase_ )
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
def __init__( self : List[Any] , *__a : Optional[int] , **__a : List[str] ):
super().__init__(*__a , **__a )
self.check_model_type(__a )
def UpperCamelCase__ ( self : Optional[Any] , __a : Dict=None , __a : int=None , __a : Optional[Any]=None , **__a : List[Any] ):
_a , _a = {}, {}
if padding is not None:
_a = padding
if truncation is not None:
_a = truncation
if top_k is not None:
_a = top_k
return preprocess_params, {}, postprocess_params
def __call__( self : Union[str, Any] , __a : Union["Image.Image", str] , __a : str = None , **__a : Any ):
if isinstance(__a , (Image.Image, str) ) and isinstance(__a , __a ):
_a = {"image": image, "question": question}
else:
_a = image
_a = super().__call__(__a , **__a )
return results
def UpperCamelCase__ ( self : Tuple , __a : Tuple , __a : Optional[Any]=False , __a : List[Any]=False ):
_a = load_image(inputs["image"] )
_a = self.tokenizer(
inputs["question"] , return_tensors=self.framework , padding=__a , truncation=__a )
_a = self.image_processor(images=__a , return_tensors=self.framework )
model_inputs.update(__a )
return model_inputs
def UpperCamelCase__ ( self : List[Any] , __a : List[str] ):
_a = self.model(**__a )
return model_outputs
def UpperCamelCase__ ( self : int , __a : Optional[int] , __a : Dict=5 ):
if top_k > self.model.config.num_labels:
_a = self.model.config.num_labels
if self.framework == "pt":
_a = model_outputs.logits.sigmoid()[0]
_a , _a = probs.topk(__a )
else:
raise ValueError(f'Unsupported framework: {self.framework}' )
_a = scores.tolist()
_a = ids.tolist()
return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(__a , __a )]
| 346 | 1 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self : Union[str, Any] , __a : int ):
_a = num_of_nodes
_a = []
_a = {}
def UpperCamelCase__ ( self : List[str] , __a : int , __a : int , __a : int ):
self.m_edges.append([u_node, v_node, weight] )
def UpperCamelCase__ ( self : Any , __a : int ):
if self.m_component[u_node] == u_node:
return u_node
return self.find_component(self.m_component[u_node] )
def UpperCamelCase__ ( self : List[str] , __a : int ):
if self.m_component[u_node] != u_node:
for k in self.m_component:
_a = self.find_component(__a )
def UpperCamelCase__ ( self : Dict , __a : list[int] , __a : int , __a : int ):
if component_size[u_node] <= component_size[v_node]:
_a = v_node
component_size[v_node] += component_size[u_node]
self.set_component(__a )
elif component_size[u_node] >= component_size[v_node]:
_a = self.find_component(__a )
component_size[u_node] += component_size[v_node]
self.set_component(__a )
def UpperCamelCase__ ( self : Any ):
_a = []
_a = 0
_a = [-1] * self.m_num_of_nodes
# A list of components (initialized to all of the nodes)
for node in range(self.m_num_of_nodes ):
self.m_component.update({node: node} )
component_size.append(1 )
_a = self.m_num_of_nodes
while num_of_components > 1:
for edge in self.m_edges:
_a , _a , _a = edge
_a = self.m_component[u]
_a = self.m_component[v]
if u_component != v_component:
for component in (u_component, v_component):
if (
minimum_weight_edge[component] == -1
or minimum_weight_edge[component][2] > w
):
_a = [u, v, w]
for edge in minimum_weight_edge:
if isinstance(__a , __a ):
_a , _a , _a = edge
_a = self.m_component[u]
_a = self.m_component[v]
if u_component != v_component:
mst_weight += w
self.union(__a , __a , __a )
print(f'Added edge [{u} - {v}]\nAdded weight: {w}\n' )
num_of_components -= 1
_a = [-1] * self.m_num_of_nodes
print(f'The total weight of the minimal spanning tree is: {mst_weight}' )
def _lowerCamelCase ( ) -> None:
pass
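# A self-contained minimal sketch (independent of the class above) of the same
# component-merging idea, here via Kruskal's algorithm with a tiny union-find
# rather than the Boruvka-style passes implemented above; names are illustrative.
def _mst_weight_sketch(num_of_nodes : int , edges : list ) -> int:
    parent = list(range(num_of_nodes ) )
    def find(x : int ) -> int:
        while parent[x] != x:
            parent[x] = parent[parent[x]] # path halving
            x = parent[x]
        return x
    total = 0
    for u, v, w in sorted(edges , key=lambda e: e[2] ):
        root_u, root_v = find(u ), find(v )
        if root_u != root_v:
            parent[root_u] = root_v
            total += w
    return total
# _mst_weight_sketch(4, [(0, 1, 1), (1, 2, 2), (2, 3, 3), (0, 3, 10)]) == 6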
if __name__ == "__main__":
import doctest
doctest.testmod()
| 346 |
'''simple docstring'''
from random import randint, random
def _lowerCamelCase ( lowercase : int , lowercase : int , lowercase : int , lowercase : bool = False , lowercase : bool = False , lowercase : int = 5 , ) -> list:
_a = [[-1] * number_of_cells] # Create a highway without any car
_a = 0
_a = max(lowercase , 0 )
while i < number_of_cells:
_a = (
randint(0 , lowercase ) if random_speed else initial_speed
) # Place the cars
i += (
randint(1 , max_speed * 2 ) if random_frequency else frequency
) # Arbitrary number, may need tuning
return highway
def _lowerCamelCase ( lowercase : list , lowercase : int ) -> int:
_a = 0
_a = highway_now[car_index + 1 :]
for cell in range(len(lowercase ) ): # May need a better name for this
if cells[cell] != -1: # If the cell is not empty then
return distance # we have the distance we wanted
distance += 1
# Here if the car is near the end of the highway
return distance + get_distance(lowercase , -1 )
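# The update below applies Nagel-Schreckenberg-style rules: (1) accelerate by one
# up to max_speed, (2) cap the speed at the gap to the next car, (3) randomly slow
# down with the given probability; positions are then advanced by the final speeds.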
def _lowerCamelCase ( lowercase : list , lowercase : float , lowercase : int ) -> list:
_a = len(lowercase )
    # Before the calculations, the highway is empty
_a = [-1] * number_of_cells
for car_index in range(lowercase ):
if highway_now[car_index] != -1:
# Add 1 to the current speed of the car and cap the speed
_a = min(highway_now[car_index] + 1 , lowercase )
# Number of empty cell before the next car
_a = get_distance(lowercase , lowercase ) - 1
# We can't have the car causing an accident
_a = min(next_highway[car_index] , lowercase )
if random() < probability:
# Randomly, a driver will slow down
_a = max(next_highway[car_index] - 1 , 0 )
return next_highway
def _lowerCamelCase ( lowercase : list , lowercase : int , lowercase : float , lowercase : int ) -> list:
_a = len(highway[0] )
for i in range(lowercase ):
_a = update(highway[i] , lowercase , lowercase )
_a = [-1] * number_of_cells
for car_index in range(lowercase ):
_a = next_speeds_calculated[car_index]
if speed != -1:
# Change the position based on the speed (with % to create the loop)
_a = (car_index + speed) % number_of_cells
# Commit the change of position
_a = speed
highway.append(lowercase )
return highway
if __name__ == "__main__":
import doctest
doctest.testmod()
| 346 | 1 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
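# Each placeholder class below raises a clear "backend missing" error via
# requires_backends as soon as it is instantiated or loaded, so importing the
# public API never fails eagerly when flax is not installed.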
class __SCREAMING_SNAKE_CASE (metaclass=lowerCamelCase_ ):
"""simple docstring"""
__a =['flax']
def __init__( self : Optional[int] , *__a : Tuple , **__a : str ):
requires_backends(self , ["flax"] )
@classmethod
def UpperCamelCase__ ( cls : Optional[Any] , *__a : Union[str, Any] , **__a : int ):
requires_backends(cls , ["flax"] )
@classmethod
def UpperCamelCase__ ( cls : str , *__a : str , **__a : Any ):
requires_backends(cls , ["flax"] )
class __SCREAMING_SNAKE_CASE (metaclass=lowerCamelCase_ ):
"""simple docstring"""
__a =['flax']
def __init__( self : List[Any] , *__a : List[Any] , **__a : List[Any] ):
requires_backends(self , ["flax"] )
@classmethod
def UpperCamelCase__ ( cls : int , *__a : Optional[int] , **__a : Optional[int] ):
requires_backends(cls , ["flax"] )
@classmethod
def UpperCamelCase__ ( cls : Optional[int] , *__a : Dict , **__a : Tuple ):
requires_backends(cls , ["flax"] )
class __SCREAMING_SNAKE_CASE (metaclass=lowerCamelCase_ ):
"""simple docstring"""
__a =['flax']
def __init__( self : Optional[Any] , *__a : Tuple , **__a : Any ):
requires_backends(self , ["flax"] )
@classmethod
def UpperCamelCase__ ( cls : str , *__a : int , **__a : Dict ):
requires_backends(cls , ["flax"] )
@classmethod
def UpperCamelCase__ ( cls : Tuple , *__a : Optional[int] , **__a : str ):
requires_backends(cls , ["flax"] )
class __SCREAMING_SNAKE_CASE (metaclass=lowerCamelCase_ ):
"""simple docstring"""
__a =['flax']
def __init__( self : int , *__a : str , **__a : int ):
requires_backends(self , ["flax"] )
@classmethod
def UpperCamelCase__ ( cls : str , *__a : Optional[int] , **__a : str ):
requires_backends(cls , ["flax"] )
@classmethod
def UpperCamelCase__ ( cls : List[Any] , *__a : List[str] , **__a : Tuple ):
requires_backends(cls , ["flax"] )
class __SCREAMING_SNAKE_CASE (metaclass=lowerCamelCase_ ):
"""simple docstring"""
__a =['flax']
def __init__( self : Tuple , *__a : Dict , **__a : Any ):
requires_backends(self , ["flax"] )
@classmethod
def UpperCamelCase__ ( cls : Any , *__a : Tuple , **__a : str ):
requires_backends(cls , ["flax"] )
@classmethod
def UpperCamelCase__ ( cls : int , *__a : Union[str, Any] , **__a : Any ):
requires_backends(cls , ["flax"] )
class __SCREAMING_SNAKE_CASE (metaclass=lowerCamelCase_ ):
"""simple docstring"""
__a =['flax']
def __init__( self : Union[str, Any] , *__a : Dict , **__a : int ):
requires_backends(self , ["flax"] )
@classmethod
def UpperCamelCase__ ( cls : Optional[int] , *__a : Dict , **__a : Union[str, Any] ):
requires_backends(cls , ["flax"] )
@classmethod
def UpperCamelCase__ ( cls : Optional[Any] , *__a : Tuple , **__a : Tuple ):
requires_backends(cls , ["flax"] )
class __SCREAMING_SNAKE_CASE (metaclass=lowerCamelCase_ ):
"""simple docstring"""
__a =['flax']
def __init__( self : int , *__a : Optional[int] , **__a : Union[str, Any] ):
requires_backends(self , ["flax"] )
@classmethod
def UpperCamelCase__ ( cls : Dict , *__a : str , **__a : Any ):
requires_backends(cls , ["flax"] )
@classmethod
def UpperCamelCase__ ( cls : List[str] , *__a : Dict , **__a : Any ):
requires_backends(cls , ["flax"] )
class __SCREAMING_SNAKE_CASE (metaclass=lowerCamelCase_ ):
"""simple docstring"""
__a =['flax']
def __init__( self : int , *__a : Tuple , **__a : Any ):
requires_backends(self , ["flax"] )
@classmethod
def UpperCamelCase__ ( cls : Union[str, Any] , *__a : str , **__a : Optional[Any] ):
requires_backends(cls , ["flax"] )
@classmethod
def UpperCamelCase__ ( cls : Tuple , *__a : Optional[int] , **__a : Union[str, Any] ):
requires_backends(cls , ["flax"] )
class __SCREAMING_SNAKE_CASE (metaclass=lowerCamelCase_ ):
"""simple docstring"""
__a =['flax']
def __init__( self : Tuple , *__a : Dict , **__a : Tuple ):
requires_backends(self , ["flax"] )
@classmethod
def UpperCamelCase__ ( cls : Optional[int] , *__a : List[Any] , **__a : Dict ):
requires_backends(cls , ["flax"] )
@classmethod
def UpperCamelCase__ ( cls : Dict , *__a : List[str] , **__a : Any ):
requires_backends(cls , ["flax"] )
class __SCREAMING_SNAKE_CASE (metaclass=lowerCamelCase_ ):
"""simple docstring"""
__a =['flax']
def __init__( self : Union[str, Any] , *__a : Union[str, Any] , **__a : List[str] ):
requires_backends(self , ["flax"] )
@classmethod
def UpperCamelCase__ ( cls : Any , *__a : List[Any] , **__a : Optional[Any] ):
requires_backends(cls , ["flax"] )
@classmethod
def UpperCamelCase__ ( cls : Any , *__a : Any , **__a : Union[str, Any] ):
requires_backends(cls , ["flax"] )
class __SCREAMING_SNAKE_CASE (metaclass=lowerCamelCase_ ):
"""simple docstring"""
__a =['flax']
def __init__( self : str , *__a : Optional[int] , **__a : Dict ):
requires_backends(self , ["flax"] )
@classmethod
def UpperCamelCase__ ( cls : Dict , *__a : str , **__a : Dict ):
requires_backends(cls , ["flax"] )
@classmethod
def UpperCamelCase__ ( cls : str , *__a : List[Any] , **__a : Optional[int] ):
requires_backends(cls , ["flax"] )
class __SCREAMING_SNAKE_CASE (metaclass=lowerCamelCase_ ):
"""simple docstring"""
__a =['flax']
def __init__( self : Optional[Any] , *__a : Optional[Any] , **__a : str ):
requires_backends(self , ["flax"] )
@classmethod
def UpperCamelCase__ ( cls : List[Any] , *__a : Union[str, Any] , **__a : int ):
requires_backends(cls , ["flax"] )
@classmethod
def UpperCamelCase__ ( cls : List[Any] , *__a : List[Any] , **__a : List[str] ):
requires_backends(cls , ["flax"] )
class __SCREAMING_SNAKE_CASE (metaclass=lowerCamelCase_ ):
"""simple docstring"""
__a =['flax']
def __init__( self : Optional[int] , *__a : Tuple , **__a : int ):
requires_backends(self , ["flax"] )
@classmethod
def UpperCamelCase__ ( cls : Optional[int] , *__a : str , **__a : List[str] ):
requires_backends(cls , ["flax"] )
@classmethod
def UpperCamelCase__ ( cls : str , *__a : List[Any] , **__a : Optional[Any] ):
requires_backends(cls , ["flax"] )
| 346 |
'''simple docstring'''
def _lowerCamelCase ( lowercase : int = 10 ) -> str:
if not isinstance(lowercase , lowercase ) or n < 0:
raise ValueError("Invalid input" )
_a = 10**n
_a = 2_8433 * (pow(2 , 783_0457 , lowercase )) + 1
return str(number % modulus )
if __name__ == "__main__":
from doctest import testmod
testmod()
print(f"""{solution(10) = }""")
| 346 | 1 |
'''simple docstring'''
from importlib import import_module
from .logging import get_logger
lowerCAmelCase_ : List[Any] = get_logger(__name__)
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self : str , __a : Tuple , __a : Dict=None ):
_a = attrs or []
if module is not None:
for key in module.__dict__:
if key in attrs or not key.startswith("__" ):
setattr(self , __a , getattr(__a , __a ) )
_a = module._original_module if isinstance(__a , _PatchedModuleObj ) else module
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
__a =[]
def __init__( self : List[Any] , __a : str , __a : str , __a : Dict , __a : Optional[int]=None ):
_a = obj
_a = target
_a = new
_a = target.split("." )[0]
_a = {}
_a = attrs or []
def __enter__( self : List[str] ):
*_a , _a = self.target.split("." )
# Patch modules:
# it's used to patch attributes of submodules like "os.path.join";
# in this case we need to patch "os" and "os.path"
for i in range(len(__a ) ):
try:
_a = import_module(".".join(submodules[: i + 1] ) )
except ModuleNotFoundError:
continue
# We iterate over all the globals in self.obj in case we find "os" or "os.path"
for attr in self.obj.__dir__():
_a = getattr(self.obj , __a )
# We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
# This allows to patch renamed modules like "from os import path as ospath".
if obj_attr is submodule or (
(isinstance(__a , _PatchedModuleObj ) and obj_attr._original_module is submodule)
):
_a = obj_attr
# patch at top level
setattr(self.obj , __a , _PatchedModuleObj(__a , attrs=self.attrs ) )
_a = getattr(self.obj , __a )
# construct lower levels patches
for key in submodules[i + 1 :]:
setattr(__a , __a , _PatchedModuleObj(getattr(__a , __a , __a ) , attrs=self.attrs ) )
_a = getattr(__a , __a )
# finally set the target attribute
setattr(__a , __a , self.new )
# Patch attribute itself:
# it's used for builtins like "open",
# and also to patch "os.path.join" we may also need to patch "join"
# itself if it was imported as "from os.path import join".
if submodules: # if it's an attribute of a submodule like "os.path.join"
try:
_a = getattr(import_module(".".join(__a ) ) , __a )
except (AttributeError, ModuleNotFoundError):
return
# We iterate over all the globals in self.obj in case we find "os.path.join"
for attr in self.obj.__dir__():
# We don't check for the name of the global, but rather if its value *is* "os.path.join".
# This allows to patch renamed attributes like "from os.path import join as pjoin".
if getattr(self.obj , __a ) is attr_value:
_a = getattr(self.obj , __a )
setattr(self.obj , __a , self.new )
        elif target_attr in globals()["__builtins__"]: # if it's a builtin like "open"
_a = globals()["__builtins__"][target_attr]
setattr(self.obj , __a , self.new )
else:
raise RuntimeError(f'Tried to patch attribute {target_attr} instead of a submodule.' )
def __exit__( self : str , *__a : str ):
for attr in list(self.original ):
setattr(self.obj , __a , self.original.pop(__a ) )
def UpperCamelCase__ ( self : Optional[Any] ):
self.__enter__()
self._active_patches.append(self )
def UpperCamelCase__ ( self : List[Any] ):
try:
self._active_patches.remove(self )
except ValueError:
# If the patch hasn't been started this will fail
return None
return self.__exit__()
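# A minimal usage sketch (not part of the module), assuming the class above is
# exposed as patch_submodule(obj, target, new):
#   with patch_submodule(some_module, "os.path.join", _fake_join):
#       ... # some_module now resolves os.path.join to _fake_join
# The last two methods behave like start()/stop(), and exiting restores every
# patched attribute from self.original.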
| 346 |
'''simple docstring'''
def _lowerCamelCase ( lowercase : int = 6008_5147_5143 ) -> int:
try:
_a = int(lowercase )
except (TypeError, ValueError):
raise TypeError("Parameter n must be int or castable to int." )
if n <= 0:
raise ValueError("Parameter n must be greater than or equal to one." )
_a = 2
_a = 0
if n == 2:
return 2
while n > 2:
while n % i != 0:
i += 1
_a = i
while n % i == 0:
_a = n // i
i += 1
return int(lowercase )
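# e.g. 13195 -> 29: the loop repeatedly divides out the smallest remaining factor
# (13195 = 5 * 7 * 13 * 29), so the largest prime factor is what survives last.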
if __name__ == "__main__":
print(f"""{solution() = }""")
| 346 | 1 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
lowerCAmelCase_ : str = logging.get_logger(__name__)
def _lowerCamelCase ( lowercase : Union[str, Any] ) -> List[List[ImageInput]]:
if isinstance(lowercase , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
return videos
elif isinstance(lowercase , (list, tuple) ) and is_valid_image(videos[0] ):
return [videos]
elif is_valid_image(lowercase ):
return [[videos]]
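# e.g. a single image becomes [[image]], a flat list of frames becomes [frames],
# and an already-batched list of clips is returned unchanged.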
raise ValueError(F'Could not make batched video from {videos}' )
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
__a =['pixel_values']
def __init__( self : List[str] , __a : bool = True , __a : Dict[str, int] = None , __a : PILImageResampling = PILImageResampling.BILINEAR , __a : bool = True , __a : Dict[str, int] = None , __a : bool = True , __a : Union[int, float] = 1 / 2_55 , __a : bool = True , __a : bool = True , __a : Optional[Union[float, List[float]]] = None , __a : Optional[Union[float, List[float]]] = None , **__a : Union[str, Any] , ):
super().__init__(**__a )
_a = size if size is not None else {"shortest_edge": 2_56}
_a = get_size_dict(__a , default_to_square=__a )
_a = crop_size if crop_size is not None else {"height": 2_24, "width": 2_24}
_a = get_size_dict(__a , param_name="crop_size" )
_a = do_resize
_a = size
_a = do_center_crop
_a = crop_size
_a = resample
_a = do_rescale
_a = rescale_factor
_a = offset
_a = do_normalize
_a = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_a = image_std if image_std is not None else IMAGENET_STANDARD_STD
def UpperCamelCase__ ( self : Union[str, Any] , __a : np.ndarray , __a : Dict[str, int] , __a : PILImageResampling = PILImageResampling.BILINEAR , __a : Optional[Union[str, ChannelDimension]] = None , **__a : Union[str, Any] , ):
_a = get_size_dict(__a , default_to_square=__a )
if "shortest_edge" in size:
_a = get_resize_output_image_size(__a , size["shortest_edge"] , default_to_square=__a )
elif "height" in size and "width" in size:
_a = (size["height"], size["width"])
else:
raise ValueError(f'Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}' )
return resize(__a , size=__a , resample=__a , data_format=__a , **__a )
def UpperCamelCase__ ( self : int , __a : np.ndarray , __a : Dict[str, int] , __a : Optional[Union[str, ChannelDimension]] = None , **__a : Dict , ):
_a = get_size_dict(__a )
if "height" not in size or "width" not in size:
raise ValueError(f'Size must have \'height\' and \'width\' as keys. Got {size.keys()}' )
return center_crop(__a , size=(size["height"], size["width"]) , data_format=__a , **__a )
def UpperCamelCase__ ( self : str , __a : np.ndarray , __a : Union[int, float] , __a : bool = True , __a : Optional[Union[str, ChannelDimension]] = None , **__a : str , ):
_a = image.astype(np.floataa )
if offset:
_a = image - (scale / 2)
return rescale(__a , scale=__a , data_format=__a , **__a )
def UpperCamelCase__ ( self : int , __a : np.ndarray , __a : Union[float, List[float]] , __a : Union[float, List[float]] , __a : Optional[Union[str, ChannelDimension]] = None , **__a : List[Any] , ):
return normalize(__a , mean=__a , std=__a , data_format=__a , **__a )
def UpperCamelCase__ ( self : str , __a : ImageInput , __a : bool = None , __a : Dict[str, int] = None , __a : PILImageResampling = None , __a : bool = None , __a : Dict[str, int] = None , __a : bool = None , __a : float = None , __a : bool = None , __a : bool = None , __a : Optional[Union[float, List[float]]] = None , __a : Optional[Union[float, List[float]]] = None , __a : Optional[ChannelDimension] = ChannelDimension.FIRST , ):
        if do_resize and (size is None or resample is None):
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
if offset and not do_rescale:
raise ValueError("For offset, do_rescale must also be set to True." )
# All transformations expect numpy arrays.
_a = to_numpy_array(__a )
if do_resize:
_a = self.resize(image=__a , size=__a , resample=__a )
if do_center_crop:
_a = self.center_crop(__a , size=__a )
if do_rescale:
_a = self.rescale(image=__a , scale=__a , offset=__a )
if do_normalize:
_a = self.normalize(image=__a , mean=__a , std=__a )
_a = to_channel_dimension_format(__a , __a )
return image
def UpperCamelCase__ ( self : str , __a : ImageInput , __a : bool = None , __a : Dict[str, int] = None , __a : PILImageResampling = None , __a : bool = None , __a : Dict[str, int] = None , __a : bool = None , __a : float = None , __a : bool = None , __a : bool = None , __a : Optional[Union[float, List[float]]] = None , __a : Optional[Union[float, List[float]]] = None , __a : Optional[Union[str, TensorType]] = None , __a : ChannelDimension = ChannelDimension.FIRST , **__a : Optional[Any] , ):
_a = do_resize if do_resize is not None else self.do_resize
_a = resample if resample is not None else self.resample
_a = do_center_crop if do_center_crop is not None else self.do_center_crop
_a = do_rescale if do_rescale is not None else self.do_rescale
_a = rescale_factor if rescale_factor is not None else self.rescale_factor
_a = offset if offset is not None else self.offset
_a = do_normalize if do_normalize is not None else self.do_normalize
_a = image_mean if image_mean is not None else self.image_mean
_a = image_std if image_std is not None else self.image_std
_a = size if size is not None else self.size
_a = get_size_dict(__a , default_to_square=__a )
_a = crop_size if crop_size is not None else self.crop_size
_a = get_size_dict(__a , param_name="crop_size" )
if not valid_images(__a ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
_a = make_batched(__a )
_a = [
[
self._preprocess_image(
image=__a , do_resize=__a , size=__a , resample=__a , do_center_crop=__a , crop_size=__a , do_rescale=__a , rescale_factor=__a , offset=__a , do_normalize=__a , image_mean=__a , image_std=__a , data_format=__a , )
for img in video
]
for video in videos
]
_a = {"pixel_values": videos}
return BatchFeature(data=__a , tensor_type=__a )
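
# Illustrative helper (added; not part of the original file): the arithmetic behind a
# {"shortest_edge": 256} resize target, mirroring what get_resize_output_image_size
# computes when default_to_square=False. Exact rounding upstream may differ slightly.
def _shortest_edge_size(height: int, width: int, shortest_edge: int = 256) -> tuple:
    short, long = (height, width) if height <= width else (width, height)
    new_short, new_long = shortest_edge, int(shortest_edge * long / short)
    return (new_short, new_long) if height <= width else (new_long, new_short)


# e.g. _shortest_edge_size(300, 400) == (256, 341): the short side is pinned to 256
# and the long side keeps the aspect ratio.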
| 346 |
'''simple docstring'''
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format='%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=os.environ.get('LOGLEVEL', 'INFO').upper(),
stream=sys.stdout,
)
lowerCAmelCase_ : List[Any] = logging.getLogger(__name__)
lowerCAmelCase_ : List[Any] = {'facebook/bart-base': BartForConditionalGeneration}
lowerCAmelCase_ : int = {'facebook/bart-base': BartTokenizer}
def _lowerCamelCase ( ) -> Union[str, Any]:
_a = argparse.ArgumentParser(description="Export Bart model + Beam Search to ONNX graph." )
parser.add_argument(
"--validation_file" , type=lowercase , default=lowercase , help="A csv or a json file containing the validation data." )
parser.add_argument(
"--max_length" , type=lowercase , default=5 , help="The maximum total input sequence length after tokenization." , )
parser.add_argument(
"--num_beams" , type=lowercase , default=lowercase , help=(
"Number of beams to use for evaluation. This argument will be "
"passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."
) , )
parser.add_argument(
"--model_name_or_path" , type=lowercase , help="Path to pretrained model or model identifier from huggingface.co/models." , required=lowercase , )
parser.add_argument(
"--config_name" , type=lowercase , default=lowercase , help="Pretrained config name or path if not the same as model_name" , )
parser.add_argument(
"--device" , type=lowercase , default="cpu" , help="Device where the model will be run" , )
parser.add_argument("--output_file_path" , type=lowercase , default=lowercase , help="Where to store the final ONNX file." )
_a = parser.parse_args()
return args
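
# Example invocation (illustrative; the script name is an assumption, not taken from this file):
#   python run_onnx_exporter.py --model_name_or_path facebook/bart-base \
#       --output_file_path bart_beam_search.onnx --device cpu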
def _lowerCamelCase ( lowercase : Any , lowercase : Tuple="cpu" ) -> Optional[Any]:
_a = model_dict[model_name].from_pretrained(lowercase ).to(lowercase )
_a = tokenizer_dict[model_name].from_pretrained(lowercase )
if model_name in ["facebook/bart-base"]:
_a = 0
_a = None
_a = 0
return huggingface_model, tokenizer
def _lowerCamelCase ( lowercase : List[str] , lowercase : Tuple , lowercase : int , lowercase : Any , lowercase : Dict ) -> Any:
model.eval()
_a = None
_a = torch.jit.script(BARTBeamSearchGenerator(lowercase ) )
with torch.no_grad():
_a = "My friends are cool but they eat too many carbs."
_a = tokenizer([ARTICLE_TO_SUMMARIZE] , max_length=1024 , return_tensors="pt" ).to(model.device )
_a = model.generate(
inputs["input_ids"] , attention_mask=inputs["attention_mask"] , num_beams=lowercase , max_length=lowercase , early_stopping=lowercase , decoder_start_token_id=model.config.decoder_start_token_id , )
torch.onnx.export(
lowercase , (
inputs["input_ids"],
inputs["attention_mask"],
num_beams,
max_length,
model.config.decoder_start_token_id,
) , lowercase , opset_version=14 , input_names=["input_ids", "attention_mask", "num_beams", "max_length", "decoder_start_token_id"] , output_names=["output_ids"] , dynamic_axes={
"input_ids": {0: "batch", 1: "seq"},
"output_ids": {0: "batch", 1: "seq_out"},
} , example_outputs=lowercase , )
logger.info("Model exported to {}".format(lowercase ) )
_a = remove_dup_initializers(os.path.abspath(lowercase ) )
logger.info("Deduplicated and optimized model written to {}".format(lowercase ) )
_a = onnxruntime.InferenceSession(lowercase )
_a = ort_sess.run(
lowercase , {
"input_ids": inputs["input_ids"].cpu().numpy(),
"attention_mask": inputs["attention_mask"].cpu().numpy(),
"num_beams": np.array(lowercase ),
"max_length": np.array(lowercase ),
"decoder_start_token_id": np.array(model.config.decoder_start_token_id ),
} , )
np.testing.assert_allclose(summary_ids.cpu().numpy() , ort_out[0] , rtol=1E-3 , atol=1E-3 )
logger.info("Model outputs from torch and ONNX Runtime are similar." )
logger.info("Success." )
def _lowerCamelCase ( ) -> Any:
_a = parse_args()
_a = 5
_a = 4
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO , )
logger.setLevel(logging.INFO )
transformers.utils.logging.set_verbosity_error()
_a = torch.device(args.device )
_a , _a = load_model_tokenizer(args.model_name_or_path , lowercase )
if model.config.decoder_start_token_id is None:
raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined" )
model.to(lowercase )
if args.max_length:
_a = args.max_length
if args.num_beams:
_a = args.num_beams
if args.output_file_path:
_a = args.output_file_path
else:
_a = "BART.onnx"
logger.info("Exporting model to ONNX" )
export_and_validate_model(lowercase , lowercase , lowercase , lowercase , lowercase )
if __name__ == "__main__":
main()
| 346 | 1 |
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
lowerCAmelCase_ : Dict = abspath(join(dirname(dirname(dirname(__file__))), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
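
# Usage note (illustrative): with this conftest on the test path, per-run report files
# can be produced via
#   python -m pytest --make-reports=<report_id> tests/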
| 346 |
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
lowerCAmelCase_ : Dict = abspath(join(dirname(dirname(dirname(__file__))), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
| 346 | 1 |
'''simple docstring'''
from __future__ import annotations
graph: dict[str, list[str]] = {
'A': ['B', 'C', 'E'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F', 'G'],
'D': ['B'],
'E': ['A', 'B', 'D'],
'F': ['C'],
'G': ['C'],
}
class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breadth_first_search(self) -> None:
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue

        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        if target_vertex == self.source_vertex:
            return self.source_vertex

        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = (
                f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            )
            raise ValueError(msg)

        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"


if __name__ == "__main__":
    g = Graph(graph, 'G')
    g.breadth_first_search()
    print(g.shortest_path('D'))
    print(g.shortest_path('G'))
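    # 'Foo' was never reached from 'G', so the next call finds no parent entry and
    # raises ValueError: No path from vertex: G to vertex: Foo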
print(g.shortest_path('Foo'))
| 346 |
'''simple docstring'''
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
@register_to_config
def __init__( self : List[Any] , __a : int , __a : int , __a : int , __a : float , __a : int , __a : int , __a : int , __a : int , __a : str , __a : bool = False , ):
super().__init__()
_a = nn.Embedding(__a , __a )
_a = nn.Embedding(__a , __a )
_a = False
_a = nn.Dropout(p=__a )
_a = TaConfig(
vocab_size=__a , d_model=__a , num_heads=__a , d_kv=__a , d_ff=__a , dropout_rate=__a , feed_forward_proj=__a , is_decoder=__a , is_encoder_decoder=__a , )
_a = nn.ModuleList()
for lyr_num in range(__a ):
_a = TaBlock(__a )
self.encoders.append(__a )
_a = TaLayerNorm(__a )
_a = nn.Dropout(p=__a )
def UpperCamelCase__ ( self : str , __a : Union[str, Any] , __a : Dict ):
_a = self.token_embedder(__a )
_a = encoder_input_tokens.shape[1]
_a = torch.arange(__a , device=encoder_input_tokens.device )
x += self.position_encoding(__a )
_a = self.dropout_pre(__a )
        # invert the attention mask
_a = encoder_input_tokens.size()
_a = self.get_extended_attention_mask(__a , __a )
for lyr in self.encoders:
_a = lyr(__a , __a )[0]
_a = self.layer_norm(__a )
return self.dropout_post(__a ), encoder_inputs_mask
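
# Note (illustrative): get_extended_attention_mask (from ModuleUtilsMixin) broadcasts the
# (batch, seq_len) 0/1 mask to (batch, 1, 1, seq_len) and converts it into an additive
# mask (0 for visible tokens, a large negative value for padding), which is the form the
# encoder blocks above expect.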
| 346 | 1 |
'''simple docstring'''
import math
def prime_sieve(n: int) -> list:
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i

    primes = [2]
    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)

    return primes
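
# Example: prime_sieve(20) -> [2, 3, 5, 7, 11, 13, 17, 19]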
def solution(limit: int = 9999_6666_3333) -> int:
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)

    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]

    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]

        lower_bound = last_prime**2
        upper_bound = next_prime**2

        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime

        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime

        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime

        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue
            if current > limit:
                break
            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2
            # Increment the current number
            current += last_prime * next_prime

        # Setup for next pair
        last_prime = next_prime
        prime_index += 1

    return matches_sum
if __name__ == "__main__":
print(solution())
| 346 |
'''simple docstring'''
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params


logger = logging.getLogger(__name__)


def get_checkpoint_callback(output_dir, metric):
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    elif metric == "loss":
        exp = "{val_avg_loss:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2, bleu, em and loss, got {metric}. "
            "You can make your own by adding to this function."
        )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=1,
        every_n_epochs=1,
    )
    return checkpoint_callback


def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )
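
# Example (illustrative): get_checkpoint_callback(output_dir, "rouge2") saves checkpoints
# named like "0.2148-500.ckpt" (val_avg_rouge2 formatted to four decimals, then the step count).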
class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(
        self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True
    ) -> None:
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
        results_file.parent.mkdir(exist_ok=True)
        generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer: pl.Trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
| 346 | 1 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowerCAmelCase_ : List[Any] = logging.get_logger(__name__)
lowerCAmelCase_ : Optional[Any] = {
'ut/deta': 'https://huggingface.co/ut/deta/resolve/main/config.json',
}
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
__a ='deta'
__a ={
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self : List[str] , __a : int=None , __a : Optional[Any]=9_00 , __a : str=20_48 , __a : Any=6 , __a : List[str]=20_48 , __a : Tuple=8 , __a : Optional[int]=6 , __a : str=10_24 , __a : str=8 , __a : Optional[int]=0.0 , __a : Optional[Any]=True , __a : Tuple="relu" , __a : Tuple=2_56 , __a : int=0.1 , __a : str=0.0 , __a : Union[str, Any]=0.0 , __a : Tuple=0.02 , __a : List[str]=1.0 , __a : Tuple=True , __a : List[str]=False , __a : int="sine" , __a : Tuple=5 , __a : Union[str, Any]=4 , __a : Any=4 , __a : Any=True , __a : List[Any]=3_00 , __a : int=True , __a : List[str]=True , __a : Any=1 , __a : Union[str, Any]=5 , __a : int=2 , __a : List[Any]=1 , __a : Any=1 , __a : str=5 , __a : str=2 , __a : Dict=0.1 , __a : List[Any]=0.25 , **__a : List[Any] , ):
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
_a = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"] )
else:
if isinstance(__a , __a ):
_a = backbone_config.pop("model_type" )
_a = CONFIG_MAPPING[backbone_model_type]
_a = config_class.from_dict(__a )
_a = backbone_config
_a = num_queries
_a = max_position_embeddings
_a = d_model
_a = encoder_ffn_dim
_a = encoder_layers
_a = encoder_attention_heads
_a = decoder_ffn_dim
_a = decoder_layers
_a = decoder_attention_heads
_a = dropout
_a = attention_dropout
_a = activation_dropout
_a = activation_function
_a = init_std
_a = init_xavier_std
_a = encoder_layerdrop
_a = auxiliary_loss
_a = position_embedding_type
# deformable attributes
_a = num_feature_levels
_a = encoder_n_points
_a = decoder_n_points
_a = two_stage
_a = two_stage_num_proposals
_a = with_box_refine
_a = assign_first_stage
if two_stage is True and with_box_refine is False:
raise ValueError("If two_stage is True, with_box_refine must be True." )
# Hungarian matcher
_a = class_cost
_a = bbox_cost
_a = giou_cost
# Loss coefficients
_a = mask_loss_coefficient
_a = dice_loss_coefficient
_a = bbox_loss_coefficient
_a = giou_loss_coefficient
_a = eos_coefficient
_a = focal_alpha
super().__init__(is_encoder_decoder=__a , **__a )
@property
def UpperCamelCase__ ( self : Dict ):
return self.encoder_attention_heads
@property
def UpperCamelCase__ ( self : Union[str, Any] ):
return self.d_model
def UpperCamelCase__ ( self : Any ):
_a = copy.deepcopy(self.__dict__ )
_a = self.backbone_config.to_dict()
_a = self.__class__.model_type
return output
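
# Usage note (illustrative; upstream this class is DetaConfig): the constructor enforces
# that a two-stage model also uses box refinement, e.g.
#   DetaConfig(two_stage=True, with_box_refine=True)   # ok
#   DetaConfig(two_stage=True, with_box_refine=False)  # raises ValueError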
| 346 |
'''simple docstring'''
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse('9.1.0'):
lowerCAmelCase_ : str = {
'linear': PIL.Image.Resampling.BILINEAR,
'bilinear': PIL.Image.Resampling.BILINEAR,
'bicubic': PIL.Image.Resampling.BICUBIC,
'lanczos': PIL.Image.Resampling.LANCZOS,
'nearest': PIL.Image.Resampling.NEAREST,
}
else:
lowerCAmelCase_ : Union[str, Any] = {
'linear': PIL.Image.LINEAR,
'bilinear': PIL.Image.BILINEAR,
'bicubic': PIL.Image.BICUBIC,
'lanczos': PIL.Image.LANCZOS,
'nearest': PIL.Image.NEAREST,
}
def pt_to_pil(images):
    """Convert a torch image tensor in [-1, 1] to a list of PIL images."""
    images = (images / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    images = numpy_to_pil(images)
    return images


def numpy_to_pil(images):
    """Convert a numpy image batch in [0, 1] to a list of PIL images."""
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("uint8")
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]
    return pil_images
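
# Worked example of the mapping above (illustrative):
#   tensor value -1.0 -> (-1.0 / 2 + 0.5) = 0.0 -> pixel 0   (after * 255 and rounding)
#   tensor value  0.0 ->                    0.5 -> pixel 128
#   tensor value  1.0 ->                    1.0 -> pixel 255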
| 346 | 1 |
'''simple docstring'''
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(10_0000)]


def next_number(number: int) -> int:
    sum_of_digits_squared = 0
    while number:
        # Increased Speed Slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 10_0000]
        number //= 10_0000

    return sum_of_digits_squared
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
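# Worked example: 44 -> 32 -> 13 -> 10 -> 1 (ends at 1), while
# 85 -> 89 -> 145 -> 42 -> 20 -> 4 -> 16 -> 37 -> 58 -> 89 (ends at 89).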
# Changed dictionary to an array to quicken the solution
CHAINS: list[bool | None] = [None] * 10_00_00_00
CHAINS[0] = True  # the chain starting at 1 ends at 1
CHAINS[57] = False  # the chain starting at 58 ends at 89


def chain(number: int) -> bool:
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore

    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain

    while number < 1000_0000:
        CHAINS[number - 1] = number_chain
        number *= 10

    return number_chain


def solution(number: int = 1000_0000) -> int:
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)

    return CHAINS[:number].count(False)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"""{solution() = }""")
| 350 |
'''simple docstring'''
import contextlib
import csv
import json
import os
import sqlitea
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope="session" )
def _lowerCamelCase ( ) -> Optional[int]:
_a = 10
_a = datasets.Features(
{
"tokens": datasets.Sequence(datasets.Value("string" ) ),
"labels": datasets.Sequence(datasets.ClassLabel(names=["negative", "positive"] ) ),
"answers": datasets.Sequence(
{
"text": datasets.Value("string" ),
"answer_start": datasets.Value("int32" ),
} ),
"id": datasets.Value("int64" ),
} )
_a = datasets.Dataset.from_dict(
{
"tokens": [["foo"] * 5] * n,
"labels": [[1] * 5] * n,
"answers": [{"answer_start": [97], "text": ["1976"]}] * 10,
"id": list(range(lowercase ) ),
} , features=lowercase , )
return dataset
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Tuple , lowercase : int ) -> Optional[Any]:
_a = str(tmp_path_factory.mktemp("data" ) / "file.arrow" )
dataset.map(cache_file_name=lowercase )
return filename
# FILE_CONTENT + files
lowerCAmelCase_ : Union[str, Any] = '\\n Text data.\n Second line of data.'
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : List[str] ) -> List[Any]:
_a = tmp_path_factory.mktemp("data" ) / "file.txt"
_a = FILE_CONTENT
with open(lowercase , "w" ) as f:
f.write(lowercase )
return filename
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : str ) -> str:
import bza
_a = tmp_path_factory.mktemp("data" ) / "file.txt.bz2"
_a = bytes(lowercase , "utf-8" )
with bza.open(lowercase , "wb" ) as f:
f.write(lowercase )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : List[str] ) -> Optional[Any]:
import gzip
_a = str(tmp_path_factory.mktemp("data" ) / "file.txt.gz" )
_a = bytes(lowercase , "utf-8" )
with gzip.open(lowercase , "wb" ) as f:
f.write(lowercase )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Union[str, Any] ) -> Union[str, Any]:
if datasets.config.LZ4_AVAILABLE:
import lza.frame
_a = tmp_path_factory.mktemp("data" ) / "file.txt.lz4"
_a = bytes(lowercase , "utf-8" )
with lza.frame.open(lowercase , "wb" ) as f:
f.write(lowercase )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Any , lowercase : Tuple ) -> Optional[Any]:
if datasets.config.PY7ZR_AVAILABLE:
import pyazr
_a = tmp_path_factory.mktemp("data" ) / "file.txt.7z"
with pyazr.SevenZipFile(lowercase , "w" ) as archive:
archive.write(lowercase , arcname=os.path.basename(lowercase ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Dict , lowercase : Optional[Any] ) -> Dict:
import tarfile
_a = tmp_path_factory.mktemp("data" ) / "file.txt.tar"
with tarfile.TarFile(lowercase , "w" ) as f:
f.add(lowercase , arcname=os.path.basename(lowercase ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Any ) -> Union[str, Any]:
import lzma
_a = tmp_path_factory.mktemp("data" ) / "file.txt.xz"
_a = bytes(lowercase , "utf-8" )
with lzma.open(lowercase , "wb" ) as f:
f.write(lowercase )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : int , lowercase : Any ) -> Union[str, Any]:
import zipfile
_a = tmp_path_factory.mktemp("data" ) / "file.txt.zip"
with zipfile.ZipFile(lowercase , "w" ) as f:
f.write(lowercase , arcname=os.path.basename(lowercase ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Dict ) -> List[str]:
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
_a = tmp_path_factory.mktemp("data" ) / "file.txt.zst"
_a = bytes(lowercase , "utf-8" )
with zstd.open(lowercase , "wb" ) as f:
f.write(lowercase )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : List[str] ) -> Union[str, Any]:
_a = tmp_path_factory.mktemp("data" ) / "file.xml"
_a = textwrap.dedent(
"\\n <?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n <tmx version=\"1.4\">\n <header segtype=\"sentence\" srclang=\"ca\" />\n <body>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>" )
with open(lowercase , "w" ) as f:
f.write(lowercase )
return filename
lowerCAmelCase_ : Optional[int] = [
{'col_1': '0', 'col_2': 0, 'col_3': 0.0},
{'col_1': '1', 'col_2': 1, 'col_3': 1.0},
{'col_1': '2', 'col_2': 2, 'col_3': 2.0},
{'col_1': '3', 'col_2': 3, 'col_3': 3.0},
]
lowerCAmelCase_ : List[Any] = [
{'col_1': '4', 'col_2': 4, 'col_3': 4.0},
{'col_1': '5', 'col_2': 5, 'col_3': 5.0},
]
lowerCAmelCase_ : Dict = {
'col_1': ['0', '1', '2', '3'],
'col_2': [0, 1, 2, 3],
'col_3': [0.0, 1.0, 2.0, 3.0],
}
lowerCAmelCase_ : Dict = [
{'col_3': 0.0, 'col_1': '0', 'col_2': 0},
{'col_3': 1.0, 'col_1': '1', 'col_2': 1},
]
lowerCAmelCase_ : List[Any] = [
{'col_1': 's0', 'col_2': 0, 'col_3': 0.0},
{'col_1': 's1', 'col_2': 1, 'col_3': 1.0},
{'col_1': 's2', 'col_2': 2, 'col_3': 2.0},
{'col_1': 's3', 'col_2': 3, 'col_3': 3.0},
]
@pytest.fixture(scope="session" )
def _lowerCamelCase ( ) -> List[str]:
return DATA_DICT_OF_LISTS
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Union[str, Any] ) -> str:
_a = datasets.Dataset.from_dict(lowercase )
_a = str(tmp_path_factory.mktemp("data" ) / "dataset.arrow" )
dataset.map(cache_file_name=lowercase )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Dict ) -> Dict:
_a = str(tmp_path_factory.mktemp("data" ) / "dataset.sqlite" )
with contextlib.closing(sqlitea.connect(lowercase ) ) as con:
_a = con.cursor()
cur.execute("CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)" )
for item in DATA:
cur.execute("INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)" , tuple(item.values() ) )
con.commit()
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Optional[Any] ) -> str:
_a = str(tmp_path_factory.mktemp("data" ) / "dataset.csv" )
with open(lowercase , "w" , newline="" ) as f:
_a = csv.DictWriter(lowercase , fieldnames=["col_1", "col_2", "col_3"] )
writer.writeheader()
for item in DATA:
writer.writerow(lowercase )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : int ) -> Optional[Any]:
_a = str(tmp_path_factory.mktemp("data" ) / "dataset2.csv" )
with open(lowercase , "w" , newline="" ) as f:
_a = csv.DictWriter(lowercase , fieldnames=["col_1", "col_2", "col_3"] )
writer.writeheader()
for item in DATA:
writer.writerow(lowercase )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Union[str, Any] , lowercase : Union[str, Any] ) -> int:
import bza
_a = tmp_path_factory.mktemp("data" ) / "dataset.csv.bz2"
with open(lowercase , "rb" ) as f:
_a = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
with bza.open(lowercase , "wb" ) as f:
f.write(lowercase )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Optional[int] , lowercase : Any , lowercase : Any ) -> List[str]:
_a = tmp_path_factory.mktemp("data" ) / "dataset.csv.zip"
with zipfile.ZipFile(lowercase , "w" ) as f:
f.write(lowercase , arcname=os.path.basename(lowercase ) )
f.write(lowercase , arcname=os.path.basename(lowercase ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Dict , lowercase : Any , lowercase : List[Any] ) -> Dict:
_a = tmp_path_factory.mktemp("data" ) / "dataset.csv.zip"
with zipfile.ZipFile(lowercase , "w" ) as f:
f.write(lowercase , arcname=os.path.basename(csv_path.replace(".csv" , ".CSV" ) ) )
f.write(lowercase , arcname=os.path.basename(csva_path.replace(".csv" , ".CSV" ) ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Any , lowercase : Optional[Any] , lowercase : int ) -> int:
_a = tmp_path_factory.mktemp("data" ) / "dataset_with_dir.csv.zip"
with zipfile.ZipFile(lowercase , "w" ) as f:
f.write(lowercase , arcname=os.path.join("main_dir" , os.path.basename(lowercase ) ) )
f.write(lowercase , arcname=os.path.join("main_dir" , os.path.basename(lowercase ) ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : List[Any] ) -> Union[str, Any]:
_a = str(tmp_path_factory.mktemp("data" ) / "dataset.parquet" )
_a = pa.schema(
{
"col_1": pa.string(),
"col_2": pa.intaa(),
"col_3": pa.floataa(),
} )
with open(lowercase , "wb" ) as f:
_a = pq.ParquetWriter(lowercase , schema=lowercase )
_a = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(lowercase ) )] for k in DATA[0]} , schema=lowercase )
writer.write_table(lowercase )
writer.close()
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : str ) -> Union[str, Any]:
_a = str(tmp_path_factory.mktemp("data" ) / "dataset.json" )
_a = {"data": DATA}
with open(lowercase , "w" ) as f:
json.dump(lowercase , lowercase )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : int ) -> Union[str, Any]:
_a = str(tmp_path_factory.mktemp("data" ) / "dataset.json" )
_a = {"data": DATA_DICT_OF_LISTS}
with open(lowercase , "w" ) as f:
json.dump(lowercase , lowercase )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Optional[int] ) -> str:
_a = str(tmp_path_factory.mktemp("data" ) / "dataset.jsonl" )
with open(lowercase , "w" ) as f:
for item in DATA:
f.write(json.dumps(lowercase ) + "\n" )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : int ) -> List[str]:
_a = str(tmp_path_factory.mktemp("data" ) / "dataset2.jsonl" )
with open(lowercase , "w" ) as f:
for item in DATA:
f.write(json.dumps(lowercase ) + "\n" )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Optional[Any] ) -> Optional[Any]:
_a = str(tmp_path_factory.mktemp("data" ) / "dataset_312.jsonl" )
with open(lowercase , "w" ) as f:
for item in DATA_312:
f.write(json.dumps(lowercase ) + "\n" )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : str ) -> int:
_a = str(tmp_path_factory.mktemp("data" ) / "dataset-str.jsonl" )
with open(lowercase , "w" ) as f:
for item in DATA_STR:
f.write(json.dumps(lowercase ) + "\n" )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : List[str] , lowercase : Dict ) -> Tuple:
import gzip
_a = str(tmp_path_factory.mktemp("data" ) / "dataset.txt.gz" )
with open(lowercase , "rb" ) as orig_file:
with gzip.open(lowercase , "wb" ) as zipped_file:
zipped_file.writelines(lowercase )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Union[str, Any] , lowercase : List[Any] ) -> List[Any]:
import gzip
_a = str(tmp_path_factory.mktemp("data" ) / "dataset.jsonl.gz" )
with open(lowercase , "rb" ) as orig_file:
with gzip.open(lowercase , "wb" ) as zipped_file:
zipped_file.writelines(lowercase )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Optional[int] , lowercase : List[Any] , lowercase : int ) -> str:
_a = tmp_path_factory.mktemp("data" ) / "dataset.jsonl.zip"
with zipfile.ZipFile(lowercase , "w" ) as f:
f.write(lowercase , arcname=os.path.basename(lowercase ) )
f.write(lowercase , arcname=os.path.basename(lowercase ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Union[str, Any] , lowercase : Optional[int] , lowercase : int , lowercase : List[Any] ) -> Optional[int]:
_a = tmp_path_factory.mktemp("data" ) / "dataset_nested.jsonl.zip"
with zipfile.ZipFile(lowercase , "w" ) as f:
f.write(lowercase , arcname=os.path.join("nested" , os.path.basename(lowercase ) ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Optional[int] , lowercase : List[str] , lowercase : str ) -> Optional[Any]:
_a = tmp_path_factory.mktemp("data" ) / "dataset_with_dir.jsonl.zip"
with zipfile.ZipFile(lowercase , "w" ) as f:
f.write(lowercase , arcname=os.path.join("main_dir" , os.path.basename(lowercase ) ) )
f.write(lowercase , arcname=os.path.join("main_dir" , os.path.basename(lowercase ) ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Tuple , lowercase : Any , lowercase : Optional[int] ) -> int:
_a = tmp_path_factory.mktemp("data" ) / "dataset.jsonl.tar"
with tarfile.TarFile(lowercase , "w" ) as f:
f.add(lowercase , arcname=os.path.basename(lowercase ) )
f.add(lowercase , arcname=os.path.basename(lowercase ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : str , lowercase : List[str] , lowercase : Union[str, Any] , lowercase : Union[str, Any] ) -> Optional[Any]:
_a = tmp_path_factory.mktemp("data" ) / "dataset_nested.jsonl.tar"
with tarfile.TarFile(lowercase , "w" ) as f:
f.add(lowercase , arcname=os.path.join("nested" , os.path.basename(lowercase ) ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : int ) -> str:
_a = ["0", "1", "2", "3"]
_a = str(tmp_path_factory.mktemp("data" ) / "dataset.txt" )
with open(lowercase , "w" ) as f:
for item in data:
f.write(item + "\n" )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : str ) -> Dict:
_a = ["0", "1", "2", "3"]
_a = str(tmp_path_factory.mktemp("data" ) / "dataset2.txt" )
with open(lowercase , "w" ) as f:
for item in data:
f.write(item + "\n" )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Union[str, Any] ) -> Dict:
_a = ["0", "1", "2", "3"]
_a = tmp_path_factory.mktemp("data" ) / "dataset.abc"
with open(lowercase , "w" ) as f:
for item in data:
f.write(item + "\n" )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Any , lowercase : Union[str, Any] , lowercase : Any ) -> Optional[Any]:
_a = tmp_path_factory.mktemp("data" ) / "dataset.text.zip"
with zipfile.ZipFile(lowercase , "w" ) as f:
f.write(lowercase , arcname=os.path.basename(lowercase ) )
f.write(lowercase , arcname=os.path.basename(lowercase ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Dict , lowercase : List[str] , lowercase : List[str] ) -> Union[str, Any]:
_a = tmp_path_factory.mktemp("data" ) / "dataset_with_dir.text.zip"
with zipfile.ZipFile(lowercase , "w" ) as f:
f.write(lowercase , arcname=os.path.join("main_dir" , os.path.basename(lowercase ) ) )
f.write(lowercase , arcname=os.path.join("main_dir" , os.path.basename(lowercase ) ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Tuple , lowercase : int , lowercase : str ) -> int:
_a = tmp_path_factory.mktemp("data" ) / "dataset.ext.zip"
with zipfile.ZipFile(lowercase , "w" ) as f:
f.write(lowercase , arcname=os.path.basename("unsupported.ext" ) )
f.write(lowercase , arcname=os.path.basename("unsupported_2.ext" ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : List[Any] ) -> Any:
_a = "\n".join(["First", "Second\u2029with Unicode new line", "Third"] )
_a = str(tmp_path_factory.mktemp("data" ) / "dataset_with_unicode_new_lines.txt" )
with open(lowercase , "w" , encoding="utf-8" ) as f:
f.write(lowercase )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( ) -> Optional[Any]:
return os.path.join("tests" , "features" , "data" , "test_image_rgb.jpg" )
@pytest.fixture(scope="session" )
def _lowerCamelCase ( ) -> Optional[int]:
return os.path.join("tests" , "features" , "data" , "test_audio_44100.wav" )
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Any , lowercase : str ) -> Dict:
_a = tmp_path_factory.mktemp("data" ) / "dataset.img.zip"
with zipfile.ZipFile(lowercase , "w" ) as f:
f.write(lowercase , arcname=os.path.basename(lowercase ) )
f.write(lowercase , arcname=os.path.basename(lowercase ).replace(".jpg" , "2.jpg" ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : str ) -> str:
_a = tmp_path_factory.mktemp("data_dir" )
(data_dir / "subdir").mkdir()
with open(data_dir / "subdir" / "train.txt" , "w" ) as f:
f.write("foo\n" * 10 )
with open(data_dir / "subdir" / "test.txt" , "w" ) as f:
f.write("bar\n" * 10 )
# hidden file
with open(data_dir / "subdir" / ".test.txt" , "w" ) as f:
f.write("bar\n" * 10 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / ".subdir" / "train.txt" , "w" ) as f:
f.write("foo\n" * 10 )
with open(data_dir / ".subdir" / "test.txt" , "w" ) as f:
f.write("bar\n" * 10 )
return data_dir
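
# Usage sketch (illustrative): pytest injects these session-scoped fixtures into tests by
# parameter name, e.g.
#   def test_csv(csv_path):
#       assert csv_path.endswith("dataset.csv")
# (assuming the fixture above is named `csv_path`, as it is in the upstream test suite).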
| 346 | 0 |
'''simple docstring'''
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
StableDiffusionControlNetImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class __SCREAMING_SNAKE_CASE (A__ , A__ , A__ , unittest.TestCase ):
"""simple docstring"""
__a =StableDiffusionControlNetImgaImgPipeline
__a =TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
__a =TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
__a =IMAGE_TO_IMAGE_IMAGE_PARAMS.union({'control_image'} )
__a =IMAGE_TO_IMAGE_IMAGE_PARAMS
def UpperCamelCase__ ( self : int ):
torch.manual_seed(0 )
_a = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
torch.manual_seed(0 )
_a = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
torch.manual_seed(0 )
_a = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=__A , set_alpha_to_one=__A , )
torch.manual_seed(0 )
_a = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
torch.manual_seed(0 )
_a = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
_a = CLIPTextModel(__A )
_a = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
_a = {
"""unet""": unet,
"""controlnet""": controlnet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def UpperCamelCase__ ( self : str , __a : Dict , __a : List[Any]=0 ):
if str(__A ).startswith("mps" ):
_a = torch.manual_seed(__A )
else:
_a = torch.Generator(device=__A ).manual_seed(__A )
_a = 2
_a = randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=__A , device=torch.device(__A ) , )
_a = floats_tensor(control_image.shape , rng=random.Random(__A ) ).to(__A )
_a = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_a = Image.fromarray(np.uinta(__A ) ).convert("RGB" ).resize((64, 64) )
_a = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
"""image""": image,
"""control_image""": control_image,
}
return inputs
def UpperCamelCase__ ( self : str ):
return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3 )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def UpperCamelCase__ ( self : Any ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3 )
def UpperCamelCase__ ( self : int ):
self._test_inference_batch_single_identical(expected_max_diff=2e-3 )
class __SCREAMING_SNAKE_CASE (A__ , A__ , unittest.TestCase ):
"""simple docstring"""
__a =StableDiffusionControlNetImgaImgPipeline
__a =TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
__a =TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
__a =frozenset([] ) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
def UpperCamelCase__ ( self : Dict ):
torch.manual_seed(0 )
_a = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
torch.manual_seed(0 )
def init_weights(__a : Optional[int] ):
if isinstance(__A , torch.nn.Convad ):
                torch.nn.init.normal_(m.weight )
m.bias.data.fill_(1.0 )
_a = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
controlneta.controlnet_down_blocks.apply(__A )
torch.manual_seed(0 )
_a = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
controlneta.controlnet_down_blocks.apply(__A )
torch.manual_seed(0 )
_a = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=__A , set_alpha_to_one=__A , )
torch.manual_seed(0 )
_a = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
torch.manual_seed(0 )
_a = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
_a = CLIPTextModel(__A )
_a = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
_a = MultiControlNetModel([controlneta, controlneta] )
_a = {
"""unet""": unet,
"""controlnet""": controlnet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def UpperCamelCase__ ( self : Any , __a : int , __a : int=0 ):
if str(__A ).startswith("mps" ):
_a = torch.manual_seed(__A )
else:
_a = torch.Generator(device=__A ).manual_seed(__A )
_a = 2
_a = [
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=__A , device=torch.device(__A ) , ),
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=__A , device=torch.device(__A ) , ),
]
_a = floats_tensor(control_image[0].shape , rng=random.Random(__A ) ).to(__A )
_a = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_a = Image.fromarray(np.uinta(__A ) ).convert("RGB" ).resize((64, 64) )
_a = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
"""image""": image,
"""control_image""": control_image,
}
return inputs
def UpperCamelCase__ ( self : Dict ):
_a = self.get_dummy_components()
_a = self.pipeline_class(**__A )
pipe.to(__A )
_a = 10.0
_a = 4
_a = self.get_dummy_inputs(__A )
_a = steps
_a = scale
_a = pipe(**__A )[0]
_a = self.get_dummy_inputs(__A )
_a = steps
_a = scale
_a = pipe(**__A , control_guidance_start=0.1 , control_guidance_end=0.2 )[0]
_a = self.get_dummy_inputs(__A )
_a = steps
_a = scale
_a = pipe(**__A , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7] )[0]
_a = self.get_dummy_inputs(__A )
_a = steps
_a = scale
_a = pipe(**__A , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8] )[0]
# make sure that all outputs are different
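        # (The four runs vary only control_guidance_start/end, the fraction of the denoising
        # schedule during which each ControlNet is applied, so their outputs must diverge.)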
assert np.sum(np.abs(output_a - output_a ) ) > 1e-3
assert np.sum(np.abs(output_a - output_a ) ) > 1e-3
assert np.sum(np.abs(output_a - output_a ) ) > 1e-3
def UpperCamelCase__ ( self : Optional[int] ):
return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3 )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def UpperCamelCase__ ( self : Union[str, Any] ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3 )
def UpperCamelCase__ ( self : Any ):
self._test_inference_batch_single_identical(expected_max_diff=2e-3 )
def UpperCamelCase__ ( self : Dict ):
_a = self.get_dummy_components()
_a = self.pipeline_class(**__A )
pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
with tempfile.TemporaryDirectory() as tmpdir:
try:
# save_pretrained is not implemented for Multi-ControlNet
pipe.save_pretrained(__A )
except NotImplementedError:
pass
@slow
@require_torch_gpu
class __SCREAMING_SNAKE_CASE (unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase__ ( self : List[Any] ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase__ ( self : Dict ):
_a = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny" )
_a = StableDiffusionControlNetImgaImgPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , safety_checker=__A , controlnet=__A )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=__A )
_a = torch.Generator(device="cpu" ).manual_seed(0 )
_a = """evil space-punk bird"""
_a = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" ).resize((5_12, 5_12) )
_a = load_image(
"https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png" ).resize((5_12, 5_12) )
_a = pipe(
__A , __A , control_image=__A , generator=__A , output_type="np" , num_inference_steps=50 , strength=0.6 , )
_a = output.images[0]
assert image.shape == (5_12, 5_12, 3)
_a = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy" )
assert np.abs(expected_image - image ).max() < 9e-2
| 351 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
__a =['image_processor', 'tokenizer']
__a ='LayoutLMv2ImageProcessor'
__a =('LayoutXLMTokenizer', 'LayoutXLMTokenizerFast')
def __init__( self : Dict , __a : int=None , __a : List[Any]=None , **__a : str ):
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , __a , )
_a = kwargs.pop("feature_extractor" )
_a = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(__a , __a )
def __call__( self : Optional[int] , __a : Optional[Any] , __a : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , __a : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , __a : Union[List[List[int]], List[List[List[int]]]] = None , __a : Optional[Union[List[int], List[List[int]]]] = None , __a : bool = True , __a : Union[bool, str, PaddingStrategy] = False , __a : Union[bool, str, TruncationStrategy] = None , __a : Optional[int] = None , __a : int = 0 , __a : Optional[int] = None , __a : Optional[bool] = None , __a : Optional[bool] = None , __a : bool = False , __a : bool = False , __a : bool = False , __a : bool = False , __a : bool = True , __a : Optional[Union[str, TensorType]] = None , **__a : Optional[Any] , ):
# verify input
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
"You cannot provide bounding boxes "
"if you initialized the image processor with apply_ocr set to True." )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
"You cannot provide word labels if you initialized the image processor with apply_ocr set to True." )
if return_overflowing_tokens is True and return_offsets_mapping is False:
raise ValueError("You cannot return overflowing tokens without returning the offsets mapping." )
# first, apply the image processor
_a = self.image_processor(images=__a , return_tensors=__a )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(__a , __a ):
_a = [text] # add batch dimension (as the image processor always adds a batch dimension)
_a = features["words"]
_a = self.tokenizer(
text=text if text is not None else features["words"] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features["boxes"] , word_labels=__a , add_special_tokens=__a , padding=__a , truncation=__a , max_length=__a , stride=__a , pad_to_multiple_of=__a , return_token_type_ids=__a , return_attention_mask=__a , return_overflowing_tokens=__a , return_special_tokens_mask=__a , return_offsets_mapping=__a , return_length=__a , verbose=__a , return_tensors=__a , **__a , )
# add pixel values
_a = features.pop("pixel_values" )
if return_overflowing_tokens is True:
_a = self.get_overflowing_images(__a , encoded_inputs["overflow_to_sample_mapping"] )
_a = images
return encoded_inputs
def UpperCamelCase__ ( self : int , __a : List[Any] , __a : int ):
# in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
_a = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(__a ) != len(__a ):
raise ValueError(
"Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
f' {len(__a )} and {len(__a )}' )
return images_with_overflow
def UpperCamelCase__ ( self : Optional[Any] , *__a : Dict , **__a : Union[str, Any] ):
return self.tokenizer.batch_decode(*__a , **__a )
def UpperCamelCase__ ( self : Union[str, Any] , *__a : Optional[int] , **__a : Optional[Any] ):
return self.tokenizer.decode(*__a , **__a )
@property
def UpperCamelCase__ ( self : int ):
return ["input_ids", "bbox", "attention_mask", "image"]
@property
def UpperCamelCase__ ( self : List[Any] ):
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , __a , )
return self.image_processor_class
@property
def UpperCamelCase__ ( self : int ):
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , __a , )
return self.image_processor
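
# Usage sketch (illustrative): when the image processor runs OCR (apply_ocr=True), only
# images are needed and words/boxes come from Tesseract:
#   encoding = processor(images=image, return_tensors="pt")
# With apply_ocr=False, the caller supplies the words and boxes instead:
#   encoding = processor(images=image, text=words, boxes=boxes, return_tensors="pt")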
| 346 | 0 |
'''simple docstring'''
def compute_ap(l):  # noqa: E741
    n = len(l)
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n

    def dfs(root, at, parent, out_edge_count):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at

        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])

                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count

    for i in range(n):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i, i, -1, out_edge_count)
            is_art[i] = out_edge_count > 1

    for x in range(len(is_art)):
        if is_art[x] is True:
            print(x)
# Adjacency list of graph
lowerCAmelCase_ : List[Any] = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
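# A cleaned-up, runnable reference for the articulation-point search encoded
# (with machine-renamed variables) above. This sketch uses an explicit
# discovery-time counter instead of reusing the vertex label as its id, which
# is the standard low-link formulation; all names here are my own.
def articulation_points(graph):
    n = len(graph)
    disc = [-1] * n  # discovery time, -1 means unvisited
    low = [0] * n  # lowest discovery time reachable from the subtree
    is_art = [False] * n
    timer = 0
    def dfs(at, parent):
        nonlocal timer
        disc[at] = low[at] = timer
        timer += 1
        children = 0
        for to in graph[at]:
            if to == parent:
                continue
            if disc[to] == -1:
                children += 1
                dfs(to, at)
                low[at] = min(low[at], low[to])
                # a non-root `at` is an articulation point when no back edge
                # from `to`'s subtree climbs above `at`
                if parent != -1 and low[to] >= disc[at]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], disc[to])
        if parent == -1 and children > 1:  # root with several DFS children
            is_art[at] = True
    for v in range(n):
        if disc[v] == -1:
            dfs(v, -1)
    return [v for v in range(n) if is_art[v]]
assert articulation_points(
    {0: [1, 2], 1: [0, 2], 2: [0, 1, 3, 5], 3: [2, 4], 4: [3], 5: [2, 6, 8], 6: [5, 7], 7: [6, 8], 8: [5, 7]}
) == [2, 3, 5]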
| 352 |
'''simple docstring'''
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase_ : Dict = logging.get_logger(__name__)
lowerCAmelCase_ : int = '▁'
lowerCAmelCase_ : Optional[Any] = {
'vocab_file': 'vocab.json',
'spm_file': 'sentencepiece.bpe.model',
}
lowerCAmelCase_ : Optional[int] = {
'vocab_file': {
'facebook/s2t-small-librispeech-asr': (
'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json'
),
},
'spm_file': {
'facebook/s2t-small-librispeech-asr': (
'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model'
)
},
}
lowerCAmelCase_ : List[str] = {
'facebook/s2t-small-librispeech-asr': 10_24,
}
lowerCAmelCase_ : List[Any] = ['pt', 'fr', 'ru', 'nl', 'ro', 'it', 'es', 'de']
lowerCAmelCase_ : Union[str, Any] = {'mustc': MUSTC_LANGS}
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
__a =VOCAB_FILES_NAMES
__a =PRETRAINED_VOCAB_FILES_MAP
__a =MAX_MODEL_INPUT_SIZES
__a =['input_ids', 'attention_mask']
__a =[]
def __init__( self : Optional[Any] , __a : Optional[Any] , __a : Any , __a : Any="<s>" , __a : List[str]="</s>" , __a : str="<pad>" , __a : List[str]="<unk>" , __a : Union[str, Any]=False , __a : Any=False , __a : List[str]=None , __a : Optional[int]=None , __a : Optional[Dict[str, Any]] = None , **__a : int , ):
_a = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__a , eos_token=__a , unk_token=__a , pad_token=__a , do_upper_case=__a , do_lower_case=__a , tgt_lang=__a , lang_codes=__a , sp_model_kwargs=self.sp_model_kwargs , **__a , )
_a = do_upper_case
_a = do_lower_case
_a = load_json(__a )
_a = {v: k for k, v in self.encoder.items()}
_a = spm_file
_a = load_spm(__a , self.sp_model_kwargs )
if lang_codes is not None:
_a = lang_codes
_a = LANGUAGES[lang_codes]
_a = [f'<lang:{lang}>' for lang in self.langs]
_a = {lang: self.sp_model.PieceToId(f'<lang:{lang}>' ) for lang in self.langs}
_a = self.lang_tokens
_a = tgt_lang if tgt_lang is not None else self.langs[0]
self.set_tgt_lang_special_tokens(self._tgt_lang )
else:
_a = {}
@property
def UpperCamelCase__ ( self : str ):
return len(self.encoder )
@property
def UpperCamelCase__ ( self : str ):
return self._tgt_lang
@tgt_lang.setter
def UpperCamelCase__ ( self : Optional[int] , __a : Any ):
_a = new_tgt_lang
self.set_tgt_lang_special_tokens(__a )
def UpperCamelCase__ ( self : List[Any] , __a : str ):
_a = self.lang_code_to_id[tgt_lang]
_a = [lang_code_id]
def UpperCamelCase__ ( self : Dict , __a : str ):
return self.sp_model.encode(__a , out_type=__a )
def UpperCamelCase__ ( self : List[str] , __a : Any ):
return self.encoder.get(__a , self.encoder[self.unk_token] )
def UpperCamelCase__ ( self : str , __a : int ):
return self.decoder.get(__a , self.unk_token )
def UpperCamelCase__ ( self : str , __a : List[str] ):
_a = []
_a = ""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
_a = self.sp_model.decode(__a )
out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
_a = []
else:
current_sub_tokens.append(__a )
_a = self.sp_model.decode(__a )
out_string += decoded.upper() if self.do_upper_case else decoded
return out_string.strip()
def UpperCamelCase__ ( self : int , __a : Any , __a : int=None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id]
def UpperCamelCase__ ( self : Any , __a : List[int] , __a : Optional[List[int]] = None , __a : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__a , token_ids_a=__a , already_has_special_tokens=__a )
_a = [1] * len(self.prefix_tokens )
_a = [1]
if token_ids_a is None:
return prefix_ones + ([0] * len(__a )) + suffix_ones
return prefix_ones + ([0] * len(__a )) + ([0] * len(__a )) + suffix_ones
def UpperCamelCase__ ( self : Union[str, Any] ):
_a = self.encoder.copy()
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Union[str, Any] ):
_a = self.__dict__.copy()
_a = None
return state
def __setstate__( self : str , __a : Dict ):
_a = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
_a = {}
_a = load_spm(self.spm_file , self.sp_model_kwargs )
def UpperCamelCase__ ( self : List[str] , __a : str , __a : Optional[str] = None ):
_a = Path(__a )
assert save_dir.is_dir(), f'{save_directory} should be a directory'
_a = save_dir / (
(filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
)
_a = save_dir / (
(filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
)
save_json(self.encoder , __a )
if os.path.abspath(self.spm_file ) != os.path.abspath(__a ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , __a )
elif not os.path.isfile(self.spm_file ):
with open(__a , "wb" ) as fi:
_a = self.sp_model.serialized_model_proto()
fi.write(__a )
return (str(__a ), str(__a ))
def _lowerCamelCase ( lowercase : str , lowercase : Dict[str, Any] ) -> sentencepiece.SentencePieceProcessor:
_a = sentencepiece.SentencePieceProcessor(**lowercase )
spm.Load(str(lowercase ) )
return spm
def _lowerCamelCase ( lowercase : str ) -> Union[Dict, List]:
with open(lowercase , "r" ) as f:
return json.load(lowercase )
def _lowerCamelCase ( lowercase : Any , lowercase : str ) -> None:
with open(lowercase , "w" ) as f:
json.dump(lowercase , lowercase , indent=2 )
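# Quick check of the special-tokens-mask shape produced above: one language
# prefix token, then the sequence, then EOS (names here are illustrative only).
def special_tokens_mask(num_prefix_tokens, seq_len, pair_len=None):
    prefix_ones = [1] * num_prefix_tokens
    suffix_ones = [1]  # eos
    if pair_len is None:
        return prefix_ones + [0] * seq_len + suffix_ones
    return prefix_ones + [0] * seq_len + [0] * pair_len + suffix_ones
assert special_tokens_mask(1, 3) == [1, 0, 0, 0, 1]
assert special_tokens_mask(1, 2, pair_len=2) == [1, 0, 0, 0, 0, 1]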
| 346 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase_ : Union[str, Any] = {
'configuration_bigbird_pegasus': [
'BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BigBirdPegasusConfig',
'BigBirdPegasusOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ : Tuple = [
'BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST',
'BigBirdPegasusForCausalLM',
'BigBirdPegasusForConditionalGeneration',
'BigBirdPegasusForQuestionAnswering',
'BigBirdPegasusForSequenceClassification',
'BigBirdPegasusModel',
'BigBirdPegasusPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
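# Hedged sketch of the lazy-import idea behind `_LazyModule` above: attribute
# access triggers the real submodule import on first use, keeping package
# import cheap. This stand-in built on importlib is illustrative only, not
# the actual implementation.
import importlib
import types
class LazySubmoduleProxy(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure
    def __getattr__(self, attr):
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(f"{self.__name__}.{submodule}")
                return getattr(module, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")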
| 353 |
'''simple docstring'''
from manim import *
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
def UpperCamelCase__ ( self : Dict ):
_a = Rectangle(height=0.5 , width=0.5 )
_a = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
_a = [mem.copy() for i in range(6 )]
_a = [mem.copy() for i in range(6 )]
_a = VGroup(*__a ).arrange(__a , buff=0 )
_a = VGroup(*__a ).arrange(__a , buff=0 )
_a = VGroup(__a , __a ).arrange(__a , buff=0 )
_a = Text("CPU" , font_size=24 )
_a = Group(__a , __a ).arrange(__a , buff=0.5 , aligned_edge=__a )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__a )
_a = [mem.copy() for i in range(4 )]
_a = VGroup(*__a ).arrange(__a , buff=0 )
_a = Text("GPU" , font_size=24 )
_a = Group(__a , __a ).arrange(__a , buff=0.5 , aligned_edge=__a )
gpu.move_to([-1, -1, 0] )
self.add(__a )
_a = [mem.copy() for i in range(6 )]
_a = VGroup(*__a ).arrange(__a , buff=0 )
_a = Text("Model" , font_size=24 )
_a = Group(__a , __a ).arrange(__a , buff=0.5 , aligned_edge=__a )
model.move_to([3, -1.0, 0] )
self.add(__a )
_a = []
for i, rect in enumerate(__a ):
rect.set_stroke(__a )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
_a = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(__a , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__a )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=__a , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=__a , buff=0.0 )
self.add(__a )
cpu_targs.append(__a )
_a = [mem.copy() for i in range(6 )]
_a = VGroup(*__a ).arrange(__a , buff=0 )
_a = Text("Loaded Checkpoint" , font_size=24 )
_a = Group(__a , __a ).arrange(__a , aligned_edge=__a , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
_a = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_a = MarkupText(
f'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(__a , __a )
_a = MarkupText(
f'<span fgcolor=\'{BLUE}\'>●</span> Checkpoint' , font_size=18 , )
blue_text.next_to(__a , DOWN * 2.4 , aligned_edge=key_text.get_left() )
_a = MarkupText(
f'Next, a <i><span fgcolor="{BLUE}">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor="{BLUE}">single shard</span>.' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(__a ) , Write(__a ) )
self.play(Write(__a , run_time=1 ) , Create(__a , run_time=1 ) )
_a = []
_a = []
for i, rect in enumerate(__a ):
_a = fill.copy().set_fill(__a , opacity=0.7 )
target.move_to(__a )
first_animations.append(GrowFromCenter(__a , run_time=1 ) )
_a = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(__a , run_time=1.5 ) )
self.play(*__a )
self.play(*__a )
self.wait()
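# To render a scene like the one above with manim-community (the file name
# and scene class name are placeholders):
#   manim -pql checkpoint_loading.py Stage2
# -p previews the output, -ql renders at low quality for fast iteration.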
| 346 | 0 |
'''simple docstring'''
from collections.abc import Sequence
def _lowerCamelCase ( lowercase : Sequence[float] , lowercase : bool = False ) -> float:
if not arr:
return 0
_a = 0 if allow_empty_subarrays else float("-inf" )
_a = 0.0
for num in arr:
_a = max(0 if allow_empty_subarrays else num , curr_sum + num )
_a = max(lowerCamelCase__ , lowerCamelCase__ )
return max_sum
if __name__ == "__main__":
from doctest import testmod
testmod()
lowerCAmelCase_ : int = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
print(f"""{max_subarray_sum(nums) = }""")
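# Worked values for the variant above (using the name from the __main__ block):
# max_subarray_sum([-2, 1, -3, 4, -1, 2, 1, -5, 4])          -> 6, from [4, -1, 2, 1]
# max_subarray_sum([-1, -2, -3])                             -> -1 (best single element)
# max_subarray_sum([-1, -2, -3], allow_empty_subarrays=True) -> 0 (empty subarray wins)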
| 354 |
'''simple docstring'''
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
lowerCAmelCase_ : Tuple = WebClient(token=os.environ['CI_SLACK_BOT_TOKEN'])
def _lowerCamelCase ( lowercase : List[Any] ) -> Optional[int]:
_a = test_results.split(" " )
_a = 0
_a = 0
# When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
# When it is too long, those signs are not present.
_a = expressions[-2] if "=" in expressions[-1] else expressions[-1]
for i, expression in enumerate(lowercase ):
if "failed" in expression:
failed += int(expressions[i - 1] )
if "passed" in expression:
success += int(expressions[i - 1] )
return failed, success, time_spent
def _lowerCamelCase ( lowercase : str ) -> Optional[Any]:
_a = {}
_a = None
_a = False
for line in failures_short_lines.split("\n" ):
if re.search(r"_ \[doctest\]" , lowercase ):
_a = True
_a = line.split(" " )[2]
elif in_error and not line.split(" " )[0].isdigit():
_a = line
_a = False
return failures
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self : Tuple , __a : str , __a : Dict ):
_a = title
_a = doc_test_results["time_spent"].split("," )[0]
_a = doc_test_results["success"]
_a = doc_test_results["failures"]
_a = self.n_success + self.n_failures
# Failures and success of the modeling tests
_a = doc_test_results
@property
def UpperCamelCase__ ( self : int ):
_a = [self._time_spent]
_a = 0
for time in time_spent:
_a = time.split(":" )
# Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
if len(__a ) == 1:
_a = [0, 0, time_parts[0]]
_a , _a , _a = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
total_secs += hours * 36_00 + minutes * 60 + seconds
_a , _a , _a = total_secs // 36_00, (total_secs % 36_00) // 60, total_secs % 60
return f'{int(__a )}h{int(__a )}m{int(__a )}s'
@property
def UpperCamelCase__ ( self : Optional[Any] ):
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
def UpperCamelCase__ ( self : Optional[Any] ):
return {
"type": "section",
"text": {
"type": "plain_text",
"text": f'🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.',
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
@property
def UpperCamelCase__ ( self : List[str] ):
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
f'There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in'
f' {self.time}.'
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
@property
def UpperCamelCase__ ( self : str ):
_a = 40
_a = {k: v["failed"] for k, v in doc_test_results.items() if isinstance(__a , __a )}
_a = ""
for category, failures in category_failures.items():
if len(__a ) == 0:
continue
if report != "":
report += "\n\n"
report += f'*{category} failures*:'.ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
report += "`"
report += "`\n`".join(__a )
report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": f'The following examples had failures:\n\n\n{report}\n',
},
}
@property
def UpperCamelCase__ ( self : List[str] ):
_a = [self.header]
if self.n_failures > 0:
blocks.append(self.failures )
if self.n_failures > 0:
blocks.extend([self.category_failures] )
if self.n_failures == 0:
blocks.append(self.no_failures )
return json.dumps(__a )
@staticmethod
def UpperCamelCase__ ( ):
_a = [
{
"type": "section",
"text": {
"type": "plain_text",
"text": "There was an issue running the tests.",
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
]
print("Sending the following payload" )
print(json.dumps({"blocks": json.loads(__a )} ) )
client.chat_postMessage(
channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"] , text="There was an issue running the tests." , blocks=__a , )
def UpperCamelCase__ ( self : Tuple ):
print("Sending the following payload" )
print(json.dumps({"blocks": json.loads(self.payload )} ) )
_a = f'{self.n_failures} failures out of {self.n_tests} tests,' if self.n_failures else "All tests passed."
_a = client.chat_postMessage(
channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"] , blocks=self.payload , text=__a , )
def UpperCamelCase__ ( self : Dict , __a : List[str] , __a : List[Any] , __a : Tuple , __a : int ):
_a = ""
for key, value in failures.items():
_a = value[:2_00] + " [Truncated]" if len(__a ) > 2_50 else value
failures_text += f'*{key}*\n_{value}_\n\n'
_a = job_name
_a = {"type": "section", "text": {"type": "mrkdwn", "text": text}}
if job_link is not None:
_a = {
"type": "button",
"text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True},
"url": job_link,
}
return [
{"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
content,
{"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
]
def UpperCamelCase__ ( self : str ):
if self.thread_ts is None:
raise ValueError("Can only post reply if a post has been made." )
_a = self.doc_test_results.pop("job_link" )
self.doc_test_results.pop("failures" )
self.doc_test_results.pop("success" )
self.doc_test_results.pop("time_spent" )
_a = sorted(self.doc_test_results.items() , key=lambda __a : t[0] )
for job, job_result in sorted_dict:
if len(job_result["failures"] ):
                _a = f'*Num failures*: {len(job_result["failed"] )}\n'
_a = job_result["failures"]
_a = self.get_reply_blocks(__a , __a , __a , text=__a )
print("Sending the following reply" )
print(json.dumps({"blocks": blocks} ) )
client.chat_postMessage(
channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"] , text=f'Results for {job}' , blocks=__a , thread_ts=self.thread_ts["ts"] , )
time.sleep(1 )
def _lowerCamelCase ( ) -> Any:
_a = os.environ["GITHUB_RUN_ID"]
_a = F'https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100'
_a = requests.get(lowercase ).json()
_a = {}
try:
jobs.update({job["name"]: job["html_url"] for job in result["jobs"]} )
_a = math.ceil((result["total_count"] - 100) / 100 )
for i in range(lowercase ):
_a = requests.get(url + F'&page={i + 2}' ).json()
jobs.update({job["name"]: job["html_url"] for job in result["jobs"]} )
return jobs
except Exception as e:
print("Unknown error, could not fetch links." , lowercase )
return {}
def _lowerCamelCase ( lowercase : str ) -> Dict:
_a = {}
if os.path.exists(lowercase ):
_a = os.listdir(lowercase )
for file in files:
try:
with open(os.path.join(lowercase , lowercase ) , encoding="utf-8" ) as f:
_a = f.read()
except UnicodeDecodeError as e:
raise ValueError(F'Could not open {os.path.join(lowercase , lowercase )}.' ) from e
return _artifact
def _lowerCamelCase ( ) -> str:
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self : Dict , __a : str ):
_a = name
_a = []
def __str__( self : List[str] ):
return self.name
def UpperCamelCase__ ( self : str , __a : str ):
self.paths.append({"name": self.name, "path": path} )
_a = {}
_a = filter(os.path.isdir , os.listdir() )
for directory in directories:
_a = directory
if artifact_name not in _available_artifacts:
_a = Artifact(lowercase )
_available_artifacts[artifact_name].add_path(lowercase )
return _available_artifacts
if __name__ == "__main__":
lowerCAmelCase_ : List[Any] = get_job_links()
lowerCAmelCase_ : Any = retrieve_available_artifacts()
lowerCAmelCase_ : List[str] = collections.OrderedDict(
[
('*.py', 'API Examples'),
('*.md', 'MD Examples'),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
lowerCAmelCase_ : Optional[Any] = {
v: {
'failed': [],
'failures': {},
}
for v in docs.values()
}
# Link to the GitHub Action job
lowerCAmelCase_ : int = github_actions_job_links.get('run_doctests')
lowerCAmelCase_ : Union[str, Any] = available_artifacts['doc_tests_gpu_test_reports'].paths[0]
lowerCAmelCase_ : List[str] = retrieve_artifact(artifact_path['name'])
if "stats" in artifact:
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : Optional[int] = handle_test_results(artifact['stats'])
lowerCAmelCase_ : List[str] = failed
lowerCAmelCase_ : Optional[Any] = success
lowerCAmelCase_ : Tuple = time_spent[1:-1] + ', '
lowerCAmelCase_ : List[Any] = extract_first_line_failure(artifact['failures_short'])
for line in artifact["summary_short"].split('\n'):
if re.search('FAILED', line):
lowerCAmelCase_ : int = line.replace('FAILED ', '')
lowerCAmelCase_ : Optional[int] = line.split()[0].replace('\n', '')
if "::" in line:
lowerCAmelCase_ , lowerCAmelCase_ : str = line.split('::')
else:
lowerCAmelCase_ , lowerCAmelCase_ : Optional[int] = line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
lowerCAmelCase_ : Union[str, Any] = docs[file_regex]
doc_test_results[category]["failed"].append(test)
lowerCAmelCase_ : List[str] = all_failures[test] if test in all_failures else 'N/A'
lowerCAmelCase_ : Optional[Any] = failure
break
lowerCAmelCase_ : Tuple = Message('🤗 Results of the doc tests.', doc_test_results)
message.post()
message.post_reply()
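# Standalone restatement of the summary parsing used above, checked against a
# typical pytest tail line (the exact summary format is an assumption):
def parse_test_summary(test_results):
    expressions = test_results.split(" ")
    failed = success = 0
    time_spent = expressions[-2] if "=" in expressions[-1] else expressions[-1]
    for i, expression in enumerate(expressions):
        if "failed" in expression:
            failed += int(expressions[i - 1])
        if "passed" in expression:
            success += int(expressions[i - 1])
    return failed, success, time_spent
assert parse_test_summary("1 failed, 2 passed in 6.50s") == (1, 2, "6.50s")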
| 346 | 0 |
'''simple docstring'''
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class __SCREAMING_SNAKE_CASE (unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase__ ( self : Optional[Any] ):
super().tearDown()
gc.collect()
def UpperCamelCase__ ( self : str ):
_a = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-inpaint/init_image.png" )
_a = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" )
_a = 'xvjiarui/stable-diffusion-2-inpainting'
_a = FlaxStableDiffusionInpaintPipeline.from_pretrained(_a , safety_checker=_a )
_a = 'Face of a yellow cat, high resolution, sitting on a park bench'
_a = jax.random.PRNGKey(0 )
_a = 50
_a = jax.device_count()
_a = num_samples * [prompt]
_a = num_samples * [init_image]
_a = num_samples * [mask_image]
_a = pipeline.prepare_inputs(_a , _a , _a )
# shard inputs and rng
_a = replicate(_a )
_a = jax.random.split(_a , jax.device_count() )
_a = shard(_a )
_a = shard(_a )
_a = shard(_a )
_a = pipeline(
_a , _a , _a , _a , _a , _a , jit=_a )
_a = output.images.reshape(_a , 5_12 , 5_12 , 3 )
_a = images[0, 2_53:2_56, 2_53:2_56, -1]
_a = jnp.asarray(jax.device_get(image_slice.flatten() ) )
_a = jnp.array(
[0.3611307, 0.37649736, 0.3757408, 0.38213953, 0.39295167, 0.3841631, 0.41554978, 0.4137475, 0.4217084] )
print(f'output_slice: {output_slice}' )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
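# Hedged sketch (requires jax and flax installed) of the replicate/shard
# layout the test above relies on: `shard` splits the leading batch axis into
# a device axis so each pmapped call sees one slice per device, while
# `replicate` broadcasts a pytree (e.g. params) across devices.
import jax
import numpy as np
from flax.training.common_utils import shard
batch = np.zeros((jax.local_device_count() * 2, 77), dtype=np.int32)
assert shard(batch).shape == (jax.local_device_count(), 2, 77)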
| 355 |
'''simple docstring'''
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def _lowerCamelCase ( ) -> str:
_a = HfArgumentParser(lowercase )
_a = parser.parse_args_into_dataclasses()[0]
_a = TensorFlowBenchmark(args=lowercase )
try:
_a = parser.parse_args_into_dataclasses()[0]
except ValueError as e:
_a = "Arg --no_{0} is no longer used, please use --no-{0} instead."
_a = " ".join(str(lowercase ).split(" " )[:-1] )
_a = ""
_a = eval(str(lowercase ).split(" " )[-1] )
_a = []
for arg in depreciated_args:
# arg[2:] removes '--'
if arg[2:] in TensorFlowBenchmark.deprecated_args:
# arg[5:] removes '--no_'
full_error_msg += arg_error_msg.format(arg[5:] )
else:
wrong_args.append(lowercase )
if len(lowercase ) > 0:
_a = full_error_msg + begin_error_msg + str(lowercase )
raise ValueError(lowercase )
benchmark.run()
if __name__ == "__main__":
main()
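# Example invocation (the script file name is a placeholder; flag names come
# from TensorFlowBenchmarkArguments, and the benchmark utilities are
# deprecated in recent transformers releases):
#   python run_benchmark_tf.py --models bert-base-uncased --batch_sizes 8 --sequence_lengths 128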
| 346 | 0 |
'''simple docstring'''
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self : int ):
_a = ""
_a = ""
_a = []
def UpperCamelCase__ ( self : Union[str, Any] , __a : Optional[int] , __a : int ):
if m == -1:
return n + 1
elif n == -1:
return m + 1
elif self.dp[m][n] > -1:
return self.dp[m][n]
else:
if self.worda[m] == self.worda[n]:
_a = self.__min_dist_top_down_dp(m - 1 , n - 1 )
else:
_a = self.__min_dist_top_down_dp(__a , n - 1 )
_a = self.__min_dist_top_down_dp(m - 1 , __a )
_a = self.__min_dist_top_down_dp(m - 1 , n - 1 )
_a = 1 + min(__a , __a , __a )
return self.dp[m][n]
def UpperCamelCase__ ( self : List[str] , __a : Any , __a : int ):
_a = worda
_a = worda
_a = [[-1 for _ in range(len(__a ) )] for _ in range(len(__a ) )]
return self.__min_dist_top_down_dp(len(__a ) - 1 , len(__a ) - 1 )
def UpperCamelCase__ ( self : Union[str, Any] , __a : Union[str, Any] , __a : Optional[Any] ):
_a = worda
_a = worda
_a = len(__a )
_a = len(__a )
_a = [[0 for _ in range(n + 1 )] for _ in range(m + 1 )]
for i in range(m + 1 ):
for j in range(n + 1 ):
if i == 0: # first string is empty
_a = j
elif j == 0: # second string is empty
_a = i
elif worda[i - 1] == worda[j - 1]: # last characters are equal
_a = self.dp[i - 1][j - 1]
else:
_a = self.dp[i][j - 1]
_a = self.dp[i - 1][j]
_a = self.dp[i - 1][j - 1]
_a = 1 + min(__a , __a , __a )
return self.dp[m][n]
if __name__ == "__main__":
lowerCAmelCase_ : str = EditDistance()
print('****************** Testing Edit Distance DP Algorithm ******************')
print()
lowerCAmelCase_ : List[str] = input('Enter the first string: ').strip()
lowerCAmelCase_ : Union[str, Any] = input('Enter the second string: ').strip()
print()
print(f"""The minimum edit distance is: {solver.min_dist_top_down(Sa, Sa)}""")
print(f"""The minimum edit distance is: {solver.min_dist_bottom_up(Sa, Sa)}""")
print()
print('*************** End of Testing Edit Distance DP Algorithm ***************')
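# A compact standalone check of the recurrence both solvers implement:
# dp[i][j] = dp[i-1][j-1] on a character match, else 1 + min(insert, delete, replace).
def levenshtein(a: str, b: str) -> int:
    prev = list(range(len(b) + 1))
    for i, ca in enumerate(a, start=1):
        curr = [i]
        for j, cb in enumerate(b, start=1):
            curr.append(prev[j - 1] if ca == cb else 1 + min(curr[-1], prev[j], prev[j - 1]))
        prev = curr
    return prev[-1]
assert levenshtein("kitten", "sitting") == 3
assert levenshtein("intention", "execution") == 5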
| 356 |
'''simple docstring'''
import logging
import os
import threading
import time
try:
import warnings
except ImportError:
lowerCAmelCase_ : Union[str, Any] = None
try:
import msvcrt
except ImportError:
lowerCAmelCase_ : Tuple = None
try:
import fcntl
except ImportError:
lowerCAmelCase_ : Optional[int] = None
# Backward compatibility
# ------------------------------------------------
try:
TimeoutError
except NameError:
lowerCAmelCase_ : Any = OSError
# Data
# ------------------------------------------------
lowerCAmelCase_ : Tuple = [
'Timeout',
'BaseFileLock',
'WindowsFileLock',
'UnixFileLock',
'SoftFileLock',
'FileLock',
]
lowerCAmelCase_ : Optional[int] = '3.0.12'
lowerCAmelCase_ : Tuple = None
def _lowerCamelCase ( ) -> Optional[int]:
global _logger
_a = _logger or logging.getLogger(__name__ )
return _logger
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
def __init__( self : Dict , __a : Optional[Any] ):
_a = lock_file
return None
def __str__( self : Any ):
_a = f'The file lock \'{self.lock_file}\' could not be acquired.'
return temp
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self : List[Any] , __a : Optional[int] ):
_a = lock
return None
def __enter__( self : str ):
return self.lock
def __exit__( self : List[Any] , __a : List[Any] , __a : Union[str, Any] , __a : Dict ):
self.lock.release()
return None
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self : Union[str, Any] , __a : Union[str, Any] , __a : Optional[int]=-1 , __a : Tuple=None ):
_a = max_filename_length if max_filename_length is not None else 2_55
# Hash the filename if it's too long
_a = self.hash_filename_if_too_long(__a , __a )
# The path to the lock file.
_a = lock_file
# The file descriptor for the *_lock_file* as it is returned by the
# os.open() function.
# This file lock is only NOT None, if the object currently holds the
# lock.
_a = None
# The default timeout value.
_a = timeout
# We use this lock primarily for the lock counter.
_a = threading.Lock()
# The lock counter is used for implementing the nested locking
# mechanism. Whenever the lock is acquired, the counter is increased and
# the lock is only released, when this value is 0 again.
_a = 0
return None
@property
def UpperCamelCase__ ( self : Optional[Any] ):
return self._lock_file
@property
def UpperCamelCase__ ( self : List[Any] ):
return self._timeout
@timeout.setter
def UpperCamelCase__ ( self : int , __a : List[Any] ):
_a = float(__a )
return None
def UpperCamelCase__ ( self : Dict ):
raise NotImplementedError()
def UpperCamelCase__ ( self : str ):
raise NotImplementedError()
@property
def UpperCamelCase__ ( self : Optional[Any] ):
return self._lock_file_fd is not None
def UpperCamelCase__ ( self : int , __a : int=None , __a : Tuple=0.05 ):
# Use the default timeout, if no timeout is provided.
if timeout is None:
_a = self.timeout
# Increment the number right at the beginning.
# We can still undo it, if something fails.
with self._thread_lock:
self._lock_counter += 1
_a = id(self )
_a = self._lock_file
_a = time.time()
try:
while True:
with self._thread_lock:
if not self.is_locked:
logger().debug(f'Attempting to acquire lock {lock_id} on {lock_filename}' )
self._acquire()
if self.is_locked:
logger().debug(f'Lock {lock_id} acquired on {lock_filename}' )
break
elif timeout >= 0 and time.time() - start_time > timeout:
logger().debug(f'Timeout on acquiring lock {lock_id} on {lock_filename}' )
raise Timeout(self._lock_file )
else:
logger().debug(
f'Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...' )
time.sleep(__a )
except: # noqa
# Something did go wrong, so decrement the counter.
with self._thread_lock:
_a = max(0 , self._lock_counter - 1 )
raise
return _Acquire_ReturnProxy(lock=self )
def UpperCamelCase__ ( self : Union[str, Any] , __a : int=False ):
with self._thread_lock:
if self.is_locked:
self._lock_counter -= 1
if self._lock_counter == 0 or force:
_a = id(self )
_a = self._lock_file
logger().debug(f'Attempting to release lock {lock_id} on {lock_filename}' )
self._release()
_a = 0
logger().debug(f'Lock {lock_id} released on {lock_filename}' )
return None
def __enter__( self : List[Any] ):
self.acquire()
return self
def __exit__( self : str , __a : str , __a : Dict , __a : Dict ):
self.release()
return None
def __del__( self : int ):
self.release(force=__a )
return None
def UpperCamelCase__ ( self : Tuple , __a : str , __a : int ):
_a = os.path.basename(__a )
if len(__a ) > max_length and max_length > 0:
_a = os.path.dirname(__a )
_a = str(hash(__a ) )
_a = filename[: max_length - len(__a ) - 8] + "..." + hashed_filename + ".lock"
return os.path.join(__a , __a )
else:
return path
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
def __init__( self : int , __a : str , __a : List[Any]=-1 , __a : List[Any]=None ):
from .file_utils import relative_to_absolute_path
super().__init__(__a , timeout=__a , max_filename_length=__a )
_a = "\\\\?\\" + relative_to_absolute_path(self.lock_file )
def UpperCamelCase__ ( self : int ):
_a = os.O_RDWR | os.O_CREAT | os.O_TRUNC
try:
_a = os.open(self._lock_file , __a )
except OSError:
pass
else:
try:
msvcrt.locking(__a , msvcrt.LK_NBLCK , 1 )
except OSError:
os.close(__a )
else:
_a = fd
return None
def UpperCamelCase__ ( self : Optional[Any] ):
_a = self._lock_file_fd
_a = None
msvcrt.locking(__a , msvcrt.LK_UNLCK , 1 )
os.close(__a )
try:
os.remove(self._lock_file )
# Probably another instance of the application
# that acquired the file lock.
except OSError:
pass
return None
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
def __init__( self : List[str] , __a : Optional[Any] , __a : Union[str, Any]=-1 , __a : int=None ):
_a = os.statvfs(os.path.dirname(__a ) ).f_namemax
super().__init__(__a , timeout=__a , max_filename_length=__a )
def UpperCamelCase__ ( self : Any ):
_a = os.O_RDWR | os.O_CREAT | os.O_TRUNC
_a = os.open(self._lock_file , __a )
try:
fcntl.flock(__a , fcntl.LOCK_EX | fcntl.LOCK_NB )
except OSError:
os.close(__a )
else:
_a = fd
return None
def UpperCamelCase__ ( self : Tuple ):
# Do not remove the lockfile:
#
# https://github.com/benediktschmitt/py-filelock/issues/31
# https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition
_a = self._lock_file_fd
_a = None
fcntl.flock(__a , fcntl.LOCK_UN )
os.close(__a )
return None
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
def UpperCamelCase__ ( self : Union[str, Any] ):
_a = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
try:
_a = os.open(self._lock_file , __a )
except OSError:
pass
else:
_a = fd
return None
def UpperCamelCase__ ( self : Union[str, Any] ):
os.close(self._lock_file_fd )
_a = None
try:
os.remove(self._lock_file )
# The file is already deleted and that's what we want.
except OSError:
pass
return None
lowerCAmelCase_ : str = None
if msvcrt:
lowerCAmelCase_ : List[str] = WindowsFileLock
elif fcntl:
lowerCAmelCase_ : List[str] = UnixFileLock
else:
lowerCAmelCase_ : int = SoftFileLock
if warnings is not None:
warnings.warn('only soft file lock is available')
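# Typical usage of the alias defined above (conventionally named FileLock; on
# POSIX it resolves to UnixFileLock). acquire() polls until the timeout
# elapses, then raises Timeout; the lock is released on exit even on error:
#   lock = FileLock("resource.txt.lock", timeout=5)
#   with lock:
#       ...  # exclusive section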
| 346 | 0 |
'''simple docstring'''
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
lowerCAmelCase_ : int = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
lowerCAmelCase_ : Union[str, Any] = direct_transformers_import(PATH_TO_TRANSFORMERS)
lowerCAmelCase_ : Union[str, Any] = transformers.models.auto.configuration_auto.CONFIG_MAPPING
lowerCAmelCase_ : Union[str, Any] = {
# used to compute the property `self.chunk_length`
"EncodecConfig": ["overlap"],
# used as `self.bert_model = BertModel(config, ...)`
"DPRConfig": True,
# not used in modeling files, but it's an important information
"FSMTConfig": ["langs"],
# used internally in the configuration class file
"GPTNeoConfig": ["attention_types"],
# used internally in the configuration class file
"EsmConfig": ["is_folding_model"],
# used during training (despite we don't have training script for these models yet)
"Mask2FormerConfig": ["ignore_value"],
# `ignore_value` used during training (despite we don't have training script for these models yet)
# `norm` used in conversion script (despite not using in the modeling file)
"OneFormerConfig": ["ignore_value", "norm"],
# used during preprocessing and collation, see `collating_graphormer.py`
"GraphormerConfig": ["spatial_pos_max"],
# used internally in the configuration class file
"T5Config": ["feed_forward_proj"],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
"MT5Config": ["feed_forward_proj", "tokenizer_class"],
"UMT5Config": ["feed_forward_proj", "tokenizer_class"],
# used internally in the configuration class file
"LongT5Config": ["feed_forward_proj"],
# used internally in the configuration class file
"SwitchTransformersConfig": ["feed_forward_proj"],
# having default values other than `1e-5` - we can't fix them without breaking
"BioGptConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"GLPNConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"SegformerConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"CvtConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"PerceiverConfig": ["layer_norm_eps"],
# used internally to calculate the feature size
"InformerConfig": ["num_static_real_features", "num_time_features"],
# used internally to calculate the feature size
"TimeSeriesTransformerConfig": ["num_static_real_features", "num_time_features"],
# used internally to calculate the feature size
"AutoformerConfig": ["num_static_real_features", "num_time_features"],
# used internally to calculate `mlp_dim`
"SamVisionConfig": ["mlp_ratio"],
# For (head) training, but so far not implemented
"ClapAudioConfig": ["num_classes"],
# Not used, but providing useful information to users
"SpeechT5HifiGanConfig": ["sampling_rate"],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
'CLIPSegConfig': True,
'DeformableDetrConfig': True,
'DetaConfig': True,
'DinatConfig': True,
'DonutSwinConfig': True,
'EfficientFormerConfig': True,
'FSMTConfig': True,
'JukeboxConfig': True,
'LayoutLMv2Config': True,
'MaskFormerSwinConfig': True,
'MT5Config': True,
'NatConfig': True,
'OneFormerConfig': True,
'PerceiverConfig': True,
'RagConfig': True,
'SpeechT5Config': True,
'SwinConfig': True,
'Swin2SRConfig': True,
'Swinv2Config': True,
'SwitchTransformersConfig': True,
'TableTransformerConfig': True,
'TapasConfig': True,
'TransfoXLConfig': True,
'UniSpeechConfig': True,
'UniSpeechSatConfig': True,
'WavLMConfig': True,
'WhisperConfig': True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
'JukeboxPriorConfig': True,
# TODO: @Younes (for `is_decoder`)
'Pix2StructTextConfig': True,
}
)
def _lowerCamelCase ( lowercase : List[Any] , lowercase : Optional[int] , lowercase : str , lowercase : Dict ) -> Union[str, Any]:
_a = False
for attribute in attributes:
for modeling_source in source_strings:
# check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
if (
F'config.{attribute}' in modeling_source
or F'getattr(config, \"{attribute}\"' in modeling_source
or F'getattr(self.config, \"{attribute}\"' in modeling_source
):
_a = True
# Deal with multi-line cases
elif (
re.search(
rF'getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*\"{attribute}\"' , __A , )
is not None
):
_a = True
# `SequenceSummary` is called with `SequenceSummary(config)`
elif attribute in [
"summary_type",
"summary_use_proj",
"summary_activation",
"summary_last_dropout",
"summary_proj_to_labels",
"summary_first_dropout",
]:
if "SequenceSummary" in modeling_source:
_a = True
if attribute_used:
break
if attribute_used:
break
# common and important attributes, even if they do not always appear in the modeling files
_a = [
"bos_index",
"eos_index",
"pad_index",
"unk_index",
"mask_index",
"image_size",
"use_cache",
"out_features",
"out_indices",
]
_a = ["encoder_no_repeat_ngram_size"]
# Special cases to be allowed
_a = True
if not attribute_used:
_a = False
for attribute in attributes:
# Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
if attribute in ["is_encoder_decoder"] and default_value is True:
_a = True
elif attribute in ["tie_word_embeddings"] and default_value is False:
_a = True
# Allow cases without checking the default value in the configuration class
elif attribute in attributes_to_allow + attributes_used_in_generation:
_a = True
elif attribute.endswith("_token_id" ):
_a = True
# configuration class specific cases
if not case_allowed:
_a = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__ , [] )
_a = allowed_cases is True or attribute in allowed_cases
return attribute_used or case_allowed
def _lowerCamelCase ( lowercase : Tuple ) -> List[Any]:
_a = dict(inspect.signature(config_class.__init__ ).parameters )
_a = [x for x in list(signature.keys() ) if x not in ["self", "kwargs"]]
_a = [signature[param].default for param in parameter_names]
# If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
# as one variant is used, the test should pass
_a = {}
if len(config_class.attribute_map ) > 0:
_a = {v: k for k, v in config_class.attribute_map.items()}
# Get the path to modeling source files
_a = inspect.getsourcefile(__A )
_a = os.path.dirname(__A )
# Let's check against all frameworks: as long as one framework uses an attribute, we are good.
_a = [os.path.join(__A , __A ) for fn in os.listdir(__A ) if fn.startswith("modeling_" )]
# Get the source code strings
_a = []
for path in modeling_paths:
if os.path.isfile(__A ):
with open(__A ) as fp:
modeling_sources.append(fp.read() )
_a = []
for config_param, default_value in zip(__A , __A ):
# `attributes` here is all the variant names for `config_param`
_a = [config_param]
# some configuration classes have non-empty `attribute_map`, and both names could be used in the
# corresponding modeling files. As long as one of them appears, it is fine.
if config_param in reversed_attribute_map:
attributes.append(reversed_attribute_map[config_param] )
if not check_attribute_being_used(__A , __A , __A , __A ):
unused_attributes.append(attributes[0] )
return sorted(__A )
def _lowerCamelCase ( ) -> Any:
_a = {}
for _config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in _config_class.__module__:
continue
# Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
_a = [
cls
for name, cls in inspect.getmembers(
inspect.getmodule(_config_class ) , lambda lowercase : inspect.isclass(__A )
and issubclass(__A , __A )
and inspect.getmodule(__A ) == inspect.getmodule(_config_class ) , )
]
for config_class in config_classes_in_module:
_a = check_config_attributes_being_used(__A )
if len(__A ) > 0:
_a = unused_attributes
if len(__A ) > 0:
_a = "The following configuration classes contain unused attributes in the corresponding modeling files:\n"
for name, attributes in configs_with_unused_attributes.items():
error += F'{name}: {attributes}\n'
raise ValueError(__A )
if __name__ == "__main__":
check_config_attributes()
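# Mini-demo of the signature introspection used above: parameter names and
# their defaults are read straight off a config class's __init__ (DemoConfig
# is a made-up stand-in).
import inspect
class DemoConfig:
    def __init__(self, hidden_size=64, unused_knob=1, **kwargs):
        self.hidden_size = hidden_size
params = dict(inspect.signature(DemoConfig.__init__).parameters)
names = [p for p in params if p not in ("self", "kwargs")]
assert names == ["hidden_size", "unused_knob"]
assert [params[p].default for p in names] == [64, 1]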
| 357 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
__a =42 # [batch_size x 3]
__a =42 # [batch_size x 3]
__a =42 # [batch_size x 3]
__a =42 # [batch_size x 3]
__a =42
__a =42
__a =42
__a =42
__a =42
def UpperCamelCase__ ( self : str ):
assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2
def UpperCamelCase__ ( self : List[str] ):
return torch.from_numpy(np.array([self.width, self.height] , dtype=np.floataa ) )
def UpperCamelCase__ ( self : Union[str, Any] ):
return torch.from_numpy(np.array([self.x_fov, self.y_fov] , dtype=np.floataa ) )
def UpperCamelCase__ ( self : Union[str, Any] ):
_a = torch.arange(self.height * self.width )
_a = torch.stack(
[
pixel_indices % self.width,
torch.div(__a , self.width , rounding_mode="trunc" ),
] , axis=1 , )
return coords
@property
def UpperCamelCase__ ( self : List[Any] ):
_a , *_a = self.shape
_a = int(np.prod(__a ) )
_a = self.get_image_coords()
_a = torch.broadcast_to(coords.unsqueeze(0 ) , [batch_size * inner_batch_size, *coords.shape] )
_a = self.get_camera_rays(__a )
_a = rays.view(__a , inner_batch_size * self.height * self.width , 2 , 3 )
return rays
def UpperCamelCase__ ( self : Dict , __a : torch.Tensor ):
_a , *_a , _a = coords.shape
assert n_coords == 2
assert batch_size == self.origin.shape[0]
_a = coords.view(__a , -1 , 2 )
_a = self.resolution()
_a = self.fov()
_a = (flat.float() / (res - 1)) * 2 - 1
_a = fracs * torch.tan(fov / 2 )
_a = fracs.view(__a , -1 , 2 )
_a = (
self.z.view(__a , 1 , 3 )
+ self.x.view(__a , 1 , 3 ) * fracs[:, :, :1]
+ self.y.view(__a , 1 , 3 ) * fracs[:, :, 1:]
)
_a = directions / directions.norm(dim=-1 , keepdim=__a )
_a = torch.stack(
[
torch.broadcast_to(self.origin.view(__a , 1 , 3 ) , [batch_size, directions.shape[1], 3] ),
directions,
] , dim=2 , )
return rays.view(__a , *__a , 2 , 3 )
def UpperCamelCase__ ( self : Dict , __a : int , __a : int ):
assert width * self.height == height * self.width, "The aspect ratio should not change."
return DifferentiableProjectiveCamera(
origin=self.origin , x=self.x , y=self.y , z=self.z , width=__a , height=__a , x_fov=self.x_fov , y_fov=self.y_fov , )
def _lowerCamelCase ( lowercase : int ) -> DifferentiableProjectiveCamera:
_a = []
_a = []
_a = []
_a = []
for theta in np.linspace(0 , 2 * np.pi , num=20 ):
_a = np.array([np.sin(lowercase ), np.cos(lowercase ), -0.5] )
z /= np.sqrt(np.sum(z**2 ) )
_a = -z * 4
_a = np.array([np.cos(lowercase ), -np.sin(lowercase ), 0.0] )
_a = np.cross(lowercase , lowercase )
origins.append(lowercase )
xs.append(lowercase )
ys.append(lowercase )
zs.append(lowercase )
return DifferentiableProjectiveCamera(
origin=torch.from_numpy(np.stack(lowercase , axis=0 ) ).float() , x=torch.from_numpy(np.stack(lowercase , axis=0 ) ).float() , y=torch.from_numpy(np.stack(lowercase , axis=0 ) ).float() , z=torch.from_numpy(np.stack(lowercase , axis=0 ) ).float() , width=lowercase , height=lowercase , x_fov=0.7 , y_fov=0.7 , shape=(1, len(lowercase )) , )
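# Standalone check of the pixel-coordinate grid built by get_image_coords
# above: flat indices become (x, y) = (idx % width, idx // width) pairs.
import torch
height, width = 2, 3
pixel_indices = torch.arange(height * width)
coords = torch.stack(
    [pixel_indices % width, torch.div(pixel_indices, width, rounding_mode="trunc")],
    axis=1,
)
assert coords.tolist() == [[0, 0], [1, 0], [2, 0], [0, 1], [1, 1], [2, 1]]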
| 346 | 0 |
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
lowerCAmelCase_ : Optional[Any] = 5_00_00
lowerCAmelCase_ : str = 50_00
lowerCAmelCase_ : str = os.path.split(__file__)
lowerCAmelCase_ : Tuple = os.path.join(RESULTS_BASEPATH, 'results', RESULTS_FILENAME.replace('.py', '.json'))
@get_duration
def _lowerCamelCase ( lowercase : datasets.Dataset , lowercase : int ) -> Optional[Any]:
for i in range(lowerCamelCase_ ):
_a = dataset[i]
@get_duration
def _lowerCamelCase ( lowercase : datasets.Dataset , lowercase : Dict , lowercase : str ) -> Any:
for i in range(0 , len(lowerCamelCase_ ) , lowerCamelCase_ ):
_a = dataset[i : i + batch_size]
@get_duration
def _lowerCamelCase ( lowercase : datasets.Dataset , lowercase : int , lowercase : int ) -> Dict:
with dataset.formatted_as(type=lowerCamelCase_ ):
for i in range(lowerCamelCase_ ):
_a = dataset[i]
@get_duration
def _lowerCamelCase ( lowercase : datasets.Dataset , lowercase : Optional[int] , lowercase : List[Any] , lowercase : int ) -> Optional[Any]:
with dataset.formatted_as(type=lowerCamelCase_ ):
for i in range(0 , lowerCamelCase_ , lowerCamelCase_ ):
_a = dataset[i : i + batch_size]
def _lowerCamelCase ( ) -> str:
_a = {"""num examples""": SPEED_TEST_N_EXAMPLES}
_a = [
(read, {"""length""": SMALL_TEST}),
(read, {"""length""": SPEED_TEST_N_EXAMPLES}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 10}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 100}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 1000}),
(read_formatted, {"""type""": """numpy""", """length""": SMALL_TEST}),
(read_formatted, {"""type""": """pandas""", """length""": SMALL_TEST}),
(read_formatted, {"""type""": """torch""", """length""": SMALL_TEST}),
(read_formatted, {"""type""": """tensorflow""", """length""": SMALL_TEST}),
(read_formatted_batch, {"""type""": """numpy""", """length""": SMALL_TEST, """batch_size""": 10}),
(read_formatted_batch, {"""type""": """numpy""", """length""": SMALL_TEST, """batch_size""": 1000}),
]
_a = [
(read, {"""length""": SMALL_TEST}),
(read, {"""length""": SPEED_TEST_N_EXAMPLES}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 10}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 100}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 1000}),
(read_formatted, {"""type""": """numpy""", """length""": SMALL_TEST}),
(read_formatted_batch, {"""type""": """numpy""", """length""": SMALL_TEST, """batch_size""": 10}),
(read_formatted_batch, {"""type""": """numpy""", """length""": SMALL_TEST, """batch_size""": 1000}),
]
with tempfile.TemporaryDirectory() as tmp_dir:
print("generating dataset" )
_a = datasets.Features(
{"list": datasets.Sequence(datasets.Value("float32" ) ), "numbers": datasets.Value("float32" )} )
_a = generate_example_dataset(
os.path.join(lowerCamelCase_ , "dataset.arrow" ) , lowerCamelCase_ , num_examples=lowerCamelCase_ , seq_shapes={"list": (100,)} , )
print("first set of iterations" )
for func, kwargs in functions:
print(func.__name__ , str(lowerCamelCase_ ) )
_a = func(lowerCamelCase_ , **lowerCamelCase_ )
print("shuffling dataset" )
_a = dataset.shuffle()
print("Second set of iterations (after shuffling" )
for func, kwargs in functions_shuffled:
print("shuffled " , func.__name__ , str(lowerCamelCase_ ) )
_a = func(
lowerCamelCase_ , **lowerCamelCase_ )
with open(lowerCamelCase_ , "wb" ) as f:
f.write(json.dumps(lowerCamelCase_ ).encode("utf-8" ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_iterating()
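# Note on the formatted_as contexts timed above: they switch the container
# type returned by indexing without copying the underlying Arrow data, e.g.
#   with dataset.formatted_as(type="numpy"):
#       batch = dataset[0:1000]  # dict of numpy arrays instead of Python lists
# which is why the formatted batched reads are the fast path being measured.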
| 358 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
lowerCAmelCase_ : List[str] = TypeVar('T')
lowerCAmelCase_ : Dict = TypeVar('U')
class __SCREAMING_SNAKE_CASE (Generic[T, U] ):
"""simple docstring"""
def __init__( self : Union[str, Any] , __a : T | None , __a : U | None ):
_a = key
_a = val
_a = None
_a = None
def __repr__( self : Any ):
return (
f'Node: key: {self.key}, val: {self.val}, '
f'has next: {bool(self.next )}, has prev: {bool(self.prev )}'
)
class __SCREAMING_SNAKE_CASE (Generic[T, U] ):
"""simple docstring"""
def __init__( self : Dict ):
_a = DoubleLinkedListNode(__a , __a )
_a = DoubleLinkedListNode(__a , __a )
_a , _a = self.rear, self.head
def __repr__( self : str ):
_a = ["DoubleLinkedList"]
_a = self.head
while node.next is not None:
rep.append(str(__a ) )
_a = node.next
rep.append(str(self.rear ) )
return ",\n ".join(__a )
def UpperCamelCase__ ( self : int , __a : DoubleLinkedListNode[T, U] ):
_a = self.rear.prev
# All nodes other than self.head are guaranteed to have non-None previous
assert previous is not None
_a = node
_a = previous
_a = node
_a = self.rear
def UpperCamelCase__ ( self : Any , __a : DoubleLinkedListNode[T, U] ):
if node.prev is None or node.next is None:
return None
_a = node.next
_a = node.prev
_a = None
_a = None
return node
class __SCREAMING_SNAKE_CASE (Generic[T, U] ):
"""simple docstring"""
__a ={}
def __init__( self : Union[str, Any] , __a : int ):
_a = DoubleLinkedList()
_a = capacity
_a = 0
_a = 0
_a = 0
_a = {}
def __repr__( self : Optional[int] ):
return (
f'CacheInfo(hits={self.hits}, misses={self.miss}, '
f'capacity={self.capacity}, current size={self.num_keys})'
)
def __contains__( self : str , __a : T ):
return key in self.cache
def UpperCamelCase__ ( self : str , __a : T ):
# Note: pythonic interface would throw KeyError rather than return None
if key in self.cache:
self.hits += 1
_a = self.cache[key]
_a = self.list.remove(self.cache[key] )
assert node == value_node
# node is guaranteed not None because it is in self.cache
assert node is not None
self.list.add(__a )
return node.val
self.miss += 1
return None
def UpperCamelCase__ ( self : Tuple , __a : T , __a : U ):
if key not in self.cache:
if self.num_keys >= self.capacity:
# delete first node (oldest) when over capacity
_a = self.list.head.next
# guaranteed to have a non-None first node when num_keys > 0
# explain to type checker via assertions
assert first_node is not None
assert first_node.key is not None
assert (
self.list.remove(__a ) is not None
                ) # node guaranteed to be in list
del self.cache[first_node.key]
self.num_keys -= 1
_a = DoubleLinkedListNode(__a , __a )
self.list.add(self.cache[key] )
self.num_keys += 1
else:
# bump node to the end of the list, update value
_a = self.list.remove(self.cache[key] )
assert node is not None # node guaranteed to be in list
_a = value
self.list.add(__a )
@classmethod
def UpperCamelCase__ ( cls : Tuple , __a : int = 1_28 ):
def cache_decorator_inner(__a : Callable[[T], U] ) -> Callable[..., U]:
def cache_decorator_wrapper(*__a : T ) -> U:
if func not in cls.decorator_function_to_instance_map:
_a = LRUCache(__a )
_a = cls.decorator_function_to_instance_map[func].get(args[0] )
if result is None:
_a = func(*__a )
cls.decorator_function_to_instance_map[func].put(args[0] , __a )
return result
def cache_info() -> LRUCache[T, U]:
return cls.decorator_function_to_instance_map[func]
setattr(__a , "cache_info" , __a ) # noqa: B010
return cache_decorator_wrapper
return cache_decorator_inner
if __name__ == "__main__":
import doctest
doctest.testmod()
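# For plain function memoization, the standard library offers the same
# decorator-plus-cache_info interface that the class above reimplements with
# an explicit doubly linked list:
from functools import lru_cache
@lru_cache(maxsize=128)
def fib(n: int) -> int:
    return n if n < 2 else fib(n - 1) + fib(n - 2)
assert fib(30) == 832040
assert fib.cache_info().hits > 0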
| 346 | 0 |
'''simple docstring'''
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def _lowerCamelCase ( lowercase : Tuple , lowercase : Dict , lowercase : Dict ) -> List[Any]:
_a = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")
_a = (
("layer.", "layer_"),
("word_embeddings.weight", "word_embeddings"),
("position_embeddings.weight", "position_embeddings"),
("token_type_embeddings.weight", "token_type_embeddings"),
(".", "/"),
("LayerNorm/weight", "LayerNorm/gamma"),
("LayerNorm/bias", "LayerNorm/beta"),
("weight", "kernel"),
)
if not os.path.isdir(lowercase ):
os.makedirs(lowercase )
_a = model.state_dict()
def to_tf_var_name(lowercase : Union[str, Any] ):
for patt, repl in iter(lowercase ):
_a = name.replace(lowercase , lowercase )
return F'bert/{name}'
def create_tf_var(lowercase : str , lowercase : List[Any] , lowercase : Optional[int] ):
_a = tf.dtypes.as_dtype(tensor.dtype )
_a = tf.get_variable(dtype=lowercase , shape=tensor.shape , name=lowercase , initializer=tf.zeros_initializer() )
session.run(tf.variables_initializer([tf_var] ) )
session.run(lowercase )
return tf_var
tf.reset_default_graph()
with tf.Session() as session:
for var_name in state_dict:
_a = to_tf_var_name(lowercase )
_a = state_dict[var_name].numpy()
if any(x in var_name for x in tensors_to_transpose ):
_a = torch_tensor.T
_a = create_tf_var(tensor=lowercase , name=lowercase , session=lowercase )
tf.keras.backend.set_value(lowercase , lowercase )
_a = session.run(lowercase )
print(F'Successfully created {tf_name}: {np.allclose(lowercase , lowercase )}' )
_a = tf.train.Saver(tf.trainable_variables() )
saver.save(lowercase , os.path.join(lowercase , model_name.replace("-" , "_" ) + ".ckpt" ) )
def _lowerCamelCase ( lowercase : Optional[Any]=None ) -> Optional[int]:
_a = argparse.ArgumentParser()
parser.add_argument("--model_name" , type=lowercase , required=lowercase , help="model name e.g. bert-base-uncased" )
parser.add_argument(
"--cache_dir" , type=lowercase , default=lowercase , required=lowercase , help="Directory containing pytorch model" )
parser.add_argument("--pytorch_model_path" , type=lowercase , required=lowercase , help="/path/to/<pytorch-model-name>.bin" )
parser.add_argument("--tf_cache_dir" , type=lowercase , required=lowercase , help="Directory in which to save tensorflow model" )
_a = parser.parse_args(lowercase )
_a = BertModel.from_pretrained(
pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path ) , cache_dir=args.cache_dir , )
convert_pytorch_checkpoint_to_tf(model=lowercase , ckpt_dir=args.tf_cache_dir , model_name=args.model_name )
if __name__ == "__main__":
main()
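# Example invocation (the script file name and paths are placeholders; the
# flag names match the parser above):
#   python convert_bert_to_tf.py --model_name bert-base-uncased \
#       --pytorch_model_path ./pytorch_model.bin --tf_cache_dir ./tf_ckpt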
| 359 |
'''simple docstring'''
import re
from filelock import FileLock
try:
import nltk
lowerCAmelCase_ : Optional[int] = True
except (ImportError, ModuleNotFoundError):
lowerCAmelCase_ : Tuple = False
if NLTK_AVAILABLE:
with FileLock('.lock') as lock:
nltk.download('punkt', quiet=True)
def _lowerCamelCase ( lowercase : str ) -> str:
    _a = re.sub("<n>" , "" , lowercase )  # remove pegasus newline char (re.sub returns the new string; the original discarded it)
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(_a ) )
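# Example (requires the nltk punkt data downloaded above): the pegasus
# newline marker "<n>" is stripped and the text is re-split, one sentence
# per line:
#   _lowerCamelCase("First sentence.<n> Second sentence.")
#   -> "First sentence.\nSecond sentence."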
| 346 | 0 |
'''simple docstring'''
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
def UpperCamelCase__ ( self : Dict , __a : Any ):
raise NotImplementedError()
def UpperCamelCase__ ( self : int ):
raise NotImplementedError()
class __SCREAMING_SNAKE_CASE (A_ ):
"""simple docstring"""
def __init__( self : Tuple , __a : "AutoTokenizer" , __a : bool = False , **__a : List[Any] ):
_a = tokenizer
_a = skip_prompt
_a = decode_kwargs
# variables used in the streaming process
_a = []
_a = 0
_a = True
def UpperCamelCase__ ( self : Dict , __a : Union[str, Any] ):
if len(value.shape ) > 1 and value.shape[0] > 1:
raise ValueError("TextStreamer only supports batch size 1" )
elif len(value.shape ) > 1:
_a = value[0]
if self.skip_prompt and self.next_tokens_are_prompt:
_a = False
return
# Add the new token to the cache and decodes the entire thing.
self.token_cache.extend(value.tolist() )
_a = self.tokenizer.decode(self.token_cache , **self.decode_kwargs )
# After the symbol for a new line, we flush the cache.
if text.endswith("\n" ):
_a = text[self.print_len :]
_a = []
_a = 0
# If the last token is a CJK character, we print the characters.
elif len(snake_case__ ) > 0 and self._is_chinese_char(ord(text[-1] ) ):
_a = text[self.print_len :]
self.print_len += len(snake_case__ )
# Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
# which may change with the subsequent token -- there are probably smarter ways to do this!)
else:
_a = text[self.print_len : text.rfind(" " ) + 1]
self.print_len += len(snake_case__ )
self.on_finalized_text(snake_case__ )
def UpperCamelCase__ ( self : str ):
if len(self.token_cache ) > 0:
_a = self.tokenizer.decode(self.token_cache , **self.decode_kwargs )
_a = text[self.print_len :]
_a = []
_a = 0
else:
_a = ""
_a = True
self.on_finalized_text(snake_case__ , stream_end=snake_case__ )
def UpperCamelCase__ ( self : List[str] , __a : str , __a : bool = False ):
print(snake_case__ , flush=snake_case__ , end="" if not stream_end else None )
def UpperCamelCase__ ( self : str , __a : Optional[int] ):
if (
(cp >= 0X4E_00 and cp <= 0X9F_FF)
or (cp >= 0X34_00 and cp <= 0X4D_BF) #
or (cp >= 0X2_00_00 and cp <= 0X2_A6_DF) #
or (cp >= 0X2_A7_00 and cp <= 0X2_B7_3F) #
or (cp >= 0X2_B7_40 and cp <= 0X2_B8_1F) #
or (cp >= 0X2_B8_20 and cp <= 0X2_CE_AF) #
or (cp >= 0XF9_00 and cp <= 0XFA_FF)
or (cp >= 0X2_F8_00 and cp <= 0X2_FA_1F) #
): #
return True
return False
class __SCREAMING_SNAKE_CASE (A_ ):
"""simple docstring"""
def __init__( self : List[Any] , __a : "AutoTokenizer" , __a : bool = False , __a : Optional[float] = None , **__a : List[str] ):
super().__init__(snake_case__ , snake_case__ , **snake_case__ )
_a = Queue()
_a = None
_a = timeout
def UpperCamelCase__ ( self : Any , __a : str , __a : bool = False ):
self.text_queue.put(snake_case__ , timeout=self.timeout )
if stream_end:
self.text_queue.put(self.stop_signal , timeout=self.timeout )
def __iter__( self : Optional[int] ):
return self
def UpperCamelCase__ ( self : Optional[Any] ):
_a = self.text_queue.get(timeout=self.timeout )
if value == self.stop_signal:
raise StopIteration()
else:
return value
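# Usage sketch for the iterator variant above, written against the upstream
# transformers names these classes mirror (TextIteratorStreamer,
# AutoTokenizer, AutoModelForCausalLM -- assumed, as is the checkpoint):
# generation runs in a worker thread while the main thread consumes chunks.
#
#   from threading import Thread
#   from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
#   tok = AutoTokenizer.from_pretrained("gpt2")
#   model = AutoModelForCausalLM.from_pretrained("gpt2")
#   inputs = tok(["An increasing sequence: one,"], return_tensors="pt")
#   streamer = TextIteratorStreamer(tok, skip_prompt=True)
#   thread = Thread(target=model.generate, kwargs=dict(inputs, streamer=streamer, max_new_tokens=20))
#   thread.start()
#   for chunk in streamer:
#       print(chunk, end="")
#   thread.join()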
| 360 |
'''simple docstring'''
import requests
lowerCAmelCase_ : List[Any] = 'YOUR API KEY'
def _lowerCamelCase ( lowercase : str , lowercase : str = giphy_api_key ) -> list:
_a = "+".join(query.split() )
_a = F'https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}'
_a = requests.get(lowercase ).json()["data"]
return [gif["url"] for gif in gifs]
if __name__ == "__main__":
print('\n'.join(get_gifs('space ship')))
| 346 | 0 |
'''simple docstring'''
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
lowerCAmelCase_ : Optional[int] = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE (__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__a =['pixel_values']
def __init__( self : Tuple , __a : Optional[Any] = True , __a : str = 1 / 2_55 , __a : str = True , __a : Optional[Any] = 8 , **__a : int , ):
super().__init__(**_a )
_a = do_rescale
_a = rescale_factor
_a = do_pad
_a = pad_size
def UpperCamelCase__ ( self : str , __a : Any , __a : Optional[Any] , __a : Tuple = None , **__a : List[Any] ):
return rescale(_a , scale=_a , data_format=_a , **_a )
def UpperCamelCase__ ( self : Any , __a : int , __a : Union[str, Any] , __a : Optional[int] = None ):
_a , _a = get_image_size(_a )
_a = (old_height // size + 1) * size - old_height
_a = (old_width // size + 1) * size - old_width
return pad(_a , ((0, pad_height), (0, pad_width)) , mode="symmetric" , data_format=_a )
def UpperCamelCase__ ( self : Union[str, Any] , __a : str , __a : Tuple = None , __a : Union[str, Any] = None , __a : Union[str, Any] = None , __a : int = None , __a : List[str] = None , __a : Optional[Any] = ChannelDimension.FIRST , **__a : Optional[int] , ):
_a = do_rescale if do_rescale is not None else self.do_rescale
_a = rescale_factor if rescale_factor is not None else self.rescale_factor
_a = do_pad if do_pad is not None else self.do_pad
_a = pad_size if pad_size is not None else self.pad_size
_a = make_list_of_images(_a )
if not valid_images(_a ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
# All transformations expect numpy arrays.
_a = [to_numpy_array(_a ) for image in images]
if do_rescale:
_a = [self.rescale(image=_a , scale=_a ) for image in images]
if do_pad:
_a = [self.pad(_a , size=_a ) for image in images]
_a = [to_channel_dimension_format(_a , _a ) for image in images]
_a = {"pixel_values": images}
return BatchFeature(data=_a , tensor_type=_a )
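# Padding arithmetic sketch: each spatial side is padded up to the next
# multiple of `pad_size` with symmetric (mirror) padding, so with the default
# size of 8 a 13x21 image becomes 16x24; note that a side which is already a
# multiple still gains one full block (16 -> 24).
#
#   for old in (13, 16, 21):
#       print((old // 8 + 1) * 8 - old)  # pad amounts: 3, 8, 3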
| 361 |
'''simple docstring'''
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase_ : str = '▁'
lowerCAmelCase_ : Optional[int] = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ , unittest.TestCase ):
"""simple docstring"""
__a =BertGenerationTokenizer
__a =False
__a =True
def UpperCamelCase__ ( self : Optional[Any] ):
super().setUp()
_a = BertGenerationTokenizer(__a , keep_accents=__a )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase__ ( self : Tuple ):
_a = "<s>"
_a = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__a ) , __a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__a ) , __a )
def UpperCamelCase__ ( self : List[str] ):
_a = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<unk>" )
self.assertEqual(vocab_keys[1] , "<s>" )
self.assertEqual(vocab_keys[-1] , "<pad>" )
self.assertEqual(len(__a ) , 10_02 )
def UpperCamelCase__ ( self : str ):
self.assertEqual(self.get_tokenizer().vocab_size , 10_00 )
def UpperCamelCase__ ( self : Tuple ):
_a = BertGenerationTokenizer(__a , keep_accents=__a )
_a = tokenizer.tokenize("This is a test" )
self.assertListEqual(__a , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__a ) , [2_85, 46, 10, 1_70, 3_82] , )
_a = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
__a , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
_a = tokenizer.convert_tokens_to_ids(__a )
self.assertListEqual(
__a , [8, 21, 84, 55, 24, 19, 7, 0, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
_a = tokenizer.convert_ids_to_tokens(__a )
self.assertListEqual(
__a , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
@cached_property
def UpperCamelCase__ ( self : Any ):
return BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" )
@slow
def UpperCamelCase__ ( self : List[str] ):
_a = "Hello World!"
_a = [1_85_36, 22_60, 1_01]
self.assertListEqual(__a , self.big_tokenizer.encode(__a ) )
@slow
def UpperCamelCase__ ( self : Optional[int] ):
_a = (
"This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
)
_a = [
8_71,
4_19,
3_58,
9_46,
9_91,
25_21,
4_52,
3_58,
13_57,
3_87,
77_51,
35_36,
1_12,
9_85,
4_56,
1_26,
8_65,
9_38,
54_00,
57_34,
4_58,
13_68,
4_67,
7_86,
24_62,
52_46,
11_59,
6_33,
8_65,
45_19,
4_57,
5_82,
8_52,
25_57,
4_27,
9_16,
5_08,
4_05,
3_43_24,
4_97,
3_91,
4_08,
1_13_42,
12_44,
3_85,
1_00,
9_38,
9_85,
4_56,
5_74,
3_62,
1_25_97,
32_00,
31_29,
11_72,
]
self.assertListEqual(__a , self.big_tokenizer.encode(__a ) )
@require_torch
@slow
def UpperCamelCase__ ( self : Tuple ):
import torch
from transformers import BertGenerationConfig, BertGenerationEncoder
# Build sequence
_a = list(self.big_tokenizer.get_vocab().keys() )[:10]
_a = " ".join(__a )
_a = self.big_tokenizer.encode_plus(__a , return_tensors="pt" , return_token_type_ids=__a )
_a = self.big_tokenizer.batch_encode_plus(
[sequence + " " + sequence] , return_tensors="pt" , return_token_type_ids=__a )
_a = BertGenerationConfig()
_a = BertGenerationEncoder(__a )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**__a )
model(**__a )
@slow
def UpperCamelCase__ ( self : Optional[int] ):
# fmt: off
_a = {"input_ids": [[3_92_86, 4_58, 3_63_35, 20_01, 4_56, 1_30_73, 1_32_66, 4_55, 1_13, 77_46, 17_41, 1_11_57, 3_91, 1_30_73, 1_32_66, 4_55, 1_13, 39_67, 3_54_12, 1_13, 49_36, 1_09, 38_70, 23_77, 1_13, 3_00_84, 4_57_20, 4_58, 1_34, 1_74_96, 1_12, 5_03, 1_16_72, 1_13, 1_18, 1_12, 56_65, 1_33_47, 3_86_87, 1_12, 14_96, 3_13_89, 1_12, 32_68, 4_72_64, 1_34, 9_62, 1_12, 1_63_77, 80_35, 2_31_30, 4_30, 1_21_69, 1_55_18, 2_85_92, 4_58, 1_46, 4_16_97, 1_09, 3_91, 1_21_69, 1_55_18, 1_66_89, 4_58, 1_46, 4_13_58, 1_09, 4_52, 7_26, 40_34, 1_11, 7_63, 3_54_12, 50_82, 3_88, 19_03, 1_11, 90_51, 3_91, 28_70, 4_89_18, 19_00, 11_23, 5_50, 9_98, 1_12, 95_86, 1_59_85, 4_55, 3_91, 4_10, 2_29_55, 3_76_36, 1_14], [4_48, 1_74_96, 4_19, 36_63, 3_85, 7_63, 1_13, 2_75_33, 28_70, 32_83, 1_30_43, 16_39, 2_47_13, 5_23, 6_56, 2_40_13, 1_85_50, 25_21, 5_17, 2_70_14, 2_12_44, 4_20, 12_12, 14_65, 3_91, 9_27, 48_33, 3_88, 5_78, 1_17_86, 1_14, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [4_84, 21_69, 76_87, 2_19_32, 1_81_46, 7_26, 3_63, 1_70_32, 33_91, 1_14, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__a , model_name="google/bert_for_seq_generation_L-24_bbc_encoder" , revision="c817d1fd1be2ffa69431227a1fe320544943d4db" , )
| 346 | 0 |
'''simple docstring'''
def _lowerCamelCase ( lowercase : Optional[Any] ) -> Optional[int]:
if number > 0:
raise ValueError("input must be a negative integer" )
_a = len(bin(lowercase__ )[3:] )
_a = bin(abs(lowercase__ ) - (1 << binary_number_length) )[3:]
_a = (
(
'1'
            + '0' * (binary_number_length - len(twos_complement_number ))
+ twos_complement_number
)
if number < 0
else '0'
)
return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
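# Worked example of the scheme above (a sign bit prepended to the binary
# magnitude of abs(number) - 2**bits):
#
#   _lowerCamelCase(-5)
#   # bin(-5)[3:] == '101'                  -> binary_number_length = 3
#   # bin(abs(-5) - (1 << 3))[3:] == '11'   -> twos_complement_number
#   # '1' + '0' * (3 - len('11')) + '11' == '1011'
#   # -> '0b1011' (two's complement of -5 on 4 bits)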
| 362 |
'''simple docstring'''
def _lowerCamelCase ( lowercase : int , lowercase : list ) -> Union[str, Any]:
_enforce_args(lowercase , lowercase )
if n == 0:
return 0
_a = float("-inf" )
for i in range(1 , n + 1 ):
_a = max(
lowercase , prices[i - 1] + naive_cut_rod_recursive(n - i , lowercase ) )
    return max_revenue
def _lowerCamelCase ( lowercase : int , lowercase : list ) -> Tuple:
_enforce_args(lowercase , lowercase )
_a = [float("-inf" ) for _ in range(n + 1 )]
return _top_down_cut_rod_recursive(lowercase , lowercase , lowercase )
def _lowerCamelCase ( lowercase : int , lowercase : list , lowercase : list ) -> List[str]:
if max_rev[n] >= 0:
return max_rev[n]
elif n == 0:
return 0
else:
_a = float("-inf" )
for i in range(1 , n + 1 ):
_a = max(
lowercase , prices[i - 1] + _top_down_cut_rod_recursive(n - i , lowercase , lowercase ) , )
_a = max_revenue
return max_rev[n]
def _lowerCamelCase ( lowercase : int , lowercase : list ) -> Any:
_enforce_args(lowercase , lowercase )
    # length(max_rev) = n + 1, to accommodate the revenue obtainable from a rod of
    # length 0.
_a = [float("-inf" ) for _ in range(n + 1 )]
_a = 0
for i in range(1 , n + 1 ):
_a = max_rev[i]
for j in range(1 , i + 1 ):
_a = max(lowercase , prices[j - 1] + max_rev[i - j] )
_a = max_revenue_i
return max_rev[n]
def _lowerCamelCase ( lowercase : int , lowercase : list ) -> Dict:
if n < 0:
_a = F'n must be greater than or equal to 0. Got n = {n}'
raise ValueError(lowercase )
if n > len(lowercase ):
_a = (
"Each integral piece of rod must have a corresponding price. "
F'Got n = {n} but length of prices = {len(lowercase )}'
)
raise ValueError(lowercase )
def _lowerCamelCase ( ) -> Any:
_a = [6, 10, 12, 15, 20, 23]
_a = len(lowercase )
# the best revenue comes from cutting the rod into 6 pieces, each
# of length 1 resulting in a revenue of 6 * 6 = 36.
_a = 36
_a = top_down_cut_rod(lowercase , lowercase )
_a = bottom_up_cut_rod(lowercase , lowercase )
_a = naive_cut_rod_recursive(lowercase , lowercase )
assert expected_max_revenue == max_rev_top_down
assert max_rev_top_down == max_rev_bottom_up
assert max_rev_bottom_up == max_rev_naive
if __name__ == "__main__":
main()
| 346 | 0 |
'''simple docstring'''
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class __SCREAMING_SNAKE_CASE (__lowercase ):
"""simple docstring"""
def UpperCamelCase__ ( self : str , __a : float ):
return 0.0
def _lowerCamelCase ( lowercase : List[str] , lowercase : Optional[int] ) -> tuple[int | float, int | float]:
_a = min([-20, np.min(fft_results[1 : samplerate // 2 - 1] )] )
_a = max([20, np.max(fft_results[1 : samplerate // 2 - 1] )] )
return lowest, highest
def _lowerCamelCase ( lowercase : str , lowercase : List[Any] ) -> None:
_a = 512
_a = [1] + [0] * (size - 1)
_a = [filter_type.process(_lowerCamelCase ) for item in inputs]
_a = [0] * (samplerate - size) # zero-padding
outputs += filler
_a = np.abs(np.fft.fft(_lowerCamelCase ) )
_a = 20 * np.logaa(_lowerCamelCase )
# Frequencies on log scale from 24 to nyquist frequency
plt.xlim(24 , samplerate / 2 - 1 )
plt.xlabel("Frequency (Hz)" )
plt.xscale("log" )
# Display within reasonable bounds
_a = get_bounds(_lowerCamelCase , _lowerCamelCase )
plt.ylim(max([-80, bounds[0]] ) , min([80, bounds[1]] ) )
plt.ylabel("Gain (dB)" )
plt.plot(_lowerCamelCase )
plt.show()
def _lowerCamelCase ( lowercase : Union[str, Any] , lowercase : List[Any] ) -> None:
_a = 512
_a = [1] + [0] * (size - 1)
_a = [filter_type.process(_lowerCamelCase ) for item in inputs]
_a = [0] * (samplerate - size) # zero-padding
outputs += filler
_a = np.angle(np.fft.fft(_lowerCamelCase ) )
# Frequencies on log scale from 24 to nyquist frequency
plt.xlim(24 , samplerate / 2 - 1 )
plt.xlabel("Frequency (Hz)" )
plt.xscale("log" )
plt.ylim(-2 * pi , 2 * pi )
plt.ylabel("Phase shift (Radians)" )
plt.plot(np.unwrap(_lowerCamelCase , -2 * pi ) )
plt.show()
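# Sketch of a conforming filter: the protocol above only requires a
# `process(sample: float) -> float` method, so this pass-through filter
# plots as a flat 0 dB magnitude and zero phase (the invocations below use
# the upstream names the plotting helpers above stand for -- assumed):
#
#   class PassThrough:
#       def process(self, sample: float) -> float:
#           return sample
#
#   show_frequency_response(PassThrough(), 48000)
#   show_phase_response(PassThrough(), 48000)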
| 363 |
'''simple docstring'''
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
lowerCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
@add_end_docstrings(lowerCamelCase_ )
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
def __init__( self : List[Any] , *__a : Optional[int] , **__a : List[str] ):
super().__init__(*__a , **__a )
self.check_model_type(__a )
def UpperCamelCase__ ( self : Optional[Any] , __a : Dict=None , __a : int=None , __a : Optional[Any]=None , **__a : List[Any] ):
_a , _a = {}, {}
if padding is not None:
_a = padding
if truncation is not None:
_a = truncation
if top_k is not None:
_a = top_k
return preprocess_params, {}, postprocess_params
def __call__( self : Union[str, Any] , __a : Union["Image.Image", str] , __a : str = None , **__a : Any ):
if isinstance(__a , (Image.Image, str) ) and isinstance(__a , __a ):
_a = {"image": image, "question": question}
else:
_a = image
_a = super().__call__(__a , **__a )
return results
def UpperCamelCase__ ( self : Tuple , __a : Tuple , __a : Optional[Any]=False , __a : List[Any]=False ):
_a = load_image(inputs["image"] )
_a = self.tokenizer(
inputs["question"] , return_tensors=self.framework , padding=__a , truncation=__a )
_a = self.image_processor(images=__a , return_tensors=self.framework )
model_inputs.update(__a )
return model_inputs
def UpperCamelCase__ ( self : List[Any] , __a : List[str] ):
_a = self.model(**__a )
return model_outputs
def UpperCamelCase__ ( self : int , __a : Optional[int] , __a : Dict=5 ):
if top_k > self.model.config.num_labels:
_a = self.model.config.num_labels
if self.framework == "pt":
_a = model_outputs.logits.sigmoid()[0]
_a , _a = probs.topk(__a )
else:
raise ValueError(f'Unsupported framework: {self.framework}' )
_a = scores.tolist()
_a = ids.tolist()
return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(__a , __a )]
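# Usage sketch via the high-level factory (upstream transformers names
# assumed; the image path and question are illustrative):
#
#   from transformers import pipeline
#   vqa = pipeline("visual-question-answering")
#   vqa(image="cats.png", question="How many cats are there?", top_k=2)
#   # -> [{"score": ..., "answer": ...}, {"score": ..., "answer": ...}]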
| 346 | 0 |
'''simple docstring'''
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
lowerCAmelCase_ : Optional[int] = logging.get_logger(__name__)
lowerCAmelCase_ : List[str] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
lowerCAmelCase_ : Union[str, Any] = {
'vocab_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
lowerCAmelCase_ : Optional[int] = {
'vocab_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
lowerCAmelCase_ : List[Any] = {
'vocab_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'
),
},
}
lowerCAmelCase_ : Optional[int] = {
'facebook/dpr-ctx_encoder-single-nq-base': 5_12,
'facebook/dpr-ctx_encoder-multiset-base': 5_12,
}
lowerCAmelCase_ : List[str] = {
'facebook/dpr-question_encoder-single-nq-base': 5_12,
'facebook/dpr-question_encoder-multiset-base': 5_12,
}
lowerCAmelCase_ : Optional[int] = {
'facebook/dpr-reader-single-nq-base': 5_12,
'facebook/dpr-reader-multiset-base': 5_12,
}
lowerCAmelCase_ : str = {
'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True},
}
lowerCAmelCase_ : Any = {
'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True},
}
lowerCAmelCase_ : Optional[Any] = {
'facebook/dpr-reader-single-nq-base': {'do_lower_case': True},
'facebook/dpr-reader-multiset-base': {'do_lower_case': True},
}
class __SCREAMING_SNAKE_CASE (a_ ):
"""simple docstring"""
__a =VOCAB_FILES_NAMES
__a =CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
__a =CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__a =CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class __SCREAMING_SNAKE_CASE (a_ ):
"""simple docstring"""
__a =VOCAB_FILES_NAMES
__a =QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
__a =QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__a =QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
lowerCAmelCase_ : Optional[Any] = collections.namedtuple(
'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 'start_index', 'end_index', 'text']
)
lowerCAmelCase_ : Dict = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits'])
lowerCAmelCase_ : Dict = R'\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n - `\'np\'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n '
@add_start_docstrings(a_ )
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __call__( self : Optional[Any] , __a : List[str] , __a : Any = None , __a : Union[str, Any] = None , __a : int = False , __a : Any = False , __a : Union[str, Any] = None , __a : Optional[Any] = None , __a : Union[str, Any] = None , **__a : Optional[int] , ):
if titles is None and texts is None:
return super().__call__(
lowercase_ , padding=lowercase_ , truncation=lowercase_ , max_length=lowercase_ , return_tensors=lowercase_ , return_attention_mask=lowercase_ , **lowercase_ , )
elif titles is None or texts is None:
_a = titles if texts is None else texts
return super().__call__(
lowercase_ , lowercase_ , padding=lowercase_ , truncation=lowercase_ , max_length=lowercase_ , return_tensors=lowercase_ , return_attention_mask=lowercase_ , **lowercase_ , )
_a = titles if not isinstance(lowercase_ , lowercase_ ) else [titles]
_a = texts if not isinstance(lowercase_ , lowercase_ ) else [texts]
_a = len(lowercase_ )
_a = questions if not isinstance(lowercase_ , lowercase_ ) else [questions] * n_passages
if len(lowercase_ ) != len(lowercase_ ):
raise ValueError(
f'There should be as many titles than texts but got {len(lowercase_ )} titles and {len(lowercase_ )} texts.' )
_a = super().__call__(lowercase_ , lowercase_ , padding=lowercase_ , truncation=lowercase_ )["input_ids"]
_a = super().__call__(lowercase_ , add_special_tokens=lowercase_ , padding=lowercase_ , truncation=lowercase_ )["input_ids"]
_a = {
"input_ids": [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(lowercase_ , lowercase_ )
]
}
if return_attention_mask is not False:
_a = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
_a = attention_mask
return self.pad(lowercase_ , padding=lowercase_ , max_length=lowercase_ , return_tensors=lowercase_ )
def UpperCamelCase__ ( self : int , __a : int , __a : List[Any] , __a : Tuple = 16 , __a : Union[str, Any] = 64 , __a : Union[str, Any] = 4 , ):
_a = reader_input["input_ids"]
_a , _a , _a = reader_output[:3]
_a = len(lowercase_ )
_a = sorted(range(lowercase_ ) , reverse=lowercase_ , key=relevance_logits.__getitem__ )
_a = []
for doc_id in sorted_docs:
_a = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
_a = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
_a = sequence_ids.index(self.pad_token_id )
else:
_a = len(lowercase_ )
_a = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=lowercase_ , top_spans=lowercase_ , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=lowercase_ , start_index=lowercase_ , end_index=lowercase_ , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(lowercase_ ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def UpperCamelCase__ ( self : Dict , __a : Optional[Any] , __a : Tuple , __a : List[Any] , __a : List[str] , ):
_a = []
for start_index, start_score in enumerate(lowercase_ ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
_a = sorted(lowercase_ , key=lambda __a : x[1] , reverse=lowercase_ )
_a = []
for (start_index, end_index), score in scores:
if start_index > end_index:
raise ValueError(f'Wrong span indices: [{start_index}:{end_index}]' )
_a = end_index - start_index + 1
if length > max_answer_length:
raise ValueError(f'Span is too long: {length} > {max_answer_length}' )
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(lowercase_ ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(a_ )
class __SCREAMING_SNAKE_CASE (a_ , a_ ):
"""simple docstring"""
__a =VOCAB_FILES_NAMES
__a =READER_PRETRAINED_VOCAB_FILES_MAP
__a =READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__a =READER_PRETRAINED_INIT_CONFIGURATION
__a =['''input_ids''', '''attention_mask''']
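# Usage sketch for the reader tokenizer defined last (upstream name
# DPRReaderTokenizer assumed for the obfuscated class; strings are toy data):
#
#   tokenizer = DPRReaderTokenizer.from_pretrained("facebook/dpr-reader-single-nq-base")
#   encodings = tokenizer(
#       questions=["What is love?"],
#       titles=["Haddaway"],
#       texts=["'What Is Love' is a 1993 song."],
#       return_tensors="pt",
#   )
#   # feed `encodings` to a DPRReader model, then hand its output to the
#   # span-decoding method above (upstream name `decode_best_spans`) to
#   # recover ranked answer spans.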
| 364 |
'''simple docstring'''
from random import randint, random
def _lowerCamelCase ( lowercase : int , lowercase : int , lowercase : int , lowercase : bool = False , lowercase : bool = False , lowercase : int = 5 , ) -> list:
_a = [[-1] * number_of_cells] # Create a highway without any car
_a = 0
_a = max(lowercase , 0 )
while i < number_of_cells:
_a = (
randint(0 , lowercase ) if random_speed else initial_speed
) # Place the cars
i += (
randint(1 , max_speed * 2 ) if random_frequency else frequency
) # Arbitrary number, may need tuning
return highway
def _lowerCamelCase ( lowercase : list , lowercase : int ) -> int:
_a = 0
_a = highway_now[car_index + 1 :]
for cell in range(len(lowercase ) ): # May need a better name for this
if cells[cell] != -1: # If the cell is not empty then
return distance # we have the distance we wanted
distance += 1
# Here if the car is near the end of the highway
return distance + get_distance(lowercase , -1 )
def _lowerCamelCase ( lowercase : list , lowercase : float , lowercase : int ) -> list:
_a = len(lowercase )
    # Before calculations, the highway is empty
_a = [-1] * number_of_cells
for car_index in range(lowercase ):
if highway_now[car_index] != -1:
# Add 1 to the current speed of the car and cap the speed
_a = min(highway_now[car_index] + 1 , lowercase )
            # Number of empty cells before the next car
_a = get_distance(lowercase , lowercase ) - 1
# We can't have the car causing an accident
_a = min(next_highway[car_index] , lowercase )
if random() < probability:
# Randomly, a driver will slow down
_a = max(next_highway[car_index] - 1 , 0 )
return next_highway
def _lowerCamelCase ( lowercase : list , lowercase : int , lowercase : float , lowercase : int ) -> list:
_a = len(highway[0] )
for i in range(lowercase ):
_a = update(highway[i] , lowercase , lowercase )
_a = [-1] * number_of_cells
for car_index in range(lowercase ):
_a = next_speeds_calculated[car_index]
if speed != -1:
# Change the position based on the speed (with % to create the loop)
_a = (car_index + speed) % number_of_cells
# Commit the change of position
_a = speed
highway.append(lowercase )
return highway
if __name__ == "__main__":
import doctest
doctest.testmod()
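# Usage sketch (upstream names assumed for the first and last helpers above:
# construct_highway / simulate): a 100-cell loop with one car every 5 cells
# at speed 3, evolved for 10 steps with a 10% chance of random braking.
#
#   highway = construct_highway(100, 5, 3)
#   history = simulate(highway, 10, 0.1, 5)
#   # history[i] holds the per-cell speeds (-1 = empty) after step i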
| 346 | 0 |
'''simple docstring'''
from typing import Dict, Optional
import numpy as np
import datasets
lowerCAmelCase_ : List[str] = '\nIoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union\nbetween the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,\nthe mean IoU of the image is calculated by taking the IoU of each class and averaging them.\n'
lowerCAmelCase_ : List[Any] = '\nArgs:\n predictions (`List[ndarray]`):\n List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n references (`List[ndarray]`):\n List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n num_labels (`int`):\n Number of classes (categories).\n ignore_index (`int`):\n Index that will be ignored during evaluation.\n nan_to_num (`int`, *optional*):\n If specified, NaN values will be replaced by the number defined by the user.\n label_map (`dict`, *optional*):\n If specified, dictionary mapping old label indices to new label indices.\n reduce_labels (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,\n and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.\n\nReturns:\n `Dict[str, float | ndarray]` comprising various elements:\n - *mean_iou* (`float`):\n Mean Intersection-over-Union (IoU averaged over all categories).\n - *mean_accuracy* (`float`):\n Mean accuracy (averaged over all categories).\n - *overall_accuracy* (`float`):\n Overall accuracy on all images.\n - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):\n Per category accuracy.\n - *per_category_iou* (`ndarray` of shape `(num_labels,)`):\n Per category IoU.\n\nExamples:\n\n >>> import numpy as np\n\n >>> mean_iou = datasets.load_metric("mean_iou")\n\n >>> # suppose one has 3 different segmentation maps predicted\n >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])\n >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])\n\n >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])\n >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])\n\n >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])\n >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])\n\n >>> predicted = [predicted_1, predicted_2, predicted_3]\n >>> ground_truth = [actual_1, actual_2, actual_3]\n\n >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {\'mean_iou\': 0.47750000000000004, \'mean_accuracy\': 0.5916666666666666, \'overall_accuracy\': 0.5263157894736842, \'per_category_iou\': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), \'per_category_accuracy\': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}\n'
lowerCAmelCase_ : Tuple = '\\n@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,\nauthor = {{MMSegmentation Contributors}},\nlicense = {Apache-2.0},\nmonth = {7},\ntitle = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},\nurl = {https://github.com/open-mmlab/mmsegmentation},\nyear = {2020}\n}'
def _lowerCamelCase ( lowercase : List[Any] , lowercase : List[Any] , lowercase : Tuple , lowercase : bool , lowercase : Optional[Dict[int, int]] = None , lowercase : bool = False , ) -> Any:
if label_map is not None:
for old_id, new_id in label_map.items():
_a = new_id
# turn into Numpy arrays
_a = np.array(__a )
_a = np.array(__a )
if reduce_labels:
_a = 255
_a = label - 1
_a = 255
_a = label != ignore_index
_a = np.not_equal(__a , __a )
_a = pred_label[mask]
_a = np.array(__a )[mask]
_a = pred_label[pred_label == label]
_a = np.histogram(__a , bins=__a , range=(0, num_labels - 1) )[0]
_a = np.histogram(__a , bins=__a , range=(0, num_labels - 1) )[0]
_a = np.histogram(__a , bins=__a , range=(0, num_labels - 1) )[0]
_a = area_pred_label + area_label - area_intersect
return area_intersect, area_union, area_pred_label, area_label
def _lowerCamelCase ( lowercase : Tuple , lowercase : Union[str, Any] , lowercase : Any , lowercase : bool , lowercase : Optional[Dict[int, int]] = None , lowercase : bool = False , ) -> Tuple:
_a = np.zeros((num_labels,) , dtype=np.floataa )
_a = np.zeros((num_labels,) , dtype=np.floataa )
_a = np.zeros((num_labels,) , dtype=np.floataa )
_a = np.zeros((num_labels,) , dtype=np.floataa )
for result, gt_seg_map in zip(__a , __a ):
_a = intersect_and_union(
__a , __a , __a , __a , __a , __a )
total_area_intersect += area_intersect
total_area_union += area_union
total_area_pred_label += area_pred_label
total_area_label += area_label
return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def _lowerCamelCase ( lowercase : Union[str, Any] , lowercase : Tuple , lowercase : List[Any] , lowercase : bool , lowercase : Optional[int] = None , lowercase : Optional[Dict[int, int]] = None , lowercase : bool = False , ) -> Optional[Any]:
_a = total_intersect_and_union(
__a , __a , __a , __a , __a , __a )
# compute metrics
_a = {}
_a = total_area_intersect.sum() / total_area_label.sum()
_a = total_area_intersect / total_area_union
_a = total_area_intersect / total_area_label
_a = np.nanmean(__a )
_a = np.nanmean(__a )
_a = all_acc
_a = iou
_a = acc
if nan_to_num is not None:
_a = {metric: np.nan_to_num(__a , nan=__a ) for metric, metric_value in metrics.items()}
return metrics
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __SCREAMING_SNAKE_CASE (datasets.Metric ):
"""simple docstring"""
def UpperCamelCase__ ( self : Dict ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
# 1st Seq - height dim, 2nd - width dim
{
"predictions": datasets.Sequence(datasets.Sequence(datasets.Value("uint16" ) ) ),
"references": datasets.Sequence(datasets.Sequence(datasets.Value("uint16" ) ) ),
} ) , reference_urls=[
"https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py"
] , )
def UpperCamelCase__ ( self : Union[str, Any] , __a : str , __a : int , __a : List[Any] , __a : Any , __a : List[Any] = None , __a : List[str] = None , __a : str = False , ):
_a = mean_iou(
results=_a , gt_seg_maps=_a , num_labels=_a , ignore_index=_a , nan_to_num=_a , label_map=_a , reduce_labels=_a , )
return iou_result
| 365 |
'''simple docstring'''
def _lowerCamelCase ( lowercase : int = 10 ) -> str:
if not isinstance(lowercase , lowercase ) or n < 0:
raise ValueError("Invalid input" )
_a = 10**n
_a = 2_8433 * (pow(2 , 783_0457 , lowercase )) + 1
return str(number % modulus )
if __name__ == "__main__":
from doctest import testmod
testmod()
print(f"""{solution(10) = }""")
| 346 | 0 |
'''simple docstring'''
import os
def _lowerCamelCase ( ) -> Tuple:
with open(os.path.dirname(lowerCAmelCase__ ) + "/grid.txt" ) as f:
_a = [] # noqa: E741
for _ in range(20 ):
l.append([int(lowerCAmelCase__ ) for x in f.readline().split()] )
_a = 0
# right
for i in range(20 ):
for j in range(17 ):
_a = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
if temp > maximum:
_a = temp
# down
for i in range(17 ):
for j in range(20 ):
_a = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
if temp > maximum:
_a = temp
# diagonal 1
for i in range(17 ):
for j in range(17 ):
_a = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
if temp > maximum:
_a = temp
# diagonal 2
for i in range(17 ):
for j in range(3 , 20 ):
_a = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
if temp > maximum:
_a = temp
return maximum
if __name__ == "__main__":
print(solution())
| 366 |
'''simple docstring'''
def _lowerCamelCase ( lowercase : int = 6008_5147_5143 ) -> int:
try:
_a = int(lowercase )
except (TypeError, ValueError):
raise TypeError("Parameter n must be int or castable to int." )
if n <= 0:
raise ValueError("Parameter n must be greater than or equal to one." )
_a = 2
_a = 0
if n == 2:
return 2
while n > 2:
while n % i != 0:
i += 1
_a = i
while n % i == 0:
_a = n // i
i += 1
return int(lowercase )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 346 | 0 |
'''simple docstring'''
def _lowerCamelCase ( lowercase : int = 1000 ) -> int:
    _a , _a = 1, 1
_a = 2
while True:
_a = 0
_a = fa + fa
        _a , _a = fa, f
index += 1
for _ in str(a_ ):
i += 1
if i == n:
break
return index
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 367 |
'''simple docstring'''
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format='%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=os.environ.get('LOGLEVEL', 'INFO').upper(),
stream=sys.stdout,
)
lowerCAmelCase_ : List[Any] = logging.getLogger(__name__)
lowerCAmelCase_ : List[Any] = {'facebook/bart-base': BartForConditionalGeneration}
lowerCAmelCase_ : int = {'facebook/bart-base': BartTokenizer}
def _lowerCamelCase ( ) -> Union[str, Any]:
_a = argparse.ArgumentParser(description="Export Bart model + Beam Search to ONNX graph." )
parser.add_argument(
"--validation_file" , type=lowercase , default=lowercase , help="A csv or a json file containing the validation data." )
parser.add_argument(
"--max_length" , type=lowercase , default=5 , help="The maximum total input sequence length after tokenization." , )
parser.add_argument(
"--num_beams" , type=lowercase , default=lowercase , help=(
"Number of beams to use for evaluation. This argument will be "
"passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."
) , )
parser.add_argument(
"--model_name_or_path" , type=lowercase , help="Path to pretrained model or model identifier from huggingface.co/models." , required=lowercase , )
parser.add_argument(
"--config_name" , type=lowercase , default=lowercase , help="Pretrained config name or path if not the same as model_name" , )
parser.add_argument(
"--device" , type=lowercase , default="cpu" , help="Device where the model will be run" , )
parser.add_argument("--output_file_path" , type=lowercase , default=lowercase , help="Where to store the final ONNX file." )
_a = parser.parse_args()
return args
def _lowerCamelCase ( lowercase : Any , lowercase : Tuple="cpu" ) -> Optional[Any]:
_a = model_dict[model_name].from_pretrained(lowercase ).to(lowercase )
_a = tokenizer_dict[model_name].from_pretrained(lowercase )
if model_name in ["facebook/bart-base"]:
_a = 0
_a = None
_a = 0
return huggingface_model, tokenizer
def _lowerCamelCase ( lowercase : List[str] , lowercase : Tuple , lowercase : int , lowercase : Any , lowercase : Dict ) -> Any:
model.eval()
_a = None
_a = torch.jit.script(BARTBeamSearchGenerator(lowercase ) )
with torch.no_grad():
_a = "My friends are cool but they eat too many carbs."
_a = tokenizer([ARTICLE_TO_SUMMARIZE] , max_length=1024 , return_tensors="pt" ).to(model.device )
_a = model.generate(
inputs["input_ids"] , attention_mask=inputs["attention_mask"] , num_beams=lowercase , max_length=lowercase , early_stopping=lowercase , decoder_start_token_id=model.config.decoder_start_token_id , )
torch.onnx.export(
lowercase , (
inputs["input_ids"],
inputs["attention_mask"],
num_beams,
max_length,
model.config.decoder_start_token_id,
) , lowercase , opset_version=14 , input_names=["input_ids", "attention_mask", "num_beams", "max_length", "decoder_start_token_id"] , output_names=["output_ids"] , dynamic_axes={
"input_ids": {0: "batch", 1: "seq"},
"output_ids": {0: "batch", 1: "seq_out"},
} , example_outputs=lowercase , )
logger.info("Model exported to {}".format(lowercase ) )
_a = remove_dup_initializers(os.path.abspath(lowercase ) )
logger.info("Deduplicated and optimized model written to {}".format(lowercase ) )
_a = onnxruntime.InferenceSession(lowercase )
_a = ort_sess.run(
lowercase , {
"input_ids": inputs["input_ids"].cpu().numpy(),
"attention_mask": inputs["attention_mask"].cpu().numpy(),
"num_beams": np.array(lowercase ),
"max_length": np.array(lowercase ),
"decoder_start_token_id": np.array(model.config.decoder_start_token_id ),
} , )
np.testing.assert_allclose(summary_ids.cpu().numpy() , ort_out[0] , rtol=1E-3 , atol=1E-3 )
logger.info("Model outputs from torch and ONNX Runtime are similar." )
logger.info("Success." )
def _lowerCamelCase ( ) -> Any:
_a = parse_args()
_a = 5
_a = 4
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO , )
logger.setLevel(logging.INFO )
transformers.utils.logging.set_verbosity_error()
_a = torch.device(args.device )
_a , _a = load_model_tokenizer(args.model_name_or_path , lowercase )
if model.config.decoder_start_token_id is None:
raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined" )
model.to(lowercase )
if args.max_length:
_a = args.max_length
if args.num_beams:
_a = args.num_beams
if args.output_file_path:
_a = args.output_file_path
else:
_a = "BART.onnx"
logger.info("Exporting model to ONNX" )
export_and_validate_model(lowercase , lowercase , lowercase , lowercase , lowercase )
if __name__ == "__main__":
main()
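# A possible CLI invocation (flag names as declared above; the script
# filename is illustrative):
#
#   python run_bart_onnx_export.py \
#       --model_name_or_path facebook/bart-base \
#       --max_length 5 --num_beams 4 \
#       --device cpu --output_file_path BART.onnx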
| 346 | 0 |
from collections import defaultdict
from math import ceil, sqrt
def _lowerCamelCase ( lowercase : int = 100_0000 , lowercase : int = 10 ) -> int:
_a = defaultdict(_UpperCamelCase )
for outer_width in range(3 , (t_limit // 4) + 2 ):
if outer_width * outer_width > t_limit:
_a = max(
ceil(sqrt(outer_width * outer_width - t_limit ) ) , 1 )
else:
_a = 1
hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
for hole_width in range(_UpperCamelCase , outer_width - 1 , 2 ):
count[outer_width * outer_width - hole_width * hole_width] += 1
return sum(1 for n in count.values() if 1 <= n <= 10 )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 368 |
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
lowerCAmelCase_ : Dict = abspath(join(dirname(dirname(dirname(__file__))), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def _lowerCamelCase ( lowercase : str ) -> Optional[int]:
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(lowercase )
def _lowerCamelCase ( lowercase : Dict ) -> str:
from transformers.testing_utils import pytest_terminal_summary_main
_a = terminalreporter.config.getoption("--make-reports" )
if make_reports:
pytest_terminal_summary_main(lowercase , id=lowercase )
| 346 | 0 |
'''simple docstring'''
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def _lowerCamelCase ( *lowercase : Optional[Any] , lowercase : Tuple = None , lowercase : List[Any]=True , lowercase : Optional[Any]=2 ) -> Optional[Any]:
from .. import __version__
_a = take_from
_a = ()
if not isinstance(args[0] , a__ ):
_a = (args,)
for attribute, version_name, message in args:
if version.parse(version.parse(a__ ).base_version ) >= version.parse(a__ ):
raise ValueError(
F'The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers\''
F' version {__version__} is >= {version_name}' )
_a = None
if isinstance(a__ , a__ ) and attribute in deprecated_kwargs:
values += (deprecated_kwargs.pop(a__ ),)
_a = F'The `{attribute}` argument is deprecated and will be removed in version {version_name}.'
elif hasattr(a__ , a__ ):
values += (getattr(a__ , a__ ),)
_a = F'The `{attribute}` attribute is deprecated and will be removed in version {version_name}.'
elif deprecated_kwargs is None:
_a = F'`{attribute}` is deprecated and will be removed in version {version_name}.'
if warning is not None:
_a = warning + ''' ''' if standard_warn else ''''''
warnings.warn(warning + message , a__ , stacklevel=a__ )
if isinstance(a__ , a__ ) and len(a__ ) > 0:
_a = inspect.getouterframes(inspect.currentframe() )[1]
_a = call_frame.filename
_a = call_frame.lineno
_a = call_frame.function
_a = next(iter(deprecated_kwargs.items() ) )
raise TypeError(F'{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`' )
if len(a__ ) == 0:
return
elif len(a__ ) == 1:
return values[0]
return values
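# Usage sketch (upstream keyword names `take_from` / `standard_warn` assumed
# for the obfuscated keyword-only parameters): each positional triple is
# (name, removal_version, message); `take_from` may be a kwargs dict (the
# deprecated key is popped) or an object (the attribute is read).
#
#   value = _lowerCamelCase(
#       "old_kwarg", "0.20.0", "use `new_kwarg` instead", take_from=kwargs
#   )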
| 369 |
'''simple docstring'''
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
@register_to_config
def __init__( self : List[Any] , __a : int , __a : int , __a : int , __a : float , __a : int , __a : int , __a : int , __a : int , __a : str , __a : bool = False , ):
super().__init__()
_a = nn.Embedding(__a , __a )
_a = nn.Embedding(__a , __a )
_a = False
_a = nn.Dropout(p=__a )
_a = TaConfig(
vocab_size=__a , d_model=__a , num_heads=__a , d_kv=__a , d_ff=__a , dropout_rate=__a , feed_forward_proj=__a , is_decoder=__a , is_encoder_decoder=__a , )
_a = nn.ModuleList()
for lyr_num in range(__a ):
_a = TaBlock(__a )
self.encoders.append(__a )
_a = TaLayerNorm(__a )
_a = nn.Dropout(p=__a )
def UpperCamelCase__ ( self : str , __a : Union[str, Any] , __a : Dict ):
_a = self.token_embedder(__a )
_a = encoder_input_tokens.shape[1]
_a = torch.arange(__a , device=encoder_input_tokens.device )
x += self.position_encoding(__a )
_a = self.dropout_pre(__a )
# inverted the attention mask
_a = encoder_input_tokens.size()
_a = self.get_extended_attention_mask(__a , __a )
for lyr in self.encoders:
_a = lyr(__a , __a )[0]
_a = self.layer_norm(__a )
return self.dropout_post(__a ), encoder_inputs_mask
| 346 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
lowerCAmelCase_ : int = {
'google/tapas-base-finetuned-sqa': (
'https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json'
),
'google/tapas-base-finetuned-wtq': (
'https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json'
),
'google/tapas-base-finetuned-wikisql-supervised': (
'https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json'
),
'google/tapas-base-finetuned-tabfact': (
'https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json'
),
}
class __SCREAMING_SNAKE_CASE (__UpperCamelCase ):
"""simple docstring"""
__a ="tapas"
def __init__( self : Optional[int] , __a : List[str]=3_05_22 , __a : Tuple=7_68 , __a : int=12 , __a : List[str]=12 , __a : Optional[Any]=30_72 , __a : List[Any]="gelu" , __a : Dict=0.1 , __a : Tuple=0.1 , __a : List[Any]=10_24 , __a : Dict=[3, 2_56, 2_56, 2, 2_56, 2_56, 10] , __a : int=0.02 , __a : List[Any]=1e-1_2 , __a : Dict=0 , __a : Dict=10.0 , __a : Any=0 , __a : int=1.0 , __a : int=None , __a : int=1.0 , __a : List[Any]=False , __a : Tuple=None , __a : Optional[int]=1.0 , __a : List[str]=1.0 , __a : Any=False , __a : List[Any]=False , __a : Union[str, Any]="ratio" , __a : List[Any]=None , __a : int=None , __a : Dict=64 , __a : str=32 , __a : str=False , __a : List[Any]=True , __a : Optional[int]=False , __a : List[Any]=False , __a : List[Any]=True , __a : Any=False , __a : Tuple=None , __a : Tuple=None , **__a : Union[str, Any] , ):
super().__init__(pad_token_id=_lowerCAmelCase , **_lowerCAmelCase )
# BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
_a = vocab_size
_a = hidden_size
_a = num_hidden_layers
_a = num_attention_heads
_a = hidden_act
_a = intermediate_size
_a = hidden_dropout_prob
_a = attention_probs_dropout_prob
_a = max_position_embeddings
_a = type_vocab_sizes
_a = initializer_range
_a = layer_norm_eps
# Fine-tuning task hyperparameters
_a = positive_label_weight
_a = num_aggregation_labels
_a = aggregation_loss_weight
_a = use_answer_as_supervision
_a = answer_loss_importance
_a = use_normalized_answer_loss
_a = huber_loss_delta
_a = temperature
_a = aggregation_temperature
_a = use_gumbel_for_cells
_a = use_gumbel_for_aggregation
_a = average_approximation_function
_a = cell_selection_preference
_a = answer_loss_cutoff
_a = max_num_rows
_a = max_num_columns
_a = average_logits_per_cell
_a = select_one_column
_a = allow_empty_column_selection
_a = init_cell_selection_weights_to_zero
_a = reset_position_index_per_cell
_a = disable_per_token_loss
# Aggregation hyperparameters
_a = aggregation_labels
_a = no_aggregation_label_index
if isinstance(self.aggregation_labels , _lowerCAmelCase ):
_a = {int(_lowerCAmelCase ): v for k, v in aggregation_labels.items()}
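# Instantiation sketch (upstream name TapasConfig assumed for the class
# above): a WTQ-style fine-tuning setup selecting four aggregation operators
# and weak supervision from answers.
#
#   config = TapasConfig(num_aggregation_labels=4, use_answer_as_supervision=True)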
| 370 |
'''simple docstring'''
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def _lowerCamelCase ( lowercase : Any ) -> Any:
_a = filter(lambda lowercase : p.requires_grad , model.parameters() )
_a = sum([np.prod(p.size() ) for p in model_parameters] )
return params
lowerCAmelCase_ : List[str] = logging.getLogger(__name__)
def _lowerCamelCase ( lowercase : List[str] , lowercase : Dict ) -> Union[str, Any]:
if metric == "rouge2":
_a = "{val_avg_rouge2:.4f}-{step_count}"
elif metric == "bleu":
_a = "{val_avg_bleu:.4f}-{step_count}"
elif metric == "em":
_a = "{val_avg_em:.4f}-{step_count}"
elif metric == "loss":
_a = "{val_avg_loss:.4f}-{step_count}"
else:
raise NotImplementedError(
F'seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this'
" function." )
_a = ModelCheckpoint(
dirpath=lowercase , filename=lowercase , monitor=F'val_{metric}' , mode="max" , save_top_k=1 , every_n_epochs=1 , )
return checkpoint_callback
def _lowerCamelCase ( lowercase : Dict , lowercase : Dict ) -> str:
return EarlyStopping(
monitor=F'val_{metric}' , mode="min" if "loss" in metric else "max" , patience=lowercase , verbose=lowercase , )
class __SCREAMING_SNAKE_CASE (pl.Callback ):
"""simple docstring"""
    def on_batch_end( self , trainer , pl_module ):
        lrs = {f'lr_group_{i}': param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
        pl_module.logger.log_metrics(lrs )
@rank_zero_only
    def _write_logs( self , trainer: pl.Trainer , pl_module: pl.LightningModule , type_path: str , save_generations=True ):
        logger.info(f'***** {type_path} results at step {trainer.global_step:05d} *****' )
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]} )
        # Log results
        od = Path(pl_module.hparams.output_dir )
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f'{type_path}_results/{trainer.global_step:05d}.txt'
            generations_file = od / f'{type_path}_generations/{trainer.global_step:05d}.txt'
        results_file.parent.mkdir(exist_ok=True )
        generations_file.parent.mkdir(exist_ok=True )
        with open(results_file , "a+" ) as writer:
            for key in sorted(metrics ):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val , torch.Tensor ):
                    val = val.item()
                msg = f'{key}: {val:.6f}\n'
                writer.write(msg )
        if not save_generations:
            return
        if "preds" in metrics:
            content = "\n".join(metrics["preds"] )
            generations_file.open("w+" ).write(content )
@rank_zero_only
    def on_train_start( self , trainer , pl_module ):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()
        n_trainable_pars = count_trainable_parameters(pl_module )
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6} )
@rank_zero_only
    def on_test_end( self , trainer: pl.Trainer , pl_module: pl.LightningModule ):
        save_json(pl_module.metrics , pl_module.metrics_save_path )
        return self._write_logs(trainer , pl_module , "test" )
@rank_zero_only
    def on_validation_end( self , trainer: pl.Trainer , pl_module: pl.LightningModule ):
save_json(pl_module.metrics , pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
| 346 | 0 |
'''simple docstring'''
from __future__ import annotations
import numpy as np
from numpy import float64
from numpy.typing import NDArray
def _lowerCamelCase ( coefficient_matrix : NDArray[float64] , constant_matrix : NDArray[float64] , init_val : list[float] , iterations : int , ) -> list[float]:
    rowsa , colsa = coefficient_matrix.shape
    rowsb , colsb = constant_matrix.shape
    if rowsa != colsa:
        msg = f'Coefficient matrix dimensions must be nxn but received {rowsa}x{colsa}'
        raise ValueError(msg )
    if colsb != 1:
        msg = f'Constant matrix must be nx1 but received {rowsb}x{colsb}'
        raise ValueError(msg )
    if rowsa != rowsb:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f'received {rowsa}x{colsa} and {rowsb}x{colsb}'
        )
        raise ValueError(msg )
    if len(init_val ) != rowsa:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f'matrix but received {len(init_val )} and {rowsa}'
        )
        raise ValueError(msg )
    if iterations <= 0:
        raise ValueError("Iterations must be at least 1" )
    table = np.concatenate(
        (coefficient_matrix, constant_matrix) , axis=1 )
    rows , cols = table.shape
    strictly_diagonally_dominant(table )
    # Iterates the whole matrix for given number of times
    for _ in range(iterations ):
        new_val = []
        for row in range(rows ):
            temp = 0
            for col in range(cols ):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp )
        init_val = new_val
    return [float(i ) for i in new_val]
def strictly_diagonally_dominant ( table : NDArray[float64] ) -> bool:
    rows , cols = table.shape
    is_diagonally_dominant = True
    for i in range(0 , rows ):
        total = 0
        for j in range(0 , cols - 1 ):
            if i == j:
                continue
            else:
                total += table[i][j]
        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant" )
    return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
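    # Quick demo (values are illustrative, not part of the original file): a
    # strictly diagonally dominant 2x2 system A x = b, solved with 25 Jacobi sweeps.
    demo_a = np.array([[4.0, 1.0], [1.0, 3.0]])
    demo_b = np.array([[1.0], [2.0]])
    print(_lowerCamelCase(demo_a, demo_b, [0.0, 0.0], 25))  # ~[0.0909, 0.6364]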
| 371 |
'''simple docstring'''
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse('9.1.0'):
lowerCAmelCase_ : str = {
'linear': PIL.Image.Resampling.BILINEAR,
'bilinear': PIL.Image.Resampling.BILINEAR,
'bicubic': PIL.Image.Resampling.BICUBIC,
'lanczos': PIL.Image.Resampling.LANCZOS,
'nearest': PIL.Image.Resampling.NEAREST,
}
else:
lowerCAmelCase_ : Union[str, Any] = {
'linear': PIL.Image.LINEAR,
'bilinear': PIL.Image.BILINEAR,
'bicubic': PIL.Image.BICUBIC,
'lanczos': PIL.Image.LANCZOS,
'nearest': PIL.Image.NEAREST,
}
def _lowerCamelCase ( images ) -> List[Any]:
    images = (images / 2 + 0.5).clamp(0 , 1 )
    images = images.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
    images = numpy_to_pil(images )
    return images
def numpy_to_pil ( images ) -> List[Any]:
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("uint8" )
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze() , mode="L" ) for image in images]
    else:
        pil_images = [Image.fromarray(image ) for image in images]
    return pil_images
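# Usage sketch (illustrative; assumes a torch tensor batch normalized to
# [-1, 1] in NCHW layout, as produced by diffusion pipelines):
#   import torch
#   batch = torch.rand(2, 3, 64, 64) * 2 - 1
#   pil_images = _lowerCamelCase(batch)  # -> list of PIL.Image.Image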
| 346 | 0 |
'''simple docstring'''
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class __SCREAMING_SNAKE_CASE (ABC ):
"""simple docstring"""
@staticmethod
@abstractmethod
def UpperCamelCase__ ( __a : int ):
raise NotImplementedError()
@abstractmethod
def UpperCamelCase__ ( self : Tuple ):
raise NotImplementedError()
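# Concrete CLI commands subclass the ABC above: the static hook registers the
# command's sub-parser on a shared ArgumentParser, and the instance hook runs
# the command itself. (Descriptive note only; no concrete command is defined
# in this file.)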
| 350 |
'''simple docstring'''
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
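# Session-scoped pytest fixtures that materialize a small dummy dataset in many
# on-disk formats (arrow, sqlite, csv, parquet, json/jsonl, text) and archive
# flavors (bz2, gzip, lz4, 7z, tar, xz, zip, zstd) for the datasets test suite.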
@pytest.fixture(scope="session" )
def _lowerCamelCase ( ) -> Optional[int]:
    n = 10
    features = datasets.Features(
        {
            "tokens": datasets.Sequence(datasets.Value("string" ) ),
            "labels": datasets.Sequence(datasets.ClassLabel(names=["negative", "positive"] ) ),
            "answers": datasets.Sequence(
                {
                    "text": datasets.Value("string" ),
                    "answer_start": datasets.Value("int32" ),
                } ),
            "id": datasets.Value("int64" ),
        } )
    dataset = datasets.Dataset.from_dict(
        {
            "tokens": [["foo"] * 5] * n,
            "labels": [[1] * 5] * n,
            "answers": [{"answer_start": [97], "text": ["1976"]}] * 10,
            "id": list(range(n ) ),
        } , features=features , )
    return dataset
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Tuple , lowercase : int ) -> Optional[Any]:
_a = str(tmp_path_factory.mktemp("data" ) / "file.arrow" )
dataset.map(cache_file_name=lowercase )
return filename
# FILE_CONTENT + files
FILE_CONTENT = '\\n    Text data.\n    Second line of data.'
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : List[str] ) -> List[Any]:
_a = tmp_path_factory.mktemp("data" ) / "file.txt"
_a = FILE_CONTENT
with open(lowercase , "w" ) as f:
f.write(lowercase )
return filename
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : str ) -> str:
    import bz2
    _a = tmp_path_factory.mktemp("data" ) / "file.txt.bz2"
    _a = bytes(lowercase , "utf-8" )
    with bz2.open(lowercase , "wb" ) as f:
f.write(lowercase )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : List[str] ) -> Optional[Any]:
import gzip
_a = str(tmp_path_factory.mktemp("data" ) / "file.txt.gz" )
_a = bytes(lowercase , "utf-8" )
with gzip.open(lowercase , "wb" ) as f:
f.write(lowercase )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Union[str, Any] ) -> Union[str, Any]:
if datasets.config.LZ4_AVAILABLE:
        import lz4.frame
        _a = tmp_path_factory.mktemp("data" ) / "file.txt.lz4"
        _a = bytes(lowercase , "utf-8" )
        with lz4.frame.open(lowercase , "wb" ) as f:
f.write(lowercase )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Any , lowercase : Tuple ) -> Optional[Any]:
if datasets.config.PY7ZR_AVAILABLE:
        import py7zr
        _a = tmp_path_factory.mktemp("data" ) / "file.txt.7z"
        with py7zr.SevenZipFile(lowercase , "w" ) as archive:
archive.write(lowercase , arcname=os.path.basename(lowercase ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Dict , lowercase : Optional[Any] ) -> Dict:
import tarfile
_a = tmp_path_factory.mktemp("data" ) / "file.txt.tar"
with tarfile.TarFile(lowercase , "w" ) as f:
f.add(lowercase , arcname=os.path.basename(lowercase ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Any ) -> Union[str, Any]:
import lzma
_a = tmp_path_factory.mktemp("data" ) / "file.txt.xz"
_a = bytes(lowercase , "utf-8" )
with lzma.open(lowercase , "wb" ) as f:
f.write(lowercase )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : int , lowercase : Any ) -> Union[str, Any]:
import zipfile
_a = tmp_path_factory.mktemp("data" ) / "file.txt.zip"
with zipfile.ZipFile(lowercase , "w" ) as f:
f.write(lowercase , arcname=os.path.basename(lowercase ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Dict ) -> List[str]:
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
_a = tmp_path_factory.mktemp("data" ) / "file.txt.zst"
_a = bytes(lowercase , "utf-8" )
with zstd.open(lowercase , "wb" ) as f:
f.write(lowercase )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : List[str] ) -> Union[str, Any]:
_a = tmp_path_factory.mktemp("data" ) / "file.xml"
_a = textwrap.dedent(
"\\n <?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n <tmx version=\"1.4\">\n <header segtype=\"sentence\" srclang=\"ca\" />\n <body>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>" )
with open(lowercase , "w" ) as f:
f.write(lowercase )
return filename
DATA = [
{'col_1': '0', 'col_2': 0, 'col_3': 0.0},
{'col_1': '1', 'col_2': 1, 'col_3': 1.0},
{'col_1': '2', 'col_2': 2, 'col_3': 2.0},
{'col_1': '3', 'col_2': 3, 'col_3': 3.0},
]
DATA2 = [
{'col_1': '4', 'col_2': 4, 'col_3': 4.0},
{'col_1': '5', 'col_2': 5, 'col_3': 5.0},
]
DATA_DICT_OF_LISTS = {
'col_1': ['0', '1', '2', '3'],
'col_2': [0, 1, 2, 3],
'col_3': [0.0, 1.0, 2.0, 3.0],
}
DATA_312 = [
{'col_3': 0.0, 'col_1': '0', 'col_2': 0},
{'col_3': 1.0, 'col_1': '1', 'col_2': 1},
]
DATA_STR = [
{'col_1': 's0', 'col_2': 0, 'col_3': 0.0},
{'col_1': 's1', 'col_2': 1, 'col_3': 1.0},
{'col_1': 's2', 'col_2': 2, 'col_3': 2.0},
{'col_1': 's3', 'col_2': 3, 'col_3': 3.0},
]
@pytest.fixture(scope="session" )
def _lowerCamelCase ( ) -> List[str]:
return DATA_DICT_OF_LISTS
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Union[str, Any] ) -> str:
_a = datasets.Dataset.from_dict(lowercase )
_a = str(tmp_path_factory.mktemp("data" ) / "dataset.arrow" )
dataset.map(cache_file_name=lowercase )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Dict ) -> Dict:
_a = str(tmp_path_factory.mktemp("data" ) / "dataset.sqlite" )
    with contextlib.closing(sqlite3.connect(lowercase ) ) as con:
_a = con.cursor()
cur.execute("CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)" )
for item in DATA:
cur.execute("INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)" , tuple(item.values() ) )
con.commit()
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Optional[Any] ) -> str:
_a = str(tmp_path_factory.mktemp("data" ) / "dataset.csv" )
with open(lowercase , "w" , newline="" ) as f:
_a = csv.DictWriter(lowercase , fieldnames=["col_1", "col_2", "col_3"] )
writer.writeheader()
for item in DATA:
writer.writerow(lowercase )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : int ) -> Optional[Any]:
_a = str(tmp_path_factory.mktemp("data" ) / "dataset2.csv" )
with open(lowercase , "w" , newline="" ) as f:
_a = csv.DictWriter(lowercase , fieldnames=["col_1", "col_2", "col_3"] )
writer.writeheader()
for item in DATA:
writer.writerow(lowercase )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Union[str, Any] , lowercase : Union[str, Any] ) -> int:
    import bz2
    _a = tmp_path_factory.mktemp("data" ) / "dataset.csv.bz2"
    with open(lowercase , "rb" ) as f:
        _a = f.read()
    # data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(lowercase , "wb" ) as f:
f.write(lowercase )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Optional[int] , lowercase : Any , lowercase : Any ) -> List[str]:
_a = tmp_path_factory.mktemp("data" ) / "dataset.csv.zip"
with zipfile.ZipFile(lowercase , "w" ) as f:
f.write(lowercase , arcname=os.path.basename(lowercase ) )
f.write(lowercase , arcname=os.path.basename(lowercase ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Dict , lowercase : Any , lowercase : List[Any] ) -> Dict:
_a = tmp_path_factory.mktemp("data" ) / "dataset.csv.zip"
with zipfile.ZipFile(lowercase , "w" ) as f:
f.write(lowercase , arcname=os.path.basename(csv_path.replace(".csv" , ".CSV" ) ) )
f.write(lowercase , arcname=os.path.basename(csva_path.replace(".csv" , ".CSV" ) ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Any , lowercase : Optional[Any] , lowercase : int ) -> int:
_a = tmp_path_factory.mktemp("data" ) / "dataset_with_dir.csv.zip"
with zipfile.ZipFile(lowercase , "w" ) as f:
f.write(lowercase , arcname=os.path.join("main_dir" , os.path.basename(lowercase ) ) )
f.write(lowercase , arcname=os.path.join("main_dir" , os.path.basename(lowercase ) ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : List[Any] ) -> Union[str, Any]:
_a = str(tmp_path_factory.mktemp("data" ) / "dataset.parquet" )
_a = pa.schema(
{
"col_1": pa.string(),
"col_2": pa.intaa(),
"col_3": pa.floataa(),
} )
with open(lowercase , "wb" ) as f:
_a = pq.ParquetWriter(lowercase , schema=lowercase )
_a = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(lowercase ) )] for k in DATA[0]} , schema=lowercase )
writer.write_table(lowercase )
writer.close()
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : str ) -> Union[str, Any]:
_a = str(tmp_path_factory.mktemp("data" ) / "dataset.json" )
_a = {"data": DATA}
with open(lowercase , "w" ) as f:
json.dump(lowercase , lowercase )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : int ) -> Union[str, Any]:
_a = str(tmp_path_factory.mktemp("data" ) / "dataset.json" )
_a = {"data": DATA_DICT_OF_LISTS}
with open(lowercase , "w" ) as f:
json.dump(lowercase , lowercase )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Optional[int] ) -> str:
_a = str(tmp_path_factory.mktemp("data" ) / "dataset.jsonl" )
with open(lowercase , "w" ) as f:
for item in DATA:
f.write(json.dumps(lowercase ) + "\n" )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : int ) -> List[str]:
_a = str(tmp_path_factory.mktemp("data" ) / "dataset2.jsonl" )
with open(lowercase , "w" ) as f:
for item in DATA:
f.write(json.dumps(lowercase ) + "\n" )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Optional[Any] ) -> Optional[Any]:
_a = str(tmp_path_factory.mktemp("data" ) / "dataset_312.jsonl" )
with open(lowercase , "w" ) as f:
for item in DATA_312:
f.write(json.dumps(lowercase ) + "\n" )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : str ) -> int:
_a = str(tmp_path_factory.mktemp("data" ) / "dataset-str.jsonl" )
with open(lowercase , "w" ) as f:
for item in DATA_STR:
f.write(json.dumps(lowercase ) + "\n" )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : List[str] , lowercase : Dict ) -> Tuple:
import gzip
_a = str(tmp_path_factory.mktemp("data" ) / "dataset.txt.gz" )
with open(lowercase , "rb" ) as orig_file:
with gzip.open(lowercase , "wb" ) as zipped_file:
zipped_file.writelines(lowercase )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Union[str, Any] , lowercase : List[Any] ) -> List[Any]:
import gzip
_a = str(tmp_path_factory.mktemp("data" ) / "dataset.jsonl.gz" )
with open(lowercase , "rb" ) as orig_file:
with gzip.open(lowercase , "wb" ) as zipped_file:
zipped_file.writelines(lowercase )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Optional[int] , lowercase : List[Any] , lowercase : int ) -> str:
_a = tmp_path_factory.mktemp("data" ) / "dataset.jsonl.zip"
with zipfile.ZipFile(lowercase , "w" ) as f:
f.write(lowercase , arcname=os.path.basename(lowercase ) )
f.write(lowercase , arcname=os.path.basename(lowercase ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Union[str, Any] , lowercase : Optional[int] , lowercase : int , lowercase : List[Any] ) -> Optional[int]:
_a = tmp_path_factory.mktemp("data" ) / "dataset_nested.jsonl.zip"
with zipfile.ZipFile(lowercase , "w" ) as f:
f.write(lowercase , arcname=os.path.join("nested" , os.path.basename(lowercase ) ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Optional[int] , lowercase : List[str] , lowercase : str ) -> Optional[Any]:
_a = tmp_path_factory.mktemp("data" ) / "dataset_with_dir.jsonl.zip"
with zipfile.ZipFile(lowercase , "w" ) as f:
f.write(lowercase , arcname=os.path.join("main_dir" , os.path.basename(lowercase ) ) )
f.write(lowercase , arcname=os.path.join("main_dir" , os.path.basename(lowercase ) ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Tuple , lowercase : Any , lowercase : Optional[int] ) -> int:
_a = tmp_path_factory.mktemp("data" ) / "dataset.jsonl.tar"
with tarfile.TarFile(lowercase , "w" ) as f:
f.add(lowercase , arcname=os.path.basename(lowercase ) )
f.add(lowercase , arcname=os.path.basename(lowercase ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : str , lowercase : List[str] , lowercase : Union[str, Any] , lowercase : Union[str, Any] ) -> Optional[Any]:
_a = tmp_path_factory.mktemp("data" ) / "dataset_nested.jsonl.tar"
with tarfile.TarFile(lowercase , "w" ) as f:
f.add(lowercase , arcname=os.path.join("nested" , os.path.basename(lowercase ) ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : int ) -> str:
_a = ["0", "1", "2", "3"]
_a = str(tmp_path_factory.mktemp("data" ) / "dataset.txt" )
with open(lowercase , "w" ) as f:
for item in data:
f.write(item + "\n" )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : str ) -> Dict:
_a = ["0", "1", "2", "3"]
_a = str(tmp_path_factory.mktemp("data" ) / "dataset2.txt" )
with open(lowercase , "w" ) as f:
for item in data:
f.write(item + "\n" )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Union[str, Any] ) -> Dict:
_a = ["0", "1", "2", "3"]
_a = tmp_path_factory.mktemp("data" ) / "dataset.abc"
with open(lowercase , "w" ) as f:
for item in data:
f.write(item + "\n" )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Any , lowercase : Union[str, Any] , lowercase : Any ) -> Optional[Any]:
_a = tmp_path_factory.mktemp("data" ) / "dataset.text.zip"
with zipfile.ZipFile(lowercase , "w" ) as f:
f.write(lowercase , arcname=os.path.basename(lowercase ) )
f.write(lowercase , arcname=os.path.basename(lowercase ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Dict , lowercase : List[str] , lowercase : List[str] ) -> Union[str, Any]:
_a = tmp_path_factory.mktemp("data" ) / "dataset_with_dir.text.zip"
with zipfile.ZipFile(lowercase , "w" ) as f:
f.write(lowercase , arcname=os.path.join("main_dir" , os.path.basename(lowercase ) ) )
f.write(lowercase , arcname=os.path.join("main_dir" , os.path.basename(lowercase ) ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Tuple , lowercase : int , lowercase : str ) -> int:
_a = tmp_path_factory.mktemp("data" ) / "dataset.ext.zip"
with zipfile.ZipFile(lowercase , "w" ) as f:
f.write(lowercase , arcname=os.path.basename("unsupported.ext" ) )
f.write(lowercase , arcname=os.path.basename("unsupported_2.ext" ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : List[Any] ) -> Any:
_a = "\n".join(["First", "Second\u2029with Unicode new line", "Third"] )
_a = str(tmp_path_factory.mktemp("data" ) / "dataset_with_unicode_new_lines.txt" )
with open(lowercase , "w" , encoding="utf-8" ) as f:
f.write(lowercase )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( ) -> Optional[Any]:
return os.path.join("tests" , "features" , "data" , "test_image_rgb.jpg" )
@pytest.fixture(scope="session" )
def _lowerCamelCase ( ) -> Optional[int]:
return os.path.join("tests" , "features" , "data" , "test_audio_44100.wav" )
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Any , lowercase : str ) -> Dict:
_a = tmp_path_factory.mktemp("data" ) / "dataset.img.zip"
with zipfile.ZipFile(lowercase , "w" ) as f:
f.write(lowercase , arcname=os.path.basename(lowercase ) )
f.write(lowercase , arcname=os.path.basename(lowercase ).replace(".jpg" , "2.jpg" ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : str ) -> str:
_a = tmp_path_factory.mktemp("data_dir" )
(data_dir / "subdir").mkdir()
with open(data_dir / "subdir" / "train.txt" , "w" ) as f:
f.write("foo\n" * 10 )
with open(data_dir / "subdir" / "test.txt" , "w" ) as f:
f.write("bar\n" * 10 )
# hidden file
with open(data_dir / "subdir" / ".test.txt" , "w" ) as f:
f.write("bar\n" * 10 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / ".subdir" / "train.txt" , "w" ) as f:
f.write("foo\n" * 10 )
with open(data_dir / ".subdir" / "test.txt" , "w" ) as f:
f.write("bar\n" * 10 )
return data_dir
| 346 | 0 |
'''simple docstring'''
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class __SCREAMING_SNAKE_CASE (TestCase ):
"""simple docstring"""
    def setUp( self ):
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8
# DPR tok
_a = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
_a = os.path.join(self.tmpdirname , "dpr_tokenizer" )
os.makedirs(lowercase_ , exist_ok=lowercase_ )
_a = os.path.join(lowercase_ , DPR_VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
# BART tok
_a = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
_a = dict(zip(lowercase_ , range(len(lowercase_ ) ) ) )
_a = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
_a = {"unk_token": "<unk>"}
_a = os.path.join(self.tmpdirname , "bart_tokenizer" )
os.makedirs(lowercase_ , exist_ok=lowercase_ )
_a = os.path.join(lowercase_ , BART_VOCAB_FILES_NAMES["vocab_file"] )
_a = os.path.join(lowercase_ , BART_VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(lowercase_ ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(lowercase_ ) )
    def get_dpr_tokenizer( self : Optional[Any] ):
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , "dpr_tokenizer" ) )
    def get_dpr_ctx_encoder_tokenizer( self : Tuple ):
return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , "dpr_tokenizer" ) )
    def get_bart_tokenizer( self : Union[str, Any] ):
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , "bart_tokenizer" ) )
    def tearDown( self : str ):
shutil.rmtree(self.tmpdirname )
    def get_dummy_dataset( self : Tuple ):
_a = Dataset.from_dict(
{
"id": ["0", "1"],
"text": ["foo", "bar"],
"title": ["Foo", "Bar"],
"embeddings": [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
} )
dataset.add_faiss_index("embeddings" , string_factory="Flat" , metric_type=faiss.METRIC_INNER_PRODUCT )
return dataset
    def get_dummy_canonical_hf_index_retriever( self : Optional[Any] ):
_a = self.get_dummy_dataset()
_a = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , )
with patch("transformers.models.rag.retrieval_rag.load_dataset" ) as mock_load_dataset:
_a = dataset
_a = RagRetriever(
lowercase_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
return retriever
    def get_dummy_custom_hf_index_retriever( self : Optional[int] , from_disk : bool ):
_a = self.get_dummy_dataset()
_a = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name="custom" , )
if from_disk:
_a = os.path.join(self.tmpdirname , "dataset" )
_a = os.path.join(self.tmpdirname , "index.faiss" )
dataset.get_index("embeddings" ).save(os.path.join(self.tmpdirname , "index.faiss" ) )
dataset.drop_index("embeddings" )
dataset.save_to_disk(os.path.join(self.tmpdirname , "dataset" ) )
del dataset
_a = RagRetriever(
lowercase_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
else:
_a = RagRetriever(
lowercase_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , lowercase_ ) , )
return retriever
    def get_dummy_legacy_index_retriever( self : List[str] ):
_a = Dataset.from_dict(
{
"id": ["0", "1"],
"text": ["foo", "bar"],
"title": ["Foo", "Bar"],
"embeddings": [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )],
} )
dataset.add_faiss_index("embeddings" , string_factory="Flat" , metric_type=faiss.METRIC_INNER_PRODUCT )
_a = os.path.join(self.tmpdirname , "hf_bert_base.hnswSQ8_correct_phi_128.c_index" )
dataset.save_faiss_index("embeddings" , index_file_name + ".index.dpr" )
pickle.dump(dataset["id"] , open(index_file_name + ".index_meta.dpr" , "wb" ) )
_a = os.path.join(self.tmpdirname , "psgs_w100.tsv.pkl" )
_a = {sample["id"]: [sample["text"], sample["title"]] for sample in dataset}
pickle.dump(lowercase_ , open(lowercase_ , "wb" ) )
_a = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name="legacy" , index_path=self.tmpdirname , )
_a = RagRetriever(
lowercase_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() )
return retriever
def UpperCamelCase__ ( self : Tuple ):
_a = 1
_a = self.get_dummy_canonical_hf_index_retriever()
_a = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_a = retriever.retrieve(lowercase_ , n_docs=lowercase_ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(lowercase_ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] )
self.assertEqual(len(doc_dicts[0]["id"] ) , lowercase_ )
self.assertEqual(doc_dicts[0]["id"][0] , "1" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["id"][0] , "0" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def UpperCamelCase__ ( self : int ):
_a = self.get_dummy_canonical_hf_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
with patch("transformers.models.rag.retrieval_rag.load_dataset" ) as mock_load_dataset:
_a = self.get_dummy_dataset()
retriever.save_pretrained(lowercase_ )
_a = RagRetriever.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
_a = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_a = retriever.retrieve(lowercase_ , n_docs=1 )
self.assertTrue(out is not None )
def UpperCamelCase__ ( self : int ):
_a = 1
_a = self.get_dummy_custom_hf_index_retriever(from_disk=lowercase_ )
_a = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_a = retriever.retrieve(lowercase_ , n_docs=lowercase_ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(lowercase_ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] )
self.assertEqual(len(doc_dicts[0]["id"] ) , lowercase_ )
self.assertEqual(doc_dicts[0]["id"][0] , "1" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["id"][0] , "0" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def UpperCamelCase__ ( self : Optional[int] ):
_a = self.get_dummy_custom_hf_index_retriever(from_disk=lowercase_ )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(lowercase_ )
_a = RagRetriever.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
_a = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_a = retriever.retrieve(lowercase_ , n_docs=1 )
self.assertTrue(out is not None )
def UpperCamelCase__ ( self : List[str] ):
_a = 1
_a = self.get_dummy_custom_hf_index_retriever(from_disk=lowercase_ )
_a = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_a = retriever.retrieve(lowercase_ , n_docs=lowercase_ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(lowercase_ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] )
self.assertEqual(len(doc_dicts[0]["id"] ) , lowercase_ )
self.assertEqual(doc_dicts[0]["id"][0] , "1" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["id"][0] , "0" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def UpperCamelCase__ ( self : List[Any] ):
_a = self.get_dummy_custom_hf_index_retriever(from_disk=lowercase_ )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(lowercase_ )
_a = RagRetriever.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
_a = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_a = retriever.retrieve(lowercase_ , n_docs=1 )
self.assertTrue(out is not None )
def UpperCamelCase__ ( self : Dict ):
_a = 1
_a = self.get_dummy_legacy_index_retriever()
_a = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_a = retriever.retrieve(lowercase_ , n_docs=lowercase_ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(lowercase_ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["text", "title"] )
self.assertEqual(len(doc_dicts[0]["text"] ) , lowercase_ )
self.assertEqual(doc_dicts[0]["text"][0] , "bar" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["text"][0] , "foo" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def UpperCamelCase__ ( self : Optional[int] ):
_a = self.get_dummy_legacy_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(lowercase_ )
_a = RagRetriever.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
_a = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_a = retriever.retrieve(lowercase_ , n_docs=1 )
self.assertTrue(out is not None )
@require_torch
@require_tokenizers
@require_sentencepiece
def UpperCamelCase__ ( self : List[Any] ):
import torch
_a = 1
_a = self.get_dummy_canonical_hf_index_retriever()
_a = [[5, 7], [10, 11]]
_a = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_a = retriever(lowercase_ , lowercase_ , prefix=retriever.config.generator.prefix , n_docs=lowercase_ )
_a = (
out["context_input_ids"],
out["context_attention_mask"],
out["retrieved_doc_embeds"],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(lowercase_ , lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
self.assertIsInstance(lowercase_ , np.ndarray )
_a = retriever(
lowercase_ , lowercase_ , prefix=retriever.config.generator.prefix , n_docs=lowercase_ , return_tensors="pt" , )
_a = ( # noqa: F841
out["context_input_ids"],
out["context_attention_mask"],
out["retrieved_doc_embeds"],
out["doc_ids"],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(lowercase_ , torch.Tensor )
self.assertIsInstance(lowercase_ , torch.Tensor )
self.assertIsInstance(lowercase_ , torch.Tensor )
@require_torch
@require_tokenizers
@require_sentencepiece
def UpperCamelCase__ ( self : Dict ):
_a = self.get_dpr_ctx_encoder_tokenizer()
_a = 1
_a = self.get_dummy_custom_hf_index_retriever(from_disk=lowercase_ )
retriever.set_ctx_encoder_tokenizer(lowercase_ )
_a = [[5, 7], [10, 11]]
_a = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_a = retriever(lowercase_ , lowercase_ , prefix=retriever.config.generator.prefix , n_docs=lowercase_ )
self.assertEqual(
len(lowercase_ ) , 6 ) # check whether the retriever output consist of 6 attributes including tokenized docs
self.assertEqual(
all(k in out for k in ("tokenized_doc_ids", "tokenized_doc_attention_mask") ) , lowercase_ ) # check for doc token related keys in dictionary.
| 351 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __SCREAMING_SNAKE_CASE (ProcessorMixin ):
"""simple docstring"""
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'LayoutLMv2ImageProcessor'
    tokenizer_class = ('LayoutXLMTokenizer', 'LayoutXLMTokenizerFast')
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead." , FutureWarning , )
            feature_extractor = kwargs.pop("feature_extractor" )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`." )
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`." )
        super().__init__(image_processor , tokenizer )
    def __call__( self , images , text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , boxes: Union[List[List[int]], List[List[List[int]]]] = None , word_labels: Optional[Union[List[int], List[List[int]]]] = None , add_special_tokens: bool = True , padding: Union[bool, str, PaddingStrategy] = False , truncation: Union[bool, str, TruncationStrategy] = None , max_length: Optional[int] = None , stride: int = 0 , pad_to_multiple_of: Optional[int] = None , return_token_type_ids: Optional[bool] = None , return_attention_mask: Optional[bool] = None , return_overflowing_tokens: bool = False , return_special_tokens_mask: bool = False , return_offsets_mapping: bool = False , return_length: bool = False , verbose: bool = True , return_tensors: Optional[Union[str, TensorType]] = None , **kwargs , ) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes "
                "if you initialized the image processor with apply_ocr set to True." )
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True." )
        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError("You cannot return overflowing tokens without returning the offsets mapping." )
        # first, apply the image processor
        features = self.image_processor(images=images , return_tensors=return_tensors )
        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text , str ):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]
        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features["boxes"] , word_labels=word_labels , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        # add pixel values
        images = features.pop("pixel_values" )
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images , encoded_inputs["overflow_to_sample_mapping"] )
        encoded_inputs["image"] = images
        return encoded_inputs
    def get_overflowing_images( self , images , overflow_to_sample_mapping ):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx] )
        if len(images_with_overflow ) != len(overflow_to_sample_mapping ):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f' {len(images_with_overflow )} and {len(overflow_to_sample_mapping )}' )
        return images_with_overflow
    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )
@property
    def model_input_names( self ):
return ["input_ids", "bbox", "attention_mask", "image"]
@property
    def feature_extractor_class( self ):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , FutureWarning , )
        return self.image_processor_class
@property
    def feature_extractor( self ):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , FutureWarning , )
return self.image_processor
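# Rough usage sketch (illustrative; the tokenizer, image processor and image
# variables are assumptions, and the class above stands in for LayoutXLMProcessor):
#   processor = __SCREAMING_SNAKE_CASE(image_processor=image_processor, tokenizer=tokenizer)
#   encoding = processor(images=image, return_tensors="pt")  # OCR runs inside the image processor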
| 346 | 0 |
'''simple docstring'''
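# Friedmann equation for the expansion rate:
#   H(z) = H0 * sqrt(Omega_r*(1+z)^4 + Omega_m*(1+z)^3 + Omega_k*(1+z)^2 + Omega_L)
# where the curvature density is Omega_k = 1 - (Omega_r + Omega_m + Omega_L).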
def hubble_parameter ( hubble_constant : float , radiation_density : float , matter_density : float , dark_energy : float , redshift : float , ) -> float:
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters ):
        raise ValueError("All input parameters must be positive" )
    if any(p > 1 for p in parameters[1:4] ):
        raise ValueError("Relative densities cannot be greater than one" )
    else:
        curvature = 1 - (matter_density + radiation_density + dark_energy)
        e_a = (
            radiation_density * (redshift + 1) ** 4
            + matter_density * (redshift + 1) ** 3
            + curvature * (redshift + 1) ** 2
            + dark_energy
        )
        hubble = hubble_constant * e_a ** (1 / 2)
        return hubble
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
lowerCAmelCase_ : Any = 0.3
print(
hubble_parameter(
hubble_constant=68.3,
radiation_density=1e-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
)
| 352 |
'''simple docstring'''
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase_ : Dict = logging.get_logger(__name__)
lowerCAmelCase_ : int = '▁'
VOCAB_FILES_NAMES = {
'vocab_file': 'vocab.json',
'spm_file': 'sentencepiece.bpe.model',
}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/s2t-small-librispeech-asr': (
'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json'
),
},
'spm_file': {
'facebook/s2t-small-librispeech-asr': (
'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model'
)
},
}
MAX_MODEL_INPUT_SIZES = {
'facebook/s2t-small-librispeech-asr': 10_24,
}
MUSTC_LANGS = ['pt', 'fr', 'ru', 'nl', 'ro', 'it', 'es', 'de']
LANGUAGES = {'mustc': MUSTC_LANGS}
class __SCREAMING_SNAKE_CASE (PreTrainedTokenizer ):
"""simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = MAX_MODEL_INPUT_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    prefix_tokens: List[int] = []
    def __init__( self , vocab_file , spm_file , bos_token="<s>" , eos_token="</s>" , pad_token="<pad>" , unk_token="<unk>" , do_upper_case=False , do_lower_case=False , tgt_lang=None , lang_codes=None , sp_model_kwargs : Optional[Dict[str, Any]] = None , **kwargs , ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , pad_token=pad_token , do_upper_case=do_upper_case , do_lower_case=do_lower_case , tgt_lang=tgt_lang , lang_codes=lang_codes , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.do_upper_case = do_upper_case
        self.do_lower_case = do_lower_case
        self.encoder = load_json(vocab_file )
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file , self.sp_model_kwargs )
        if lang_codes is not None:
            self.lang_codes = lang_codes
            self.langs = LANGUAGES[lang_codes]
            self.lang_tokens = [f'<lang:{lang}>' for lang in self.langs]
            self.lang_code_to_id = {lang: self.sp_model.PieceToId(f'<lang:{lang}>' ) for lang in self.langs}
            self.additional_special_tokens = self.lang_tokens
            self._tgt_lang = tgt_lang if tgt_lang is not None else self.langs[0]
            self.set_tgt_lang_special_tokens(self._tgt_lang )
        else:
            self.lang_code_to_id = {}
@property
    def vocab_size( self ) -> int:
        return len(self.encoder )
    @property
    def tgt_lang( self ) -> str:
        return self._tgt_lang
    @tgt_lang.setter
    def tgt_lang( self , new_tgt_lang ) -> None:
        self._tgt_lang = new_tgt_lang
        self.set_tgt_lang_special_tokens(new_tgt_lang )
    def set_tgt_lang_special_tokens( self , tgt_lang : str ) -> None:
        lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [lang_code_id]
    def _tokenize( self , text : str ) -> List[str]:
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id( self , token ):
        return self.encoder.get(token , self.encoder[self.unk_token] )
    def _convert_id_to_token( self , index : int ) -> str:
        return self.decoder.get(index , self.unk_token )
    def convert_tokens_to_string( self , tokens : List[str] ) -> str:
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                decoded = self.sp_model.decode(current_sub_tokens )
                out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
        decoded = self.sp_model.decode(current_sub_tokens )
        out_string += decoded.upper() if self.do_upper_case else decoded
        return out_string.strip()
    def build_inputs_with_special_tokens( self , token_ids_a , token_ids_b=None ) -> List[int]:
        if token_ids_b is None:
            return self.prefix_tokens + token_ids_a + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_a + token_ids_b + [self.eos_token_id]
    def get_special_tokens_mask( self , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None , already_has_special_tokens : bool = False ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_a , token_ids_1=token_ids_b , already_has_special_tokens=True )
        prefix_ones = [1] * len(self.prefix_tokens )
        suffix_ones = [1]
        if token_ids_b is None:
            return prefix_ones + ([0] * len(token_ids_a )) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_a )) + ([0] * len(token_ids_b )) + suffix_ones
    def get_vocab( self ) -> Dict:
        vocab = self.encoder.copy()
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __getstate__( self ) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__( self , d : Dict ) -> None:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            self.sp_model_kwargs = {}
        self.sp_model = load_spm(self.spm_file , self.sp_model_kwargs )
    def save_vocabulary( self , save_directory : str , filename_prefix : Optional[str] = None ) -> Tuple[str]:
        save_dir = Path(save_directory )
        assert save_dir.is_dir(), f'{save_directory} should be a directory'
        vocab_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
        )
        spm_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
        )
        save_json(self.encoder , vocab_save_path )
        if os.path.abspath(self.spm_file ) != os.path.abspath(spm_save_path ) and os.path.isfile(self.spm_file ):
            copyfile(self.spm_file , spm_save_path )
        elif not os.path.isfile(self.spm_file ):
            with open(spm_save_path , "wb" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (str(vocab_save_path ), str(spm_save_path ))
def load_spm ( path : str , sp_model_kwargs : Dict[str, Any] ) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs )
    spm.Load(str(path ) )
    return spm
def load_json ( path : str ) -> Union[Dict, List]:
    with open(path , "r" ) as f:
        return json.load(f )
def save_json ( data : Any , path : str ) -> None:
    with open(path , "w" ) as f:
        json.dump(data , f , indent=2 )
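# Rough usage sketch (illustrative; the checkpoint name comes from the URL map
# above, and the class above stands in for Speech2TextTokenizer):
#   tok = __SCREAMING_SNAKE_CASE.from_pretrained("facebook/s2t-small-librispeech-asr")
#   ids = tok("hello world").input_ids
#   text = tok.decode(ids, skip_special_tokens=True)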
| 346 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ : Dict = logging.get_logger(__name__)
lowerCAmelCase_ : List[Any] = {
"""alibaba-damo/mgp-str-base""": """https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json""",
}
class __SCREAMING_SNAKE_CASE (PretrainedConfig ):
"""simple docstring"""
    model_type = 'mgp-str'
def __init__( self : Dict , __a : Union[str, Any]=[32, 1_28] , __a : List[str]=4 , __a : Any=3 , __a : Optional[int]=27 , __a : Union[str, Any]=38 , __a : int=5_02_57 , __a : Union[str, Any]=3_05_22 , __a : Tuple=7_68 , __a : Dict=12 , __a : Any=12 , __a : Optional[Any]=4.0 , __a : Optional[Any]=True , __a : Tuple=False , __a : Optional[Any]=1e-5 , __a : Optional[int]=0.0 , __a : List[Any]=0.0 , __a : Any=0.0 , __a : List[Any]=False , __a : Optional[int]=0.02 , **__a : Tuple , ):
        super().__init__(**__a )
_a = image_size
_a = patch_size
_a = num_channels
_a = max_token_length
_a = num_character_labels
_a = num_bpe_labels
_a = num_wordpiece_labels
_a = hidden_size
_a = num_hidden_layers
_a = num_attention_heads
_a = mlp_ratio
_a = distilled
_a = layer_norm_eps
_a = drop_rate
_a = qkv_bias
_a = attn_drop_rate
_a = drop_path_rate
_a = output_aa_attentions
_a = initializer_range
| 353 |
'''simple docstring'''
from manim import *
class __SCREAMING_SNAKE_CASE (Scene ):
    """simple docstring"""
    def construct( self ):
_a = Rectangle(height=0.5 , width=0.5 )
_a = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
_a = [mem.copy() for i in range(6 )]
_a = [mem.copy() for i in range(6 )]
_a = VGroup(*__a ).arrange(__a , buff=0 )
_a = VGroup(*__a ).arrange(__a , buff=0 )
_a = VGroup(__a , __a ).arrange(__a , buff=0 )
_a = Text("CPU" , font_size=24 )
_a = Group(__a , __a ).arrange(__a , buff=0.5 , aligned_edge=__a )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__a )
_a = [mem.copy() for i in range(4 )]
_a = VGroup(*__a ).arrange(__a , buff=0 )
_a = Text("GPU" , font_size=24 )
_a = Group(__a , __a ).arrange(__a , buff=0.5 , aligned_edge=__a )
gpu.move_to([-1, -1, 0] )
self.add(__a )
_a = [mem.copy() for i in range(6 )]
_a = VGroup(*__a ).arrange(__a , buff=0 )
_a = Text("Model" , font_size=24 )
_a = Group(__a , __a ).arrange(__a , buff=0.5 , aligned_edge=__a )
model.move_to([3, -1.0, 0] )
self.add(__a )
_a = []
for i, rect in enumerate(__a ):
rect.set_stroke(__a )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
_a = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(__a , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__a )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=__a , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=__a , buff=0.0 )
self.add(__a )
cpu_targs.append(__a )
_a = [mem.copy() for i in range(6 )]
_a = VGroup(*__a ).arrange(__a , buff=0 )
_a = Text("Loaded Checkpoint" , font_size=24 )
_a = Group(__a , __a ).arrange(__a , aligned_edge=__a , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
_a = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_a = MarkupText(
f'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(__a , __a )
_a = MarkupText(
f'<span fgcolor=\'{BLUE}\'>●</span> Checkpoint' , font_size=18 , )
blue_text.next_to(__a , DOWN * 2.4 , aligned_edge=key_text.get_left() )
_a = MarkupText(
f'Next, a <i><span fgcolor="{BLUE}">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor="{BLUE}">single shard</span>.' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(__a ) , Write(__a ) )
self.play(Write(__a , run_time=1 ) , Create(__a , run_time=1 ) )
_a = []
_a = []
for i, rect in enumerate(__a ):
_a = fill.copy().set_fill(__a , opacity=0.7 )
target.move_to(__a )
first_animations.append(GrowFromCenter(__a , run_time=1 ) )
_a = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(__a , run_time=1.5 ) )
self.play(*__a )
self.play(*__a )
self.wait()
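# To render the scene above (assuming manim is installed and the snippet is
# saved as a standalone file, e.g. checkpoint_scene.py):
#   manim -pql checkpoint_scene.py __SCREAMING_SNAKE_CASE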
| 346 | 0 |
'''simple docstring'''
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
lowerCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE (BaseImageProcessor ):
"""simple docstring"""
    model_input_names = ['pixel_values']
def __init__( self : Optional[int] , __a : bool = True , __a : Dict[str, int] = None , __a : PILImageResampling = PILImageResampling.BICUBIC , __a : bool = True , __a : Dict[str, int] = None , __a : bool = True , __a : Union[int, float] = 1 / 2_55 , __a : bool = True , __a : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN , __a : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD , **__a : str , ):
super().__init__(**__UpperCAmelCase )
_a = size if size is not None else {"shortest_edge": 2_24}
_a = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase )
_a = crop_size if crop_size is not None else {"height": 2_24, "width": 2_24}
_a = get_size_dict(__UpperCAmelCase , param_name="crop_size" )
_a = do_resize
_a = size
_a = resample
_a = do_center_crop
_a = crop_size
_a = do_rescale
_a = rescale_factor
_a = do_normalize
_a = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
_a = image_std if image_std is not None else IMAGENET_DEFAULT_STD
    def resize( self : Tuple , __a : np.ndarray , __a : Dict[str, int] , __a : PILImageResampling = PILImageResampling.BICUBIC , __a : Optional[Union[str, ChannelDimension]] = None , **__a : Tuple , ):
_a = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase )
# size_dict is a dict with either keys "height" and "width" or "shortest_edge"
if "shortest_edge" in size:
_a = int((2_56 / 2_24) * size["shortest_edge"] )
_a = get_resize_output_image_size(__UpperCAmelCase , size=__UpperCAmelCase , default_to_square=__UpperCAmelCase )
_a = {"height": output_size[0], "width": output_size[1]}
if "height" not in size_dict or "width" not in size_dict:
raise ValueError(
f'Size dict must have keys \'height\' and \'width\' or \'shortest_edge\'. Got {size_dict.keys()}' )
return resize(
__UpperCAmelCase , size=(size_dict["height"], size_dict["width"]) , resample=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase )
    def center_crop( self : Tuple , __a : np.ndarray , __a : Dict[str, int] , __a : Optional[Union[str, ChannelDimension]] = None , **__a : Optional[int] , ):
_a = get_size_dict(__UpperCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(f'Size dict must have keys \'height\' and \'width\'. Got {size.keys()}' )
return center_crop(__UpperCAmelCase , size=(size["height"], size["width"]) , data_format=__UpperCAmelCase , **__UpperCAmelCase )
    def rescale( self : Union[str, Any] , __a : np.ndarray , __a : Union[int, float] , __a : Optional[Union[str, ChannelDimension]] = None , **__a : Optional[Any] , ):
return rescale(__UpperCAmelCase , scale=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase )
    def normalize( self : str , __a : np.ndarray , __a : Union[float, List[float]] , __a : Union[float, List[float]] , __a : Optional[Union[str, ChannelDimension]] = None , **__a : Union[str, Any] , ):
return normalize(__UpperCAmelCase , mean=__UpperCAmelCase , std=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase )
    def preprocess( self : Optional[Any] , __a : ImageInput , __a : Optional[bool] = None , __a : Optional[Dict[str, int]] = None , __a : PILImageResampling = None , __a : Optional[bool] = None , __a : Optional[Dict[str, int]] = None , __a : Optional[bool] = None , __a : Optional[float] = None , __a : Optional[bool] = None , __a : Optional[Union[float, Iterable[float]]] = None , __a : Optional[Union[float, Iterable[float]]] = None , __a : Optional[TensorType] = None , __a : ChannelDimension = ChannelDimension.FIRST , **__a : List[Any] , ):
_a = do_resize if do_resize is not None else self.do_resize
_a = resample if resample is not None else self.resample
_a = do_center_crop if do_center_crop is not None else self.do_center_crop
_a = do_rescale if do_rescale is not None else self.do_rescale
_a = rescale_factor if rescale_factor is not None else self.rescale_factor
_a = do_normalize if do_normalize is not None else self.do_normalize
_a = image_mean if image_mean is not None else self.image_mean
_a = image_std if image_std is not None else self.image_std
_a = size if size is not None else self.size
_a = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase )
_a = crop_size if crop_size is not None else self.crop_size
_a = get_size_dict(__UpperCAmelCase , param_name="crop_size" )
_a = make_list_of_images(__UpperCAmelCase )
if not valid_images(__UpperCAmelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image , size , resample ) for image in images]
        if do_center_crop:
            images = [self.center_crop(image , crop_size ) for image in images]
        if do_rescale:
            images = [self.rescale(image , rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image , image_mean , image_std ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data , tensor_type=return_tensors )
| 354 |
'''simple docstring'''
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
client = WebClient(token=os.environ['CI_SLACK_BOT_TOKEN'])
def handle_test_results ( test_results ):
    expressions = test_results.split(" " )
    failed = 0
    success = 0
    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if "=" in expressions[-1] else expressions[-1]
    for i, expression in enumerate(expressions ):
        if "failed" in expression:
            failed += int(expressions[i - 1] )
        if "passed" in expression:
            success += int(expressions[i - 1] )
    return failed, success, time_spent
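# Illustrative example (not part of the original script): for a pytest summary line
# such as "== 2 failed, 10 passed in 12.34s ==" the parser returns counts and duration:
#   failed, success, time_spent = handle_test_results("== 2 failed, 10 passed in 12.34s ==")
#   # failed == 2, success == 10, time_spent == "12.34s"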
def extract_first_line_failure ( failures_short_lines ):
    failures = {}
    file = None
    in_error = False
    for line in failures_short_lines.split("\n" ):
        if re.search(r"_ \[doctest\]" , line ):
            in_error = True
            file = line.split(" " )[2]
        elif in_error and not line.split(" " )[0].isdigit():
            failures[file] = line
            in_error = False
    return failures
class Message :
"""simple docstring"""
    def __init__( self : Tuple , title : str , doc_test_results : Dict ):
        self.title = title
        self._time_spent = doc_test_results["time_spent"].split("," )[0]
        self.n_success = doc_test_results["success"]
        self.n_failures = doc_test_results["failures"]
        self.n_tests = self.n_success + self.n_failures
        # Failures and success of the modeling tests
        self.doc_test_results = doc_test_results
@property
    def time ( self : int ):
        time_spent = [self._time_spent]
        total_secs = 0
        for time in time_spent:
            time_parts = time.split(":" )
            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(time_parts ) == 1:
                time_parts = [0, 0, time_parts[0]]
            hours, minutes, seconds = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
            total_secs += hours * 36_00 + minutes * 60 + seconds
        hours, minutes, seconds = total_secs // 36_00, (total_secs % 36_00) // 60, total_secs % 60
        return f'{int(hours )}h{int(minutes )}m{int(seconds )}s'
@property
    def header ( self : Optional[Any] ):
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
    def no_failures ( self : Optional[Any] ):
return {
"type": "section",
"text": {
"type": "plain_text",
"text": f'🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.',
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
@property
    def failures ( self : List[str] ):
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
f'There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in'
f' {self.time}.'
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
@property
    def category_failures ( self : str ):
        line_length = 40
        category_failures = {k: v["failed"] for k, v in self.doc_test_results.items() if isinstance(v , dict )}
        report = ""
        for category, failures in category_failures.items():
            if len(failures ) == 0:
                continue
if report != "":
report += "\n\n"
report += f'*{category} failures*:'.ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
report += "`"
            report += "`\n`".join(failures )
report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": f'The following examples had failures:\n\n\n{report}\n',
},
}
@property
    def payload ( self : List[str] ):
        blocks = [self.header]
        if self.n_failures > 0:
            blocks.append(self.failures )
        if self.n_failures > 0:
            blocks.extend([self.category_failures] )
        if self.n_failures == 0:
            blocks.append(self.no_failures )
        return json.dumps(blocks )
@staticmethod
    def error_out ( ):
        payload = [
            {
                "type": "section",
                "text": {
                    "type": "plain_text",
                    "text": "There was an issue running the tests.",
                },
                "accessory": {
                    "type": "button",
                    "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                    "url": f'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
                },
            }
        ]
        print("Sending the following payload" )
        print(json.dumps({"blocks": payload} ) )
        client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"] , text="There was an issue running the tests." , blocks=payload , )
    def post ( self : Tuple ):
        print("Sending the following payload" )
        print(json.dumps({"blocks": json.loads(self.payload )} ) )
        text = f'{self.n_failures} failures out of {self.n_tests} tests,' if self.n_failures else "All tests passed."
        self.thread_ts = client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"] , blocks=self.payload , text=text , )
    def get_reply_blocks ( self : Dict , job_name : List[str] , job_link : List[Any] , failures : Tuple , text : int ):
        failures_text = ""
        for key, value in failures.items():
            value = value[:2_00] + " [Truncated]" if len(value ) > 2_50 else value
            failures_text += f'*{key}*\n_{value}_\n\n'
        title = job_name
        content = {"type": "section", "text": {"type": "mrkdwn", "text": text}}
        if job_link is not None:
            content["accessory"] = {
                "type": "button",
                "text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True},
                "url": job_link,
            }
        return [
            {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
            content,
            {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
        ]
    def post_reply ( self : str ):
        if self.thread_ts is None:
            raise ValueError("Can only post reply if a post has been made." )
        job_link = self.doc_test_results.pop("job_link" )
        self.doc_test_results.pop("failures" )
        self.doc_test_results.pop("success" )
        self.doc_test_results.pop("time_spent" )
        sorted_dict = sorted(self.doc_test_results.items() , key=lambda t : t[0] )
        for job, job_result in sorted_dict:
            if len(job_result["failures"] ):
                text = f'*Num failures* :{len(job_result["failed"] )} \n'
                failures = job_result["failures"]
                blocks = self.get_reply_blocks(job , job_link , failures , text=text )
                print("Sending the following reply" )
                print(json.dumps({"blocks": blocks} ) )
                client.chat_postMessage(
                    channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"] , text=f'Results for {job}' , blocks=blocks , thread_ts=self.thread_ts["ts"] , )
                time.sleep(1 )
def get_job_links ( ):
    run_id = os.environ["GITHUB_RUN_ID"]
    url = F'https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100'
    result = requests.get(url ).json()
    jobs = {}
    try:
        jobs.update({job["name"]: job["html_url"] for job in result["jobs"]} )
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100 )
        for i in range(pages_to_iterate_over ):
            result = requests.get(url + F'&page={i + 2}' ).json()
            jobs.update({job["name"]: job["html_url"] for job in result["jobs"]} )
        return jobs
    except Exception as e:
        print("Unknown error, could not fetch links." , e )
        return {}
def retrieve_artifact ( name ):
    _artifact = {}
    if os.path.exists(name ):
        files = os.listdir(name )
        for file in files:
            try:
                with open(os.path.join(name , file ) , encoding="utf-8" ) as f:
                    _artifact[file.split("." )[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(F'Could not open {os.path.join(name , file )}.' ) from e
    return _artifact
def retrieve_available_artifacts ( ):
    class Artifact:
        def __init__( self : Dict , name : str ):
            self.name = name
            self.paths = []
        def __str__( self : List[str] ):
            return self.name
        def add_path ( self : str , path : str ):
            self.paths.append({"name": self.name, "path": path} )
    _available_artifacts = {}
    directories = filter(os.path.isdir , os.listdir() )
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name )
        _available_artifacts[artifact_name].add_path(directory )
    return _available_artifacts
if __name__ == "__main__":
    github_actions_job_links = get_job_links()
    available_artifacts = retrieve_available_artifacts()
    docs = collections.OrderedDict(
        [
            ('*.py', 'API Examples'),
            ('*.md', 'MD Examples'),
        ]
    )
    # This dict will contain all the information relative to each doc test category:
    # - failed: list of failed tests
    # - failures: dict in the format 'test': 'error_message'
    doc_test_results = {
        v: {
            'failed': [],
            'failures': {},
        }
        for v in docs.values()
    }
    # Link to the GitHub Action job
    doc_test_results['job_link'] = github_actions_job_links.get('run_doctests')
    artifact_path = available_artifacts['doc_tests_gpu_test_reports'].paths[0]
    artifact = retrieve_artifact(artifact_path['name'])
    if "stats" in artifact:
        failed, success, time_spent = handle_test_results(artifact['stats'])
        doc_test_results['failures'] = failed
        doc_test_results['success'] = success
        doc_test_results['time_spent'] = time_spent[1:-1] + ', '
        all_failures = extract_first_line_failure(artifact['failures_short'])
        for line in artifact["summary_short"].split('\n'):
            if re.search('FAILED', line):
                line = line.replace('FAILED ', '')
                line = line.split()[0].replace('\n', '')
                if "::" in line:
                    file_path, test = line.split('::')
                else:
                    file_path, test = line, line
                for file_regex in docs.keys():
                    if fnmatch(file_path, file_regex):
                        category = docs[file_regex]
                        doc_test_results[category]["failed"].append(test)
                        failure = all_failures[test] if test in all_failures else 'N/A'
                        doc_test_results[category]["failures"][test] = failure
                        break
    message = Message('🤗 Results of the doc tests.', doc_test_results)
message.post()
message.post_reply()
| 346 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class ChineseCLIPImageProcessingTester (unittest.TestCase ):
"""simple docstring"""
def __init__( self : Any , __a : Union[str, Any] , __a : Any=7 , __a : Optional[int]=3 , __a : Dict=18 , __a : List[Any]=30 , __a : Any=4_00 , __a : Dict=True , __a : List[Any]=None , __a : str=True , __a : int=None , __a : int=True , __a : Tuple=[0.48145466, 0.4578275, 0.40821073] , __a : Optional[Any]=[0.26862954, 0.26130258, 0.27577711] , __a : List[str]=True , ):
_a = size if size is not None else {"""height""": 2_24, """width""": 2_24}
_a = crop_size if crop_size is not None else {"""height""": 18, """width""": 18}
_a = parent
_a = batch_size
_a = num_channels
_a = image_size
_a = min_resolution
_a = max_resolution
_a = do_resize
_a = size
_a = do_center_crop
_a = crop_size
_a = do_normalize
_a = image_mean
_a = image_std
_a = do_convert_rgb
def UpperCamelCase__ ( self : Dict ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_convert_rgb": self.do_convert_rgb,
}
    def prepare_inputs ( self : Union[str, Any] , equal_resolution : Optional[Any]=False , numpify : str=False , torchify : List[str]=False ):
        assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"
        if equal_resolution:
            image_inputs = []
            for i in range(self.batch_size ):
                image_inputs.append(
                    np.random.randint(
                        2_55 , size=(self.num_channels, self.max_resolution, self.max_resolution) , dtype=np.uint8 ) )
        else:
            image_inputs = []
            for i in range(self.batch_size ):
                width, height = np.random.choice(np.arange(self.min_resolution , self.max_resolution ) , 2 )
                image_inputs.append(np.random.randint(2_55 , size=(self.num_channels, width, height) , dtype=np.uint8 ) )
        if not numpify and not torchify:
            # PIL expects the channel dimension as last dimension
            image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
        if torchify:
            image_inputs = [torch.from_numpy(x ) for x in image_inputs]
        return image_inputs
@require_torch
@require_vision
class __SCREAMING_SNAKE_CASE (ImageProcessingSavingTestMixin , unittest.TestCase ):
"""simple docstring"""
__a =ChineseCLIPImageProcessor if is_vision_available() else None
def UpperCamelCase__ ( self : List[str] ):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self , do_center_crop=True )
@property
def UpperCamelCase__ ( self : int ):
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase__ ( self : Any ):
_a = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , "do_resize" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , "size" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , "do_center_crop" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , "center_crop" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , "do_normalize" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , "image_mean" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , "image_std" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , "do_convert_rgb" ) )
def UpperCamelCase__ ( self : Optional[int] ):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"height": 2_24, "width": 2_24} )
self.assertEqual(image_processor.crop_size , {"height": 18, "width": 18} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"shortest_edge": 42} )
self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} )
def UpperCamelCase__ ( self : str ):
pass
def UpperCamelCase__ ( self : List[Any] ):
# Initialize image_processing
_a = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_a = self.image_processor_tester.prepare_inputs(equal_resolution=SCREAMING_SNAKE_CASE_ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , Image.Image )
# Test not batched input
_a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
_a = image_processing(SCREAMING_SNAKE_CASE_ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def UpperCamelCase__ ( self : Tuple ):
# Initialize image_processing
_a = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_a = self.image_processor_tester.prepare_inputs(equal_resolution=SCREAMING_SNAKE_CASE_ , numpify=SCREAMING_SNAKE_CASE_ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , np.ndarray )
# Test not batched input
_a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
_a = image_processing(SCREAMING_SNAKE_CASE_ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def UpperCamelCase__ ( self : Optional[Any] ):
# Initialize image_processing
_a = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_a = self.image_processor_tester.prepare_inputs(equal_resolution=SCREAMING_SNAKE_CASE_ , torchify=SCREAMING_SNAKE_CASE_ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , torch.Tensor )
# Test not batched input
_a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
_a = image_processing(SCREAMING_SNAKE_CASE_ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
@require_torch
@require_vision
class __SCREAMING_SNAKE_CASE (ImageProcessingSavingTestMixin , unittest.TestCase ):
"""simple docstring"""
__a =ChineseCLIPImageProcessor if is_vision_available() else None
def UpperCamelCase__ ( self : Any ):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self , num_channels=4 , do_center_crop=True )
        self.expected_encoded_image_num_channels = 3
@property
def UpperCamelCase__ ( self : List[Any] ):
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase__ ( self : Optional[int] ):
_a = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , "do_resize" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , "size" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , "do_center_crop" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , "center_crop" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , "do_normalize" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , "image_mean" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , "image_std" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , "do_convert_rgb" ) )
def UpperCamelCase__ ( self : str ):
pass
def UpperCamelCase__ ( self : Optional[Any] ):
# Initialize image_processing
_a = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_a = self.image_processor_tester.prepare_inputs(equal_resolution=SCREAMING_SNAKE_CASE_ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , Image.Image )
# Test not batched input
_a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
_a = image_processing(SCREAMING_SNAKE_CASE_ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
| 355 |
'''simple docstring'''
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main ( ):
    parser = HfArgumentParser(TensorFlowBenchmarkArguments )
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args )
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e ).split(" " )[:-1] )
        full_error_msg = ""
        depreciated_args = eval(str(e ).split(" " )[-1] )
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:] )
            else:
                wrong_args.append(arg )
        if len(wrong_args ) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args )
        raise ValueError(full_error_msg )
    benchmark.run()
if __name__ == "__main__":
main()
| 346 | 0 |
'''simple docstring'''
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
logger = logging.getLogger(__name__)
def dummy_dataloaders ( a=2 , b=3 , batch_size=16 , n_train_batches : int = 10 , n_valid_batches : int = 2 ):
    def get_dataset(n_batches : Optional[int] ):
        x = torch.randn(batch_size * n_batches , 1 )
        return TensorDataset(x , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) )
    train_dataset = get_dataset(n_train_batches )
    valid_dataset = get_dataset(n_valid_batches )
    train_dataloader = DataLoader(train_dataset , shuffle=True , batch_size=batch_size , num_workers=4 )
    valid_dataloader = DataLoader(valid_dataset , shuffle=True , batch_size=batch_size , num_workers=4 )
    return (train_dataloader, valid_dataloader)
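# Quick sanity check (a sketch, not part of the original tests): each batch is an
# (x, y) pair of shape [batch_size, 1] with y ~= a * x + b plus Gaussian noise:
#   train_dl, valid_dl = dummy_dataloaders(a=2, b=3, batch_size=16)
#   x, y = next(iter(train_dl))
#   assert x.shape == (16, 1) and y.shape == (16, 1)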
def train ( num_epochs , model , dataloader , optimizer , accelerator , scheduler=None ):
    rands = []
    for epoch in range(num_epochs ):
        # Train quickly
        model.train()
        for batch in dataloader:
            x, y = batch
            outputs = model(x )
            loss = torch.nn.functional.mse_loss(outputs , y )
            accelerator.backward(loss )
            optimizer.step()
            optimizer.zero_grad()
            rands.append(random.random() ) # Introduce some randomness
            if scheduler is not None:
                scheduler.step()
    return rands
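# Illustrative property (a sketch, not part of the original tests): given already
# prepared `model`, `train_dataloader`, `optimizer`, `accelerator`, the `rands` list is
# one `random.random()` draw per optimizer step, so it is deterministic after set_seed;
# the checkpoint tests below compare these lists to verify that training resumed:
#   set_seed(42)
#   rands_a = train(2, model, train_dataloader, optimizer, accelerator)
#   set_seed(42)
#   rands_b = train(2, model, train_dataloader, optimizer, accelerator)
#   assert rands_a == rands_b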
class DummyModel (nn.Module ):
"""simple docstring"""
    def __init__( self : str ):
        super().__init__()
        self.a = nn.Parameter(torch.randn(1 ) )
        self.b = nn.Parameter(torch.randn(1 ) )
    def forward ( self : str , x : int ):
        return x * self.a + self.b
class __SCREAMING_SNAKE_CASE (unittest.TestCase ):
"""simple docstring"""
    def test_with_save_limit ( self : List[Any] ):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42 )
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(total_limit=1 , project_dir=tmpdir , automatic_checkpoint_naming=True )
            # Train baseline
            accelerator = Accelerator(project_config=project_config )
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model , optimizer , train_dataloader , valid_dataloader )
            # Save initial
            accelerator.save_state()
            # Save second state
            accelerator.save_state()
            self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )
def UpperCamelCase__ ( self : List[str] ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
_a = DummyModel()
_a = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
_a = dummy_dataloaders()
# Train baseline
_a = Accelerator()
_a = accelerator.prepare(
__a , __a , __a , __a )
# Save initial
_a = os.path.join(__a , "initial" )
accelerator.save_state(__a )
(_a) = model.a.item(), model.b.item()
_a = optimizer.state_dict()
_a = train(3 , __a , __a , __a , __a )
(_a) = model.a.item(), model.b.item()
_a = optimizer.state_dict()
# Train partially
set_seed(42 )
_a = DummyModel()
_a = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
_a = dummy_dataloaders()
_a = Accelerator()
_a = accelerator.prepare(
__a , __a , __a , __a )
accelerator.load_state(__a )
(_a) = model.a.item(), model.b.item()
_a = optimizer.state_dict()
self.assertEqual(__a , __a )
self.assertEqual(__a , __a )
self.assertEqual(__a , __a )
_a = train(2 , __a , __a , __a , __a )
# Save everything
_a = os.path.join(__a , "checkpoint" )
accelerator.save_state(__a )
# Load everything back in and make sure all states work
accelerator.load_state(__a )
test_rands += train(1 , __a , __a , __a , __a )
(_a) = model.a.item(), model.b.item()
_a = optimizer.state_dict()
self.assertEqual(__a , __a )
self.assertEqual(__a , __a )
self.assertEqual(__a , __a )
self.assertEqual(__a , __a )
def UpperCamelCase__ ( self : Optional[int] ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
_a = DummyModel()
_a = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
_a = dummy_dataloaders()
_a = ProjectConfiguration(automatic_checkpoint_naming=__a )
# Train baseline
_a = Accelerator(project_dir=__a , project_config=__a )
_a = accelerator.prepare(
__a , __a , __a , __a )
# Save initial
accelerator.save_state()
(_a) = model.a.item(), model.b.item()
_a = optimizer.state_dict()
_a = train(3 , __a , __a , __a , __a )
(_a) = model.a.item(), model.b.item()
_a = optimizer.state_dict()
# Train partially
set_seed(42 )
_a = DummyModel()
_a = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
_a = dummy_dataloaders()
_a = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=__a )
_a = Accelerator(project_dir=__a , project_config=__a )
_a = accelerator.prepare(
__a , __a , __a , __a )
accelerator.load_state(os.path.join(__a , "checkpoints" , "checkpoint_0" ) )
(_a) = model.a.item(), model.b.item()
_a = optimizer.state_dict()
self.assertEqual(__a , __a )
self.assertEqual(__a , __a )
self.assertEqual(__a , __a )
_a = train(2 , __a , __a , __a , __a )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(__a , "checkpoints" , "checkpoint_1" ) )
test_rands += train(1 , __a , __a , __a , __a )
(_a) = model.a.item(), model.b.item()
_a = optimizer.state_dict()
self.assertEqual(__a , __a )
self.assertEqual(__a , __a )
self.assertEqual(__a , __a )
self.assertEqual(__a , __a )
    def test_invalid_registration ( self : Any ):
        t = torch.tensor([1, 2, 3] )
        t1 = torch.tensor([2, 3, 4] )
        net = DummyModel()
        opt = torch.optim.Adam(net.parameters() )
        accelerator = Accelerator()
        with self.assertRaises(ValueError ) as ve:
            accelerator.register_for_checkpointing(t , t1 , net , opt )
        message = str(ve.exception )
        self.assertTrue("Item at index 0" in message )
        self.assertTrue("Item at index 1" in message )
        self.assertFalse("Item at index 2" in message )
        self.assertFalse("Item at index 3" in message )
def UpperCamelCase__ ( self : List[Any] ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
_a = DummyModel()
_a = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
_a = torch.optim.lr_scheduler.StepLR(__a , step_size=1 , gamma=0.99 )
_a = dummy_dataloaders()
_a = ProjectConfiguration(automatic_checkpoint_naming=__a )
# Train baseline
_a = Accelerator(project_dir=__a , project_config=__a )
_a = accelerator.prepare(
__a , __a , __a , __a , __a )
# Save initial
accelerator.save_state()
_a = scheduler.state_dict()
train(3 , __a , __a , __a , __a , __a )
self.assertNotEqual(__a , scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(__a , "checkpoints" , "checkpoint_0" ) )
self.assertEqual(__a , scheduler.state_dict() )
def UpperCamelCase__ ( self : Tuple ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
_a = DummyModel()
_a = ProjectConfiguration(automatic_checkpoint_naming=__a , total_limit=2 )
# Train baseline
_a = Accelerator(project_dir=__a , project_config=__a )
_a = accelerator.prepare(__a )
# Save 3 states:
for _ in range(11 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(__a , "checkpoints" , "checkpoint_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(__a , "checkpoints" , "checkpoint_9" ) ) )
self.assertTrue(os.path.exists(os.path.join(__a , "checkpoints" , "checkpoint_10" ) ) )
@require_cuda
    def test_map_location ( self : Dict ):
        cmd = ["torchrun", f'--nproc_per_node={torch.cuda.device_count()}', inspect.getfile(self.__class__ )]
        execute_subprocess_async(cmd , env=os.environ.copy() )
if __name__ == "__main__":
    savedir = '/tmp/accelerate/state_checkpointing'
    model = DummyModel()
    optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
    train_dataloader, valid_dataloader = dummy_dataloaders()
    project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
    # Train baseline
    accelerator = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision='no')
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
    model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, valid_dataloader, scheduler
    )
    model, optimizer = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
# Check that the intial optimizer is loaded on the GPU
for group in optimizer.param_groups:
        param_device = group['params'][0].device
break
assert param_device.type == accelerator.device.type
    model = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='cpu')
for group in optimizer.param_groups:
        param_device = group['params'][0].device
break
assert (
param_device.type == torch.device('cpu').type
), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='on_device')
for group in optimizer.param_groups:
        param_device = group['params'][0].device
break
assert (
param_device.type == accelerator.device.type
), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match='Unsupported optimizer map location passed'):
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='invalid')
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
| 356 |
'''simple docstring'''
import logging
import os
import threading
import time
try:
import warnings
except ImportError:
    warnings = None
try:
import msvcrt
except ImportError:
    msvcrt = None
try:
import fcntl
except ImportError:
    fcntl = None
# Backward compatibility
# ------------------------------------------------
try:
TimeoutError
except NameError:
    TimeoutError = OSError
# Data
# ------------------------------------------------
__all__ = [
'Timeout',
'BaseFileLock',
'WindowsFileLock',
'UnixFileLock',
'SoftFileLock',
'FileLock',
]
__version__ = '3.0.12'
_logger = None
def logger ( ):
    global _logger
    _logger = _logger or logging.getLogger(__name__ )
    return _logger
class Timeout (TimeoutError ):
"""simple docstring"""
    def __init__( self : Dict , lock_file : Optional[Any] ):
        self.lock_file = lock_file
        return None
    def __str__( self : Any ):
        temp = f'The file lock \'{self.lock_file}\' could not be acquired.'
        return temp
class _Acquire_ReturnProxy :
"""simple docstring"""
    def __init__( self : List[Any] , lock : Optional[int] ):
        self.lock = lock
        return None
def __enter__( self : str ):
return self.lock
def __exit__( self : List[Any] , __a : List[Any] , __a : Union[str, Any] , __a : Dict ):
self.lock.release()
return None
class BaseFileLock :
"""simple docstring"""
    def __init__( self : Union[str, Any] , lock_file : Union[str, Any] , timeout : Optional[int]=-1 , max_filename_length : Tuple=None ):
        max_filename_length = max_filename_length if max_filename_length is not None else 2_55
        # Hash the filename if it's too long
        lock_file = self.hash_filename_if_too_long(lock_file , max_filename_length )
        # The path to the lock file.
        self._lock_file = lock_file
        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.
        # This file lock is only NOT None, if the object currently holds the
        # lock.
        self._lock_file_fd = None
        # The default timeout value.
        self.timeout = timeout
        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()
        # The lock counter is used for implementing the nested locking
        # mechanism. Whenever the lock is acquired, the counter is increased and
        # the lock is only released, when this value is 0 again.
        self._lock_counter = 0
        return None
    @property
    def lock_file ( self : Optional[Any] ):
        return self._lock_file
    @property
    def timeout ( self : List[Any] ):
        return self._timeout
    @timeout.setter
    def timeout ( self : int , value : List[Any] ):
        self._timeout = float(value )
        return None
    def _acquire ( self : Dict ):
        raise NotImplementedError()
    def _release ( self : str ):
        raise NotImplementedError()
    @property
    def is_locked ( self : Optional[Any] ):
        return self._lock_file_fd is not None
    def acquire ( self : int , timeout : int=None , poll_intervall : Tuple=0.05 ):
        # Use the default timeout, if no timeout is provided.
        if timeout is None:
            timeout = self.timeout
        # Increment the number right at the beginning.
        # We can still undo it, if something fails.
        with self._thread_lock:
            self._lock_counter += 1
        lock_id = id(self )
        lock_filename = self._lock_file
        start_time = time.time()
        try:
            while True:
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(f'Attempting to acquire lock {lock_id} on {lock_filename}' )
                        self._acquire()
                if self.is_locked:
                    logger().debug(f'Lock {lock_id} acquired on {lock_filename}' )
                    break
                elif timeout >= 0 and time.time() - start_time > timeout:
                    logger().debug(f'Timeout on acquiring lock {lock_id} on {lock_filename}' )
                    raise Timeout(self._lock_file )
                else:
                    logger().debug(
                        f'Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...' )
                    time.sleep(poll_intervall )
        except: # noqa
            # Something did go wrong, so decrement the counter.
            with self._thread_lock:
                self._lock_counter = max(0 , self._lock_counter - 1 )
            raise
        return _Acquire_ReturnProxy(lock=self )
    def release ( self : Union[str, Any] , force : int=False ):
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1
                if self._lock_counter == 0 or force:
                    lock_id = id(self )
                    lock_filename = self._lock_file
                    logger().debug(f'Attempting to release lock {lock_id} on {lock_filename}' )
                    self._release()
                    self._lock_counter = 0
                    logger().debug(f'Lock {lock_id} released on {lock_filename}' )
        return None
def __enter__( self : List[Any] ):
self.acquire()
return self
def __exit__( self : str , __a : str , __a : Dict , __a : Dict ):
self.release()
return None
    def __del__( self : int ):
        self.release(force=True )
        return None
    def hash_filename_if_too_long ( self : Tuple , path : str , max_length : int ):
        filename = os.path.basename(path )
        if len(filename ) > max_length and max_length > 0:
            dirname = os.path.dirname(path )
            hashed_filename = str(hash(filename ) )
            new_filename = filename[: max_length - len(hashed_filename ) - 8] + "..." + hashed_filename + ".lock"
            return os.path.join(dirname , new_filename )
        else:
            return path
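# Example sketch (illustrative): with the default 255-character limit, an over-long
# basename is rewritten to "<prefix>...<hash>.lock" of exactly the maximum length
# (SoftFileLock, used here, is defined further below):
#   lock = SoftFileLock("/tmp/" + "a" * 300 + ".lock")
#   assert len(os.path.basename(lock.lock_file)) <= 255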
class WindowsFileLock (BaseFileLock ):
"""simple docstring"""
    def __init__( self : int , lock_file : str , timeout : List[Any]=-1 , max_filename_length : List[Any]=None ):
        from .file_utils import relative_to_absolute_path
        super().__init__(lock_file , timeout=timeout , max_filename_length=max_filename_length )
        self._lock_file = "\\\\?\\" + relative_to_absolute_path(self.lock_file )
    def _acquire ( self : int ):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        try:
            fd = os.open(self._lock_file , open_mode )
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd , msvcrt.LK_NBLCK , 1 )
            except OSError:
                os.close(fd )
            else:
                self._lock_file_fd = fd
        return None
    def _release ( self : Optional[Any] ):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd , msvcrt.LK_UNLCK , 1 )
        os.close(fd )
        try:
            os.remove(self._lock_file )
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass
        return None
class UnixFileLock (BaseFileLock ):
"""simple docstring"""
    def __init__( self : List[str] , lock_file : Optional[Any] , timeout : Union[str, Any]=-1 , max_filename_length : int=None ):
        max_filename_length = os.statvfs(os.path.dirname(lock_file ) ).f_namemax
        super().__init__(lock_file , timeout=timeout , max_filename_length=max_filename_length )
    def _acquire ( self : Any ):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file , open_mode )
        try:
            fcntl.flock(fd , fcntl.LOCK_EX | fcntl.LOCK_NB )
        except OSError:
            os.close(fd )
        else:
            self._lock_file_fd = fd
        return None
    def _release ( self : Tuple ):
        # Do not remove the lockfile:
        #
        # https://github.com/benediktschmitt/py-filelock/issues/31
        # https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd , fcntl.LOCK_UN )
        os.close(fd )
        return None
class SoftFileLock (BaseFileLock ):
"""simple docstring"""
    def _acquire ( self : Union[str, Any] ):
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
        try:
            fd = os.open(self._lock_file , open_mode )
        except OSError:
            pass
        else:
            self._lock_file_fd = fd
        return None
    def _release ( self : Union[str, Any] ):
        os.close(self._lock_file_fd )
        self._lock_file_fd = None
        try:
            os.remove(self._lock_file )
        # The file is already deleted and that's what we want.
        except OSError:
            pass
        return None
FileLock = None
if msvcrt:
    FileLock = WindowsFileLock
elif fcntl:
    FileLock = UnixFileLock
else:
    FileLock = SoftFileLock
    if warnings is not None:
        warnings.warn('only soft file lock is available')
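# Usage sketch for the platform-appropriate alias selected above:
#   lock = FileLock("high_ground.txt.lock")
#   with lock:  # blocks until the lock is acquired, releases automatically on exit
#       with open("high_ground.txt", "a") as f:
#           f.write("You were the chosen one.")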
| 346 | 0 |
'''simple docstring'''
from __future__ import annotations
def fractional_knapsack ( value : list[int] , weight : list[int] , capacity : int ) -> tuple[float, list[float]]:
    index = list(range(len(value ) ) )
    ratio = [v / w for v, w in zip(value , weight )]
    index.sort(key=lambda i : ratio[i] , reverse=True )
    max_value = 0
    fractions = [0] * len(value )
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break
    return max_value, fractions
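# Worked example (values chosen for illustration, not from the source): with capacity 5
# every item fits, so each fraction is 1 and the total value is 1 + 3 + 5 + 7 + 9 = 25:
#   fractional_knapsack([1, 3, 5, 7, 9], [0.9, 0.7, 0.5, 0.3, 0.1], 5)
#   # -> (25, [1, 1, 1, 1, 1])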
if __name__ == "__main__":
import doctest
doctest.testmod()
| 357 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class DifferentiableProjectiveCamera :
"""simple docstring"""
    origin: torch.Tensor # [batch_size x 3]
    x: torch.Tensor # [batch_size x 3]
    y: torch.Tensor # [batch_size x 3]
    z: torch.Tensor # [batch_size x 3]
    width: int
    height: int
    x_fov: float
    y_fov: float
    shape: Tuple[int]
    def __post_init__( self : str ):
assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2
    def resolution ( self : List[str] ):
        return torch.from_numpy(np.array([self.width, self.height] , dtype=np.float32 ) )
    def fov ( self : Union[str, Any] ):
        return torch.from_numpy(np.array([self.x_fov, self.y_fov] , dtype=np.float32 ) )
    def get_image_coords ( self : Union[str, Any] ):
        pixel_indices = torch.arange(self.height * self.width )
        coords = torch.stack(
            [
                pixel_indices % self.width,
                torch.div(pixel_indices , self.width , rounding_mode="trunc" ),
            ] , axis=1 , )
        return coords
@property
    def camera_rays ( self : List[Any] ):
        batch_size, *inner_shape = self.shape
        inner_batch_size = int(np.prod(inner_shape ) )
        coords = self.get_image_coords()
        coords = torch.broadcast_to(coords.unsqueeze(0 ) , [batch_size * inner_batch_size, *coords.shape] )
        rays = self.get_camera_rays(coords )
        rays = rays.view(batch_size , inner_batch_size * self.height * self.width , 2 , 3 )
        return rays
    def get_camera_rays ( self : Dict , coords : torch.Tensor ):
        batch_size, *shape, n_coords = coords.shape
        assert n_coords == 2
        assert batch_size == self.origin.shape[0]
        flat = coords.view(batch_size , -1 , 2 )
        res = self.resolution()
        fov = self.fov()
        fracs = (flat.float() / (res - 1)) * 2 - 1
        fracs = fracs * torch.tan(fov / 2 )
        fracs = fracs.view(batch_size , -1 , 2 )
        directions = (
            self.z.view(batch_size , 1 , 3 )
            + self.x.view(batch_size , 1 , 3 ) * fracs[:, :, :1]
            + self.y.view(batch_size , 1 , 3 ) * fracs[:, :, 1:]
        )
        directions = directions / directions.norm(dim=-1 , keepdim=True )
        rays = torch.stack(
            [
                torch.broadcast_to(self.origin.view(batch_size , 1 , 3 ) , [batch_size, directions.shape[1], 3] ),
                directions,
            ] , dim=2 , )
        return rays.view(batch_size , *shape , 2 , 3 )
    def resize_image ( self : Dict , width : int , height : int ):
        assert width * self.height == height * self.width, "The aspect ratio should not change."
        return DifferentiableProjectiveCamera(
            origin=self.origin , x=self.x , y=self.y , z=self.z , width=width , height=height , x_fov=self.x_fov , y_fov=self.y_fov , )
def create_pan_cameras ( size : int ) -> DifferentiableProjectiveCamera:
    origins = []
    xs = []
    ys = []
    zs = []
    for theta in np.linspace(0 , 2 * np.pi , num=20 ):
        z = np.array([np.sin(theta ), np.cos(theta ), -0.5] )
        z /= np.sqrt(np.sum(z**2 ) )
        origin = -z * 4
        x = np.array([np.cos(theta ), -np.sin(theta ), 0.0] )
        y = np.cross(z , x )
        origins.append(origin )
        xs.append(x )
        ys.append(y )
        zs.append(z )
    return DifferentiableProjectiveCamera(
        origin=torch.from_numpy(np.stack(origins , axis=0 ) ).float() , x=torch.from_numpy(np.stack(xs , axis=0 ) ).float() , y=torch.from_numpy(np.stack(ys , axis=0 ) ).float() , z=torch.from_numpy(np.stack(zs , axis=0 ) ).float() , width=size , height=size , x_fov=0.7 , y_fov=0.7 , shape=(1, len(xs )) , )
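# Usage sketch (illustrative): 20 cameras orbit the origin at radius 4, and
# `camera_rays` packs one (origin, unit direction) pair per pixel:
#   cameras = create_pan_cameras(64)
#   rays = cameras.camera_rays   # shape [1, 20 * 64 * 64, 2, 3]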
| 346 | 0 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import numpy as np
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForMaskedImageModeling,
HfArgumentParser,
Trainer,
TrainingArguments,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt')
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class DataTrainingArguments :
"""simple docstring"""
__a =field(
default='cifar10' , metadata={'help': 'Name of a dataset from the datasets package'} )
__a =field(
default=lowerCAmelCase_ , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
__a =field(
default=lowerCAmelCase_ , metadata={'help': 'The column name of the images in the files. If not set, will try to use \'image\' or \'img\'.'} , )
__a =field(default=lowerCAmelCase_ , metadata={'help': 'A folder containing the training data.'} )
__a =field(default=lowerCAmelCase_ , metadata={'help': 'A folder containing the validation data.'} )
__a =field(
default=0.15 , metadata={'help': 'Percent to split off of train for validation.'} )
__a =field(default=32 , metadata={'help': 'The size of the square patches to use for masking.'} )
__a =field(
default=0.6 , metadata={'help': 'Percentage of patches to mask.'} , )
__a =field(
default=lowerCAmelCase_ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
__a =field(
default=lowerCAmelCase_ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
    def __post_init__( self : Optional[Any] ):
        data_files = {}
        if self.train_dir is not None:
            data_files["train"] = self.train_dir
        if self.validation_dir is not None:
            data_files["validation"] = self.validation_dir
        self.data_files = data_files if data_files else None
@dataclass
class ModelArguments :
"""simple docstring"""
__a =field(
default=lowerCAmelCase_ , metadata={
'help': (
'The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a '
'checkpoint identifier on the hub. '
'Don\'t set if you want to train a model from scratch.'
)
} , )
__a =field(
default=lowerCAmelCase_ , metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(lowerCAmelCase_ )} , )
__a =field(
default=lowerCAmelCase_ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
__a =field(
default=lowerCAmelCase_ , metadata={
'help': (
'Override some existing default config settings when a model is trained from scratch. Example: '
'n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'
)
} , )
__a =field(
default=lowerCAmelCase_ , metadata={'help': 'Where do you want to store (cache) the pretrained models/datasets downloaded from the hub'} , )
__a =field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
__a =field(default=lowerCAmelCase_ , metadata={'help': 'Name or path of preprocessor config.'} )
__a =field(
default=lowerCAmelCase_ , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
__a =field(
default=lowerCAmelCase_ , metadata={
'help': (
'The size (resolution) of each image. If not specified, will use `image_size` of the configuration.'
)
} , )
__a =field(
default=lowerCAmelCase_ , metadata={
'help': (
'The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration.'
)
} , )
__a =field(
default=lowerCAmelCase_ , metadata={'help': 'Stride to use for the encoder.'} , )
class MaskGenerator :
    def __init__( self : Any , input_size : Optional[Any]=1_92 , mask_patch_size : List[str]=32 , model_patch_size : List[Any]=4 , mask_ratio : List[str]=0.6 ):
        self.input_size = input_size
        self.mask_patch_size = mask_patch_size
        self.model_patch_size = model_patch_size
        self.mask_ratio = mask_ratio
        if self.input_size % self.mask_patch_size != 0:
            raise ValueError("Input size must be divisible by mask patch size" )
        if self.mask_patch_size % self.model_patch_size != 0:
            raise ValueError("Mask patch size must be divisible by model patch size" )
        self.rand_size = self.input_size // self.mask_patch_size
        self.scale = self.mask_patch_size // self.model_patch_size
        self.token_count = self.rand_size**2
        self.mask_count = int(np.ceil(self.token_count * self.mask_ratio ) )
    def __call__( self : int ):
        mask_idx = np.random.permutation(self.token_count )[: self.mask_count]
        mask = np.zeros(self.token_count , dtype=int )
        mask[mask_idx] = 1
        mask = mask.reshape((self.rand_size, self.rand_size) )
        mask = mask.repeat(self.scale , axis=0 ).repeat(self.scale , axis=1 )
        return torch.tensor(mask.flatten() )
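# Example sketch with the defaults above (1_92-pixel input, 32-pixel mask patches,
# 4-pixel model patches, 60% ratio): the 6x6 grid of mask patches is upsampled to the
# 48x48 grid of model patches and returned flattened:
#   mask = MaskGenerator()()
#   assert mask.shape == (48 * 48,)        # 2304 model-patch positions
#   assert int(mask.sum()) == 22 * 64      # ceil(36 * 0.6) mask patches, 8x8 each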
def collate_fn ( examples ):
    pixel_values = torch.stack([example["pixel_values"] for example in examples] )
    mask = torch.stack([example["mask"] for example in examples] )
    return {"pixel_values": pixel_values, "bool_masked_pos": mask}
def main ( ):
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_mim" , model_args , data_args )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level )
    transformers.utils.logging.set_verbosity(log_level )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
+ F'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' )
logger.info(F'Training/evaluation parameters {training_args}' )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. '
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split , float ) and data_args.train_val_split > 0.0:
        split = ds["train"].train_test_split(data_args.train_val_split )
        ds["train"] = split["train"]
        ds["validation"] = split["test"]
# Create config
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name_or_path:
        config = AutoConfig.from_pretrained(model_args.config_name_or_path , **config_kwargs )
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path , **config_kwargs )
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch." )
        if model_args.config_overrides is not None:
            logger.info(f'Overriding config: {model_args.config_overrides}' )
            config.update_from_string(model_args.config_overrides )
            logger.info(f'New config: {config}' )
    # make sure the decoder_type is "simmim" (only relevant for BEiT)
    if hasattr(config , "decoder_type" ):
        config.decoder_type = "simmim"
    # adapt config
    model_args.image_size = model_args.image_size if model_args.image_size is not None else config.image_size
    model_args.patch_size = model_args.patch_size if model_args.patch_size is not None else config.patch_size
    model_args.encoder_stride = (
        model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride
    )
config.update(
{
"image_size": model_args.image_size,
"patch_size": model_args.patch_size,
"encoder_stride": model_args.encoder_stride,
} )
# create image processor
    if model_args.image_processor_name:
        image_processor = AutoImageProcessor.from_pretrained(model_args.image_processor_name , **config_kwargs )
    elif model_args.model_name_or_path:
        image_processor = AutoImageProcessor.from_pretrained(model_args.model_name_or_path , **config_kwargs )
    else:
        IMAGE_PROCESSOR_TYPES = {
            conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items()
        }
        image_processor = IMAGE_PROCESSOR_TYPES[model_args.model_type]()
    # create model
    if model_args.model_name_or_path:
        model = AutoModelForMaskedImageModeling.from_pretrained(
            model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    else:
        logger.info("Training new model from scratch" )
        model = AutoModelForMaskedImageModeling.from_config(config )
    if training_args.do_train:
        column_names = ds["train"].column_names
    else:
        column_names = ds["validation"].column_names
    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = "image"
    elif "img" in column_names:
        image_column_name = "img"
    else:
        image_column_name = column_names[0]
# transformations as done in original SimMIM paper
# source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py
    transforms = Compose(
        [
            Lambda(lambda img : img.convert("RGB" ) if img.mode != "RGB" else img ),
            RandomResizedCrop(model_args.image_size , scale=(0.67, 1.0) , ratio=(3.0 / 4.0, 4.0 / 3.0) ),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
        ] )
    # create mask generator
    mask_generator = MaskGenerator(
        input_size=model_args.image_size , mask_patch_size=data_args.mask_patch_size , model_patch_size=model_args.patch_size , mask_ratio=data_args.mask_ratio , )
    def preprocess_images(examples : Any ):
        examples["pixel_values"] = [transforms(image ) for image in examples[image_column_name]]
        examples["mask"] = [mask_generator() for i in range(len(examples[image_column_name] ) )]
        return examples
return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError("--do_train requires a train dataset" )
        if data_args.max_train_samples is not None:
            ds["train"] = ds["train"].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
        # Set the training transforms
        ds["train"].set_transform(preprocess_images )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError("--do_eval requires a validation dataset" )
        if data_args.max_eval_samples is not None:
            ds["validation"] = (
                ds["validation"].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images )
# Initialize our trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=ds["train"] if training_args.do_train else None , eval_dataset=ds["validation"] if training_args.do_eval else None , tokenizer=image_processor , data_collator=collate_fn , )
# Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
trainer.save_model()
trainer.log_metrics("train" , train_result.metrics )
trainer.save_metrics("train" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval" , metrics )
        trainer.save_metrics("eval" , metrics )
# Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "masked-image-modeling",
        "dataset": data_args.dataset_name,
        "tags": ["masked-image-modeling"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs )
    else:
        trainer.create_model_card(**kwargs )
if __name__ == "__main__":
main()
| 358 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
lowerCAmelCase_ : List[str] = TypeVar('T')
lowerCAmelCase_ : Dict = TypeVar('U')
class DoubleLinkedListNode (Generic[T, U] ):
"""simple docstring"""
    def __init__( self : Union[str, Any] , key : T | None , val : U | None ):
        self.key = key
        self.val = val
        self.next = None
        self.prev = None
def __repr__( self : Any ):
return (
f'Node: key: {self.key}, val: {self.val}, '
f'has next: {bool(self.next )}, has prev: {bool(self.prev )}'
)
class DoubleLinkedList (Generic[T, U] ):
"""simple docstring"""
    def __init__( self : Dict ):
        self.head = DoubleLinkedListNode(None , None )
        self.rear = DoubleLinkedListNode(None , None )
        self.head.next, self.rear.prev = self.rear, self.head
def __repr__( self : str ):
        rep = ["DoubleLinkedList"]
        node = self.head
        while node.next is not None:
            rep.append(str(node ) )
            node = node.next
        rep.append(str(self.rear ) )
        return ",\n ".join(rep )
    def add ( self : int , node : DoubleLinkedListNode[T, U] ):
        previous = self.rear.prev
        # All nodes other than self.head are guaranteed to have non-None previous
        assert previous is not None
        previous.next = node
        node.prev = previous
        self.rear.prev = node
        node.next = self.rear
def UpperCamelCase__ ( self : Any , __a : DoubleLinkedListNode[T, U] ):
if node.prev is None or node.next is None:
return None
_a = node.next
_a = node.prev
_a = None
_a = None
return node
class __SCREAMING_SNAKE_CASE (Generic[T, U] ):
"""simple docstring"""
__a ={}
def __init__( self : Union[str, Any] , __a : int ):
_a = DoubleLinkedList()
_a = capacity
_a = 0
_a = 0
_a = 0
_a = {}
def __repr__( self : Optional[int] ):
return (
f'CacheInfo(hits={self.hits}, misses={self.miss}, '
f'capacity={self.capacity}, current size={self.num_keys})'
)
def __contains__( self : str , __a : T ):
return key in self.cache
def UpperCamelCase__ ( self : str , __a : T ):
# Note: pythonic interface would throw KeyError rather than return None
if key in self.cache:
self.hits += 1
_a = self.cache[key]
_a = self.list.remove(self.cache[key] )
assert node == value_node
# node is guaranteed not None because it is in self.cache
assert node is not None
self.list.add(__a )
return node.val
self.miss += 1
return None
def UpperCamelCase__ ( self : Tuple , __a : T , __a : U ):
if key not in self.cache:
if self.num_keys >= self.capacity:
# delete first node (oldest) when over capacity
_a = self.list.head.next
# guaranteed to have a non-None first node when num_keys > 0
# explain to type checker via assertions
assert first_node is not None
assert first_node.key is not None
assert (
self.list.remove(__a ) is not None
                ) # node guaranteed to be in list
del self.cache[first_node.key]
self.num_keys -= 1
_a = DoubleLinkedListNode(__a , __a )
self.list.add(self.cache[key] )
self.num_keys += 1
else:
# bump node to the end of the list, update value
_a = self.list.remove(self.cache[key] )
assert node is not None # node guaranteed to be in list
_a = value
self.list.add(__a )
@classmethod
def UpperCamelCase__ ( cls : Tuple , __a : int = 1_28 ):
def cache_decorator_inner(__a : Callable[[T], U] ) -> Callable[..., U]:
def cache_decorator_wrapper(*__a : T ) -> U:
if func not in cls.decorator_function_to_instance_map:
_a = LRUCache(__a )
_a = cls.decorator_function_to_instance_map[func].get(args[0] )
if result is None:
_a = func(*__a )
cls.decorator_function_to_instance_map[func].put(args[0] , __a )
return result
def cache_info() -> LRUCache[T, U]:
return cls.decorator_function_to_instance_map[func]
setattr(__a , "cache_info" , __a ) # noqa: B010
return cache_decorator_wrapper
return cache_decorator_inner
if __name__ == "__main__":
import doctest
doctest.testmod()
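    # Added self-contained sketch (not part of the original file): the same
    # least-recently-used policy as the class above, restated with
    # collections.OrderedDict as a quick sanity check.
    from collections import OrderedDict

    demo = OrderedDict()
    for key, val in [(1, 1), (2, 2), (1, 10), (3, 3)]:  # capacity 2
        if key in demo:
            demo.move_to_end(key)  # a hit makes the key most recently used
        elif len(demo) >= 2:
            demo.popitem(last=False)  # evict the least recently used key
        demo[key] = val
    assert list(demo.items()) == [(1, 10), (3, 3)]  # key 2 was evicted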
| 346 | 0 |
'''simple docstring'''
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
lowerCAmelCase_ : str = (
string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
lowerCAmelCase_ : list[int] = [ord(letter) for letter in string.ascii_lowercase]
lowerCAmelCase_ : set[int] = {ord(char) for char in VALID_CHARS}
lowerCAmelCase_ : list[str] = ["the", "be", "to", "of", "and", "in", "that", "have"]
def _lowerCamelCase ( lowercase : list[int] , lowercase : tuple[int, ...] ) -> str | None:
_a = ""
_a = 42
_a = 42
_a = 42
for keychar, cipherchar in zip(cycle(_UpperCamelCase ) , _UpperCamelCase ):
_a = cipherchar ^ keychar
if decodedchar not in VALID_INTS:
return None
decoded += chr(_UpperCamelCase )
return decoded
def _lowerCamelCase ( lowercase : list[int] ) -> list[str]:
_a = []
for key in product(_UpperCamelCase , repeat=3 ):
_a = try_key(_UpperCamelCase , _UpperCamelCase )
if encoded is not None:
possibles.append(_UpperCamelCase )
return possibles
def _lowerCamelCase ( lowercase : list[str] , lowercase : str ) -> list[str]:
return [possible for possible in possibles if common_word in possible.lower()]
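# Added illustration (not in the original): XOR with a repeating key is its own
# inverse, which is why the exhaustive key search above can recover the text.
_demo_key = [ord("a"), ord("b"), ord("c")]
_demo_cipher = [ord(c) ^ k for c, k in zip("the cat", cycle(_demo_key))]
assert "".join(chr(c ^ k) for c, k in zip(_demo_cipher, cycle(_demo_key))) == "the cat"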
def _lowerCamelCase ( lowercase : str = "p059_cipher.txt" ) -> int:
_a = 42
_a = 42
_a = 42
_a = 42
_a = Path(_UpperCamelCase ).parent.joinpath(_UpperCamelCase ).read_text(encoding="utf-8" )
_a = [int(_UpperCamelCase ) for number in data.strip().split("," )]
_a = filter_valid_chars(_UpperCamelCase )
for common_word in COMMON_WORDS:
_a = filter_common_word(_UpperCamelCase , _UpperCamelCase )
if len(_UpperCamelCase ) == 1:
break
_a = possibles[0]
return sum(ord(_UpperCamelCase ) for char in decoded_text )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 359 |
'''simple docstring'''
import re
from filelock import FileLock
try:
import nltk
lowerCAmelCase_ : Optional[int] = True
except (ImportError, ModuleNotFoundError):
lowerCAmelCase_ : Tuple = False
if NLTK_AVAILABLE:
with FileLock('.lock') as lock:
nltk.download('punkt', quiet=True)
def _lowerCamelCase ( lowercase : str ) -> str:
re.sub("<n>" , "" , lowercase ) # remove pegasus newline char
assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
return "\n".join(nltk.sent_tokenize(lowercase ) )
| 346 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ : Dict = logging.get_logger(__name__)
lowerCAmelCase_ : int = {
"""MIT/ast-finetuned-audioset-10-10-0.4593""": (
"""https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"""
),
}
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
__a ='audio-spectrogram-transformer'
def __init__( self : Any , __a : Tuple=7_68 , __a : List[Any]=12 , __a : List[str]=12 , __a : List[Any]=30_72 , __a : Tuple="gelu" , __a : int=0.0 , __a : int=0.0 , __a : List[str]=0.02 , __a : Optional[int]=1e-1_2 , __a : str=16 , __a : Optional[Any]=True , __a : int=10 , __a : List[str]=10 , __a : Tuple=10_24 , __a : Dict=1_28 , **__a : Dict , ):
super().__init__(**__lowerCAmelCase )
_a = hidden_size
_a = num_hidden_layers
_a = num_attention_heads
_a = intermediate_size
_a = hidden_act
_a = hidden_dropout_prob
_a = attention_probs_dropout_prob
_a = initializer_range
_a = layer_norm_eps
_a = patch_size
_a = qkv_bias
_a = frequency_stride
_a = time_stride
_a = max_length
_a = num_mel_bins
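        # Added note (hedged): with these defaults (128 mel bins x 1024 frames,
        # 16x16 patches, and a stride of 10 in both frequency and time) the
        # model sees the overlapping patch grid described in the AST paper.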
| 360 |
'''simple docstring'''
import requests
lowerCAmelCase_ : List[Any] = 'YOUR API KEY'
def _lowerCamelCase ( lowercase : str , lowercase : str = giphy_api_key ) -> list:
_a = "+".join(query.split() )
_a = F'https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}'
_a = requests.get(lowercase ).json()["data"]
return [gif["url"] for gif in gifs]
if __name__ == "__main__":
print('\n'.join(get_gifs('space ship')))
| 346 | 0 |
'''simple docstring'''
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
__a =42
__a =None
__a =None
lowerCAmelCase_ : str = namedtuple('CoinsDistribResult', 'moves excess')
def _lowerCamelCase ( lowercase : Optional[Any] ) -> int:
if root is None:
return 0
# Validation
def count_nodes(lowercase : Dict ) -> int:
if node is None:
return 0
return count_nodes(node.left ) + count_nodes(node.right ) + 1
def count_coins(lowercase : Optional[int] ) -> int:
if node is None:
return 0
return count_coins(node.left ) + count_coins(node.right ) + node.data
if count_nodes(a__ ) != count_coins(a__ ):
raise ValueError("The nodes number should be same as the number of coins" )
# Main calculation
def get_distrib(lowercase : Union[str, Any] ) -> CoinsDistribResult:
if node is None:
return CoinsDistribResult(0 , 1 )
_a , _a = get_distrib(node.left )
_a , _a = get_distrib(node.right )
_a = 1 - left_distrib_excess
_a = 1 - right_distrib_excess
_a = (
left_distrib_moves
+ right_distrib_moves
+ abs(a__ )
+ abs(a__ )
)
_a = node.data - coins_to_left - coins_to_right
return CoinsDistribResult(a__ , a__ )
return get_distrib(a__ )[0]
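# Added worked example (not in the original) with a tiny self-contained node
# type so it runs on its own. It uses the equivalent "excess sent to the
# parent" formulation of the same recursion: a root holding 3 coins with two
# empty leaves needs exactly 2 moves.
@dataclass
class _DemoNode:
    data: int
    left: _DemoNode | None = None
    right: _DemoNode | None = None

def _demo_moves(node: _DemoNode | None) -> tuple[int, int]:
    # returns (moves, excess coins this subtree passes up to its parent)
    if node is None:
        return 0, 0
    left_moves, left_excess = _demo_moves(node.left)
    right_moves, right_excess = _demo_moves(node.right)
    excess = node.data + left_excess + right_excess - 1
    return left_moves + right_moves + abs(left_excess) + abs(right_excess), excess

assert _demo_moves(_DemoNode(3, _DemoNode(0), _DemoNode(0)))[0] == 2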
if __name__ == "__main__":
import doctest
doctest.testmod()
| 361 |
'''simple docstring'''
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase_ : str = '▁'
lowerCAmelCase_ : Optional[int] = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ , unittest.TestCase ):
"""simple docstring"""
__a =BertGenerationTokenizer
__a =False
__a =True
def UpperCamelCase__ ( self : Optional[Any] ):
super().setUp()
_a = BertGenerationTokenizer(__a , keep_accents=__a )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase__ ( self : Tuple ):
_a = "<s>"
_a = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__a ) , __a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__a ) , __a )
def UpperCamelCase__ ( self : List[str] ):
_a = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<unk>" )
self.assertEqual(vocab_keys[1] , "<s>" )
self.assertEqual(vocab_keys[-1] , "<pad>" )
self.assertEqual(len(__a ) , 10_02 )
def UpperCamelCase__ ( self : str ):
self.assertEqual(self.get_tokenizer().vocab_size , 10_00 )
def UpperCamelCase__ ( self : Tuple ):
_a = BertGenerationTokenizer(__a , keep_accents=__a )
_a = tokenizer.tokenize("This is a test" )
self.assertListEqual(__a , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__a ) , [2_85, 46, 10, 1_70, 3_82] , )
_a = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
__a , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
_a = tokenizer.convert_tokens_to_ids(__a )
self.assertListEqual(
__a , [8, 21, 84, 55, 24, 19, 7, 0, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
_a = tokenizer.convert_ids_to_tokens(__a )
self.assertListEqual(
__a , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
@cached_property
def UpperCamelCase__ ( self : Any ):
return BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" )
@slow
def UpperCamelCase__ ( self : List[str] ):
_a = "Hello World!"
_a = [1_85_36, 22_60, 1_01]
self.assertListEqual(__a , self.big_tokenizer.encode(__a ) )
@slow
def UpperCamelCase__ ( self : Optional[int] ):
_a = (
"This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
)
_a = [
8_71,
4_19,
3_58,
9_46,
9_91,
25_21,
4_52,
3_58,
13_57,
3_87,
77_51,
35_36,
1_12,
9_85,
4_56,
1_26,
8_65,
9_38,
54_00,
57_34,
4_58,
13_68,
4_67,
7_86,
24_62,
52_46,
11_59,
6_33,
8_65,
45_19,
4_57,
5_82,
8_52,
25_57,
4_27,
9_16,
5_08,
4_05,
3_43_24,
4_97,
3_91,
4_08,
1_13_42,
12_44,
3_85,
1_00,
9_38,
9_85,
4_56,
5_74,
3_62,
1_25_97,
32_00,
31_29,
11_72,
]
self.assertListEqual(__a , self.big_tokenizer.encode(__a ) )
@require_torch
@slow
def UpperCamelCase__ ( self : Tuple ):
import torch
from transformers import BertGenerationConfig, BertGenerationEncoder
# Build sequence
_a = list(self.big_tokenizer.get_vocab().keys() )[:10]
_a = " ".join(__a )
_a = self.big_tokenizer.encode_plus(__a , return_tensors="pt" , return_token_type_ids=__a )
_a = self.big_tokenizer.batch_encode_plus(
[sequence + " " + sequence] , return_tensors="pt" , return_token_type_ids=__a )
_a = BertGenerationConfig()
_a = BertGenerationEncoder(__a )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**__a )
model(**__a )
@slow
def UpperCamelCase__ ( self : Optional[int] ):
# fmt: off
_a = {"input_ids": [[3_92_86, 4_58, 3_63_35, 20_01, 4_56, 1_30_73, 1_32_66, 4_55, 1_13, 77_46, 17_41, 1_11_57, 3_91, 1_30_73, 1_32_66, 4_55, 1_13, 39_67, 3_54_12, 1_13, 49_36, 1_09, 38_70, 23_77, 1_13, 3_00_84, 4_57_20, 4_58, 1_34, 1_74_96, 1_12, 5_03, 1_16_72, 1_13, 1_18, 1_12, 56_65, 1_33_47, 3_86_87, 1_12, 14_96, 3_13_89, 1_12, 32_68, 4_72_64, 1_34, 9_62, 1_12, 1_63_77, 80_35, 2_31_30, 4_30, 1_21_69, 1_55_18, 2_85_92, 4_58, 1_46, 4_16_97, 1_09, 3_91, 1_21_69, 1_55_18, 1_66_89, 4_58, 1_46, 4_13_58, 1_09, 4_52, 7_26, 40_34, 1_11, 7_63, 3_54_12, 50_82, 3_88, 19_03, 1_11, 90_51, 3_91, 28_70, 4_89_18, 19_00, 11_23, 5_50, 9_98, 1_12, 95_86, 1_59_85, 4_55, 3_91, 4_10, 2_29_55, 3_76_36, 1_14], [4_48, 1_74_96, 4_19, 36_63, 3_85, 7_63, 1_13, 2_75_33, 28_70, 32_83, 1_30_43, 16_39, 2_47_13, 5_23, 6_56, 2_40_13, 1_85_50, 25_21, 5_17, 2_70_14, 2_12_44, 4_20, 12_12, 14_65, 3_91, 9_27, 48_33, 3_88, 5_78, 1_17_86, 1_14, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [4_84, 21_69, 76_87, 2_19_32, 1_81_46, 7_26, 3_63, 1_70_32, 33_91, 1_14, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__a , model_name="google/bert_for_seq_generation_L-24_bbc_encoder" , revision="c817d1fd1be2ffa69431227a1fe320544943d4db" , )
| 346 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCAmelCase_ : Optional[Any] = logging.get_logger(__name__)
lowerCAmelCase_ : List[str] = {
'shi-labs/nat-mini-in1k-224': 'https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json',
# See all Nat models at https://huggingface.co/models?filter=nat
}
class __SCREAMING_SNAKE_CASE (__snake_case , __snake_case ):
"""simple docstring"""
__a ='nat'
__a ={
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self : Dict , __a : Dict=4 , __a : Any=3 , __a : int=64 , __a : int=[3, 4, 6, 5] , __a : int=[2, 4, 8, 16] , __a : Dict=7 , __a : Union[str, Any]=3.0 , __a : int=True , __a : List[Any]=0.0 , __a : List[Any]=0.0 , __a : Optional[Any]=0.1 , __a : Optional[Any]="gelu" , __a : Dict=0.02 , __a : Any=1e-5 , __a : Optional[int]=0.0 , __a : Any=None , __a : int=None , **__a : Any , ):
super().__init__(**__a )
_a = patch_size
_a = num_channels
_a = embed_dim
_a = depths
_a = len(__a )
_a = num_heads
_a = kernel_size
_a = mlp_ratio
_a = qkv_bias
_a = hidden_dropout_prob
_a = attention_probs_dropout_prob
_a = drop_path_rate
_a = hidden_act
_a = layer_norm_eps
_a = initializer_range
# we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
_a = int(embed_dim * 2 ** (len(__a ) - 1) )
_a = layer_scale_init_value
_a = ["stem"] + [f'stage{idx}' for idx in range(1 , len(__a ) + 1 )]
_a , _a = get_aligned_output_features_output_indices(
out_features=__a , out_indices=__a , stage_names=self.stage_names )
| 362 |
'''simple docstring'''
def _lowerCamelCase ( lowercase : int , lowercase : list ) -> Union[str, Any]:
_enforce_args(lowercase , lowercase )
if n == 0:
return 0
_a = float("-inf" )
for i in range(1 , n + 1 ):
_a = max(
lowercase , prices[i - 1] + naive_cut_rod_recursive(n - i , lowercase ) )
return max_revue
def _lowerCamelCase ( lowercase : int , lowercase : list ) -> Tuple:
_enforce_args(lowercase , lowercase )
_a = [float("-inf" ) for _ in range(n + 1 )]
return _top_down_cut_rod_recursive(lowercase , lowercase , lowercase )
def _lowerCamelCase ( lowercase : int , lowercase : list , lowercase : list ) -> List[str]:
if max_rev[n] >= 0:
return max_rev[n]
elif n == 0:
return 0
else:
_a = float("-inf" )
for i in range(1 , n + 1 ):
_a = max(
lowercase , prices[i - 1] + _top_down_cut_rod_recursive(n - i , lowercase , lowercase ) , )
_a = max_revenue
return max_rev[n]
def _lowerCamelCase ( lowercase : int , lowercase : list ) -> Any:
_enforce_args(lowercase , lowercase )
    # length(max_rev) = n + 1, to accommodate the revenue obtainable from a rod
    # of length 0.
_a = [float("-inf" ) for _ in range(n + 1 )]
_a = 0
for i in range(1 , n + 1 ):
_a = max_rev[i]
for j in range(1 , i + 1 ):
_a = max(lowercase , prices[j - 1] + max_rev[i - j] )
_a = max_revenue_i
return max_rev[n]
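# Added self-contained reference (not in the original): the bottom-up
# recurrence used above, best[i] = max over j of prices[j - 1] + best[i - j],
# checked against the known optimum for the example prices used in main() below.
def _bottom_up_demo(prices: list) -> int:
    best = [0] * (len(prices) + 1)
    for i in range(1, len(prices) + 1):
        best[i] = max(prices[j - 1] + best[i - j] for j in range(1, i + 1))
    return best[-1]

assert _bottom_up_demo([6, 10, 12, 15, 20, 23]) == 36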
def _lowerCamelCase ( lowercase : int , lowercase : list ) -> Dict:
if n < 0:
_a = F'n must be greater than or equal to 0. Got n = {n}'
raise ValueError(lowercase )
if n > len(lowercase ):
_a = (
"Each integral piece of rod must have a corresponding price. "
F'Got n = {n} but length of prices = {len(lowercase )}'
)
raise ValueError(lowercase )
def _lowerCamelCase ( ) -> Any:
_a = [6, 10, 12, 15, 20, 23]
_a = len(lowercase )
# the best revenue comes from cutting the rod into 6 pieces, each
# of length 1 resulting in a revenue of 6 * 6 = 36.
_a = 36
_a = top_down_cut_rod(lowercase , lowercase )
_a = bottom_up_cut_rod(lowercase , lowercase )
_a = naive_cut_rod_recursive(lowercase , lowercase )
assert expected_max_revenue == max_rev_top_down
assert max_rev_top_down == max_rev_bottom_up
assert max_rev_bottom_up == max_rev_naive
if __name__ == "__main__":
main()
| 346 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase_ : List[str] = logging.get_logger(__name__)
lowerCAmelCase_ : Union[str, Any] = {
'junnyu/roformer_chinese_small': 'https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json',
'junnyu/roformer_chinese_base': 'https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json',
'junnyu/roformer_chinese_char_small': (
'https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json'
),
'junnyu/roformer_chinese_char_base': (
'https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json'
),
'junnyu/roformer_small_discriminator': (
'https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json'
),
'junnyu/roformer_small_generator': (
'https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json'
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
__a ='''roformer'''
def __init__( self : int , __a : Union[str, Any]=5_00_00 , __a : int=None , __a : str=7_68 , __a : List[str]=12 , __a : Any=12 , __a : List[Any]=30_72 , __a : List[Any]="gelu" , __a : Optional[int]=0.1 , __a : str=0.1 , __a : List[Any]=15_36 , __a : Union[str, Any]=2 , __a : Tuple=0.02 , __a : int=1e-1_2 , __a : List[str]=0 , __a : Dict=False , __a : Dict=True , **__a : Tuple , ):
super().__init__(pad_token_id=_a , **_a )
_a = vocab_size
_a = hidden_size if embedding_size is None else embedding_size
_a = hidden_size
_a = num_hidden_layers
_a = num_attention_heads
_a = hidden_act
_a = intermediate_size
_a = hidden_dropout_prob
_a = attention_probs_dropout_prob
_a = max_position_embeddings
_a = type_vocab_size
_a = initializer_range
_a = layer_norm_eps
_a = rotary_value
_a = use_cache
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
@property
def UpperCamelCase__ ( self : Optional[int] ):
if self.task == "multiple-choice":
_a = {0: "batch", 1: "choice", 2: "sequence"}
else:
_a = {0: "batch", 1: "sequence"}
_a = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("token_type_ids", dynamic_axis),
] )
| 363 |
'''simple docstring'''
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
lowerCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
@add_end_docstrings(lowerCamelCase_ )
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
def __init__( self : List[Any] , *__a : Optional[int] , **__a : List[str] ):
super().__init__(*__a , **__a )
self.check_model_type(__a )
def UpperCamelCase__ ( self : Optional[Any] , __a : Dict=None , __a : int=None , __a : Optional[Any]=None , **__a : List[Any] ):
_a , _a = {}, {}
if padding is not None:
_a = padding
if truncation is not None:
_a = truncation
if top_k is not None:
_a = top_k
return preprocess_params, {}, postprocess_params
def __call__( self : Union[str, Any] , __a : Union["Image.Image", str] , __a : str = None , **__a : Any ):
if isinstance(__a , (Image.Image, str) ) and isinstance(__a , __a ):
_a = {"image": image, "question": question}
else:
_a = image
_a = super().__call__(__a , **__a )
return results
def UpperCamelCase__ ( self : Tuple , __a : Tuple , __a : Optional[Any]=False , __a : List[Any]=False ):
_a = load_image(inputs["image"] )
_a = self.tokenizer(
inputs["question"] , return_tensors=self.framework , padding=__a , truncation=__a )
_a = self.image_processor(images=__a , return_tensors=self.framework )
model_inputs.update(__a )
return model_inputs
def UpperCamelCase__ ( self : List[Any] , __a : List[str] ):
_a = self.model(**__a )
return model_outputs
def UpperCamelCase__ ( self : int , __a : Optional[int] , __a : Dict=5 ):
if top_k > self.model.config.num_labels:
_a = self.model.config.num_labels
if self.framework == "pt":
_a = model_outputs.logits.sigmoid()[0]
_a , _a = probs.topk(__a )
else:
raise ValueError(f'Unsupported framework: {self.framework}' )
_a = scores.tolist()
_a = ids.tolist()
return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(__a , __a )]
| 346 | 0 |
'''simple docstring'''
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase_ : Optional[int] = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
__a =XLMRobertaTokenizer
__a =XLMRobertaTokenizerFast
__a =True
__a =True
def UpperCamelCase__ ( self : List[Any] ):
super().setUp()
# We have a SentencePiece fixture for testing
_a = XLMRobertaTokenizer(_SCREAMING_SNAKE_CASE , keep_accents=_SCREAMING_SNAKE_CASE )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase__ ( self : Tuple ):
_a = "<pad>"
_a = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
def UpperCamelCase__ ( self : List[Any] ):
_a = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<s>" )
self.assertEqual(vocab_keys[1] , "<pad>" )
self.assertEqual(vocab_keys[-1] , "<mask>" )
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , 10_02 )
def UpperCamelCase__ ( self : Optional[Any] ):
self.assertEqual(self.get_tokenizer().vocab_size , 10_02 )
def UpperCamelCase__ ( self : Optional[int] ):
_a = XLMRobertaTokenizer(_SCREAMING_SNAKE_CASE , keep_accents=_SCREAMING_SNAKE_CASE )
_a = tokenizer.tokenize("This is a test" )
self.assertListEqual(_SCREAMING_SNAKE_CASE , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
_a = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
_SCREAMING_SNAKE_CASE , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
_a = tokenizer.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE )
self.assertListEqual(
_SCREAMING_SNAKE_CASE , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
_a = tokenizer.convert_ids_to_tokens(_SCREAMING_SNAKE_CASE )
self.assertListEqual(
_SCREAMING_SNAKE_CASE , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
def UpperCamelCase__ ( self : Union[str, Any] ):
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
_a = (self.rust_tokenizer_class, "hf-internal-testing/tiny-xlm-roberta", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
_a = self.rust_tokenizer_class.from_pretrained(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
_a = self.tokenizer_class.from_pretrained(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
_a = tempfile.mkdtemp()
_a = tokenizer_r.save_pretrained(_SCREAMING_SNAKE_CASE )
_a = tokenizer_p.save_pretrained(_SCREAMING_SNAKE_CASE )
                # Checks it saves with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
_a = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f )
self.assertSequenceEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Checks everything loads correctly in the same way
_a = tokenizer_r.from_pretrained(_SCREAMING_SNAKE_CASE )
_a = tokenizer_p.from_pretrained(_SCREAMING_SNAKE_CASE )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(_SCREAMING_SNAKE_CASE )
# Save tokenizer rust, legacy_format=True
_a = tempfile.mkdtemp()
_a = tokenizer_r.save_pretrained(_SCREAMING_SNAKE_CASE , legacy_format=_SCREAMING_SNAKE_CASE )
_a = tokenizer_p.save_pretrained(_SCREAMING_SNAKE_CASE )
                # Checks it saves with the same files
self.assertSequenceEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Checks everything loads correctly in the same way
_a = tokenizer_r.from_pretrained(_SCREAMING_SNAKE_CASE )
_a = tokenizer_p.from_pretrained(_SCREAMING_SNAKE_CASE )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
shutil.rmtree(_SCREAMING_SNAKE_CASE )
# Save tokenizer rust, legacy_format=False
_a = tempfile.mkdtemp()
_a = tokenizer_r.save_pretrained(_SCREAMING_SNAKE_CASE , legacy_format=_SCREAMING_SNAKE_CASE )
_a = tokenizer_p.save_pretrained(_SCREAMING_SNAKE_CASE )
# Checks it saved the tokenizer.json file
self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
_a = tokenizer_r.from_pretrained(_SCREAMING_SNAKE_CASE )
_a = tokenizer_p.from_pretrained(_SCREAMING_SNAKE_CASE )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
shutil.rmtree(_SCREAMING_SNAKE_CASE )
@cached_property
def UpperCamelCase__ ( self : Optional[Any] ):
return XLMRobertaTokenizer.from_pretrained("xlm-roberta-base" )
def UpperCamelCase__ ( self : int ):
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(_SCREAMING_SNAKE_CASE , f.name )
_a = XLMRobertaTokenizer(f.name , keep_accents=_SCREAMING_SNAKE_CASE )
_a = pickle.dumps(_SCREAMING_SNAKE_CASE )
pickle.loads(_SCREAMING_SNAKE_CASE )
def UpperCamelCase__ ( self : List[Any] ):
if not self.test_rust_tokenizer:
return
_a = self.get_tokenizer()
_a = self.get_rust_tokenizer()
_a = "I was born in 92000, and this is falsé."
_a = tokenizer.tokenize(_SCREAMING_SNAKE_CASE )
_a = rust_tokenizer.tokenize(_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_a = tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE )
_a = rust_tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_a = self.get_rust_tokenizer()
_a = tokenizer.encode(_SCREAMING_SNAKE_CASE )
_a = rust_tokenizer.encode(_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def UpperCamelCase__ ( self : Optional[int] ):
_a = "Hello World!"
_a = [0, 3_53_78, 66_61, 38, 2]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(_SCREAMING_SNAKE_CASE , self.big_tokenizer.encode(_SCREAMING_SNAKE_CASE ) )
@slow
def UpperCamelCase__ ( self : Union[str, Any] ):
_a = (
"This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
)
_a = [
0,
32_93,
83,
10,
45_52,
49_89,
79_86,
6_78,
10,
59_15,
1_11,
17_94_59,
12_48_50,
4,
60_44,
2_37,
12,
6,
5,
6,
4,
67_80,
7_05,
15,
13_88,
44,
3_78,
1_01_14,
7_11,
1_52,
20,
6,
5,
2_23_76,
6_42,
12_21,
1_51_90,
3_41_53,
4_50,
56_08,
9_59,
11_19,
5_77_02,
1_36,
1_86,
47,
10_98,
2_93_67,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
60_44,
2_37,
62_84,
5_09_01,
5_28,
31,
90,
34,
9_27,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(_SCREAMING_SNAKE_CASE , self.big_tokenizer.encode(_SCREAMING_SNAKE_CASE ) )
@slow
def UpperCamelCase__ ( self : Optional[Any] ):
# fmt: off
_a = {"input_ids": [[0, 1_10_62, 8_27_72, 7, 15, 8_27_72, 5_38, 5_15_29, 2_37, 1_71_98, 12_90, 2_06, 9, 21_51_75, 13_14, 1_36, 1_71_98, 12_90, 2_06, 9, 5_63_59, 42, 12_20_09, 9, 1_64_66, 16, 8_73_44, 45_37, 9, 47_17, 7_83_81, 6, 15_99_58, 7, 15, 2_44_80, 6_18, 4, 5_27, 2_26_93, 54_28, 4, 27_77, 2_44_80, 98_74, 4, 4_35_23, 5_94, 4, 8_03, 1_83_92, 3_31_89, 18, 4, 4_35_23, 2_44_47, 1_23_99, 1_00, 2_49_55, 8_36_58, 96_26, 14_40_57, 15, 8_39, 2_23_35, 16, 1_36, 2_49_55, 8_36_58, 8_34_79, 15, 3_91_02, 7_24, 16, 6_78, 6_45, 27_89, 13_28, 45_89, 42, 12_20_09, 11_57_74, 23, 8_05, 13_28, 4_68_76, 7, 1_36, 5_38_94, 19_40, 4_22_27, 4_11_59, 1_77_21, 8_23, 4_25, 4, 2_75_12, 9_87_22, 2_06, 1_36, 55_31, 49_70, 9_19, 1_73_36, 5, 2], [0, 2_00_80, 6_18, 83, 8_27_75, 47, 4_79, 9, 15_17, 73, 5_38_94, 3_33, 8_05_81, 11_01_17, 1_88_11, 52_56, 12_95, 51, 15_25_26, 2_97, 79_86, 3_90, 12_44_16, 5_38, 3_54_31, 2_14, 98, 1_50_44, 2_57_37, 1_36, 71_08, 4_37_01, 23, 7_56, 13_53_55, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 5_81, 6_37_73, 11_94_55, 6, 14_77_97, 8_82_03, 7, 6_45, 70, 21, 32_85, 1_02_69, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_SCREAMING_SNAKE_CASE , model_name="xlm-roberta-base" , revision="d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3" , )
| 364 |
'''simple docstring'''
from random import randint, random
def _lowerCamelCase ( lowercase : int , lowercase : int , lowercase : int , lowercase : bool = False , lowercase : bool = False , lowercase : int = 5 , ) -> list:
_a = [[-1] * number_of_cells] # Create a highway without any car
_a = 0
_a = max(lowercase , 0 )
while i < number_of_cells:
_a = (
randint(0 , lowercase ) if random_speed else initial_speed
) # Place the cars
i += (
randint(1 , max_speed * 2 ) if random_frequency else frequency
) # Arbitrary number, may need tuning
return highway
def _lowerCamelCase ( lowercase : list , lowercase : int ) -> int:
_a = 0
_a = highway_now[car_index + 1 :]
for cell in range(len(lowercase ) ): # May need a better name for this
if cells[cell] != -1: # If the cell is not empty then
return distance # we have the distance we wanted
distance += 1
# Here if the car is near the end of the highway
return distance + get_distance(lowercase , -1 )
def _lowerCamelCase ( lowercase : list , lowercase : float , lowercase : int ) -> list:
_a = len(lowercase )
    # Before any calculation, the highway is empty
_a = [-1] * number_of_cells
for car_index in range(lowercase ):
if highway_now[car_index] != -1:
# Add 1 to the current speed of the car and cap the speed
_a = min(highway_now[car_index] + 1 , lowercase )
# Number of empty cell before the next car
_a = get_distance(lowercase , lowercase ) - 1
# We can't have the car causing an accident
_a = min(next_highway[car_index] , lowercase )
if random() < probability:
# Randomly, a driver will slow down
_a = max(next_highway[car_index] - 1 , 0 )
return next_highway
def _lowerCamelCase ( lowercase : list , lowercase : int , lowercase : float , lowercase : int ) -> list:
_a = len(highway[0] )
for i in range(lowercase ):
_a = update(highway[i] , lowercase , lowercase )
_a = [-1] * number_of_cells
for car_index in range(lowercase ):
_a = next_speeds_calculated[car_index]
if speed != -1:
# Change the position based on the speed (with % to create the loop)
_a = (car_index + speed) % number_of_cells
# Commit the change of position
_a = speed
highway.append(lowercase )
return highway
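# Added self-contained sketch (not in the original): one update step on a
# 6-cell ring with max speed 2 and no random braking, making the accelerate /
# brake / move rules above concrete. It assumes at least two cars on the ring.
def _nasch_step_demo(road: list, max_speed: int) -> list:
    number_of_cells = len(road)
    nxt = [-1] * number_of_cells
    for index, speed in enumerate(road):
        if speed == -1:
            continue
        speed = min(speed + 1, max_speed)  # rule 1: accelerate
        gap = next(d for d in range(1, number_of_cells) if road[(index + d) % number_of_cells] != -1) - 1
        speed = min(speed, gap)  # rule 2: brake to avoid a collision
        nxt[(index + speed) % number_of_cells] = speed  # rule 3: move
    return nxt

# cars at cells 0 and 3 (speed 0) accelerate to speed 1 and advance one cell
assert _nasch_step_demo([0, -1, -1, 0, -1, -1], 2) == [-1, 1, -1, -1, 1, -1]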
if __name__ == "__main__":
import doctest
doctest.testmod()
| 346 | 0 |
'''simple docstring'''
def _lowerCamelCase ( lowercase : Any ) -> int:
# bit count represents no. of bits in the gray code
if bit_count < 0:
raise ValueError("The given input must be positive" )
# get the generated string sequence
_a = gray_code_sequence_string(_UpperCAmelCase )
#
# convert them to integers
for i in range(len(_UpperCAmelCase ) ):
_a = int(sequence[i] , 2 )
return sequence
def _lowerCamelCase ( lowercase : Tuple ) -> Dict:
# The approach is a recursive one
    # Base cases are reached when n = 0 or n = 1
if bit_count == 0:
return ["0"]
if bit_count == 1:
return ["0", "1"]
_a = 1 << bit_count # defines the length of the sequence
    # 1 << n is equivalent to 2^n
# recursive answer will generate answer for n-1 bits
_a = gray_code_sequence_string(bit_count - 1 )
_a = []
# append 0 to first half of the smaller sequence generated
for i in range(seq_len // 2 ):
_a = "0" + smaller_sequence[i]
sequence.append(_UpperCAmelCase )
# append 1 to second half ... start from the end of the list
for i in reversed(range(seq_len // 2 ) ):
_a = "1" + smaller_sequence[i]
sequence.append(_UpperCAmelCase )
return sequence
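# Added cross-check (not in the original): the closed form g(i) = i ^ (i >> 1)
# yields the same n-bit Gray sequence that the recursive construction builds.
assert [i ^ (i >> 1) for i in range(1 << 3)] == [0, 1, 3, 2, 6, 7, 5, 4]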
if __name__ == "__main__":
import doctest
doctest.testmod()
| 365 |
'''simple docstring'''
def _lowerCamelCase ( lowercase : int = 10 ) -> str:
if not isinstance(lowercase , lowercase ) or n < 0:
raise ValueError("Invalid input" )
_a = 10**n
_a = 2_8433 * (pow(2 , 783_0457 , lowercase )) + 1
return str(number % modulus )
if __name__ == "__main__":
from doctest import testmod
testmod()
print(f"""{solution(10) = }""")
| 346 | 0 |
'''simple docstring'''
import json
import os
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_botoa_available
from .config_args import SageMakerConfig
from .config_utils import (
DYNAMO_BACKENDS,
_ask_field,
_ask_options,
_convert_dynamo_backend,
_convert_mixed_precision,
_convert_sagemaker_distributed_mode,
_convert_yes_no_to_bool,
)
if is_botoa_available():
import botoa # noqa: F401
def _lowerCamelCase ( lowercase : List[str] ) -> Union[str, Any]:
_a = botoa.client("iam" )
_a = {
"Version": "2012-10-17",
"Statement": [
{"Effect": "Allow", "Principal": {"Service": "sagemaker.amazonaws.com"}, "Action": "sts:AssumeRole"}
],
}
try:
# create the role, associated with the chosen trust policy
iam_client.create_role(
RoleName=SCREAMING_SNAKE_CASE_ , AssumeRolePolicyDocument=json.dumps(SCREAMING_SNAKE_CASE_ , indent=2 ) )
_a = {
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"sagemaker:*",
"ecr:GetDownloadUrlForLayer",
"ecr:BatchGetImage",
"ecr:BatchCheckLayerAvailability",
"ecr:GetAuthorizationToken",
"cloudwatch:PutMetricData",
"cloudwatch:GetMetricData",
"cloudwatch:GetMetricStatistics",
"cloudwatch:ListMetrics",
"logs:CreateLogGroup",
"logs:CreateLogStream",
"logs:DescribeLogStreams",
"logs:PutLogEvents",
"logs:GetLogEvents",
"s3:CreateBucket",
"s3:ListBucket",
"s3:GetBucketLocation",
"s3:GetObject",
"s3:PutObject",
],
"Resource": "*",
}
],
}
# attach policy to role
iam_client.put_role_policy(
RoleName=SCREAMING_SNAKE_CASE_ , PolicyName=F'{role_name}_policy_permission' , PolicyDocument=json.dumps(SCREAMING_SNAKE_CASE_ , indent=2 ) , )
except iam_client.exceptions.EntityAlreadyExistsException:
print(F'role {role_name} already exists. Using existing one' )
def _lowerCamelCase ( lowercase : List[str] ) -> List[str]:
_a = botoa.client("iam" )
return iam_client.get_role(RoleName=SCREAMING_SNAKE_CASE_ )["Role"]["Arn"]
def _lowerCamelCase ( ) -> Optional[Any]:
_a = _ask_options(
"How do you want to authorize?" , ["AWS Profile", "Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) "] , SCREAMING_SNAKE_CASE_ , )
_a = None
if credentials_configuration == 0:
_a = _ask_field("Enter your AWS Profile name: [default] " , default="default" )
_a = aws_profile
else:
print(
"Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,"
"`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`" )
_a = _ask_field("AWS Access Key ID: " )
_a = aws_access_key_id
_a = _ask_field("AWS Secret Access Key: " )
_a = aws_secret_access_key
_a = _ask_field("Enter your AWS Region: [us-east-1]" , default="us-east-1" )
_a = aws_region
_a = _ask_options(
"Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?" , ["Provide IAM Role name", "Create new IAM role using credentials"] , SCREAMING_SNAKE_CASE_ , )
if role_management == 0:
_a = _ask_field("Enter your IAM role name: " )
else:
_a = "accelerate_sagemaker_execution_role"
print(F'Accelerate will create an iam role "{iam_role_name}" using the provided credentials' )
_create_iam_role_for_sagemaker(SCREAMING_SNAKE_CASE_ )
_a = _ask_field(
"Do you want to use custom Docker image? [yes/NO]: " , _convert_yes_no_to_bool , default=SCREAMING_SNAKE_CASE_ , error_message="Please enter yes or no." , )
_a = None
if is_custom_docker_image:
_a = _ask_field("Enter your Docker image: " , lambda lowercase : str(SCREAMING_SNAKE_CASE_ ).lower() )
_a = _ask_field(
"Do you want to provide SageMaker input channels with data locations? [yes/NO]: " , _convert_yes_no_to_bool , default=SCREAMING_SNAKE_CASE_ , error_message="Please enter yes or no." , )
_a = None
if is_sagemaker_inputs_enabled:
_a = _ask_field(
"Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): " , lambda lowercase : str(SCREAMING_SNAKE_CASE_ ).lower() , )
_a = _ask_field(
"Do you want to enable SageMaker metrics? [yes/NO]: " , _convert_yes_no_to_bool , default=SCREAMING_SNAKE_CASE_ , error_message="Please enter yes or no." , )
_a = None
if is_sagemaker_metrics_enabled:
_a = _ask_field(
"Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): " , lambda lowercase : str(SCREAMING_SNAKE_CASE_ ).lower() , )
_a = _ask_options(
"What is the distributed mode?" , ["No distributed training", "Data parallelism"] , _convert_sagemaker_distributed_mode , )
_a = {}
_a = _ask_field(
"Do you wish to optimize your script with torch dynamo?[yes/NO]:" , _convert_yes_no_to_bool , default=SCREAMING_SNAKE_CASE_ , error_message="Please enter yes or no." , )
if use_dynamo:
_a = "dynamo_"
_a = _ask_options(
"Which dynamo backend would you like to use?" , [x.lower() for x in DYNAMO_BACKENDS] , _convert_dynamo_backend , default=2 , )
_a = _ask_field(
"Do you want to customize the defaults sent to torch.compile? [yes/NO]: " , _convert_yes_no_to_bool , default=SCREAMING_SNAKE_CASE_ , error_message="Please enter yes or no." , )
if use_custom_options:
_a = _ask_options(
"Which mode do you want to use?" , SCREAMING_SNAKE_CASE_ , lambda lowercase : TORCH_DYNAMO_MODES[int(SCREAMING_SNAKE_CASE_ )] , default="default" , )
_a = _ask_field(
"Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: " , _convert_yes_no_to_bool , default=SCREAMING_SNAKE_CASE_ , error_message="Please enter yes or no." , )
_a = _ask_field(
"Do you want to enable dynamic shape tracing? [yes/NO]: " , _convert_yes_no_to_bool , default=SCREAMING_SNAKE_CASE_ , error_message="Please enter yes or no." , )
_a = "Which EC2 instance type you want to use for your training?"
if distributed_type != SageMakerDistributedType.NO:
_a = _ask_options(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , lambda lowercase : SAGEMAKER_PARALLEL_EC2_INSTANCES[int(SCREAMING_SNAKE_CASE_ )] )
else:
eca_instance_query += "? [ml.p3.2xlarge]:"
_a = _ask_field(SCREAMING_SNAKE_CASE_ , lambda lowercase : str(SCREAMING_SNAKE_CASE_ ).lower() , default="ml.p3.2xlarge" )
_a = 1
if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
_a = _ask_field(
"How many machines do you want use? [1]: " , SCREAMING_SNAKE_CASE_ , default=1 , )
_a = _ask_options(
"Do you wish to use FP16 or BF16 (mixed precision)?" , ["no", "fp16", "bf16", "fp8"] , _convert_mixed_precision , )
if use_dynamo and mixed_precision == "no":
print(
"Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts." )
return SageMakerConfig(
image_uri=SCREAMING_SNAKE_CASE_ , compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER , distributed_type=SCREAMING_SNAKE_CASE_ , use_cpu=SCREAMING_SNAKE_CASE_ , dynamo_config=SCREAMING_SNAKE_CASE_ , eca_instance_type=SCREAMING_SNAKE_CASE_ , profile=SCREAMING_SNAKE_CASE_ , region=SCREAMING_SNAKE_CASE_ , iam_role_name=SCREAMING_SNAKE_CASE_ , mixed_precision=SCREAMING_SNAKE_CASE_ , num_machines=SCREAMING_SNAKE_CASE_ , sagemaker_inputs_file=SCREAMING_SNAKE_CASE_ , sagemaker_metrics_file=SCREAMING_SNAKE_CASE_ , )
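# Added note (hedged): this questionnaire is the flow behind `accelerate config`
# when the Amazon SageMaker compute environment is selected; the returned
# SageMakerConfig is then serialized to the default accelerate config file.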
| 366 |
'''simple docstring'''
def _lowerCamelCase ( lowercase : int = 6008_5147_5143 ) -> int:
try:
_a = int(lowercase )
except (TypeError, ValueError):
raise TypeError("Parameter n must be int or castable to int." )
if n <= 0:
raise ValueError("Parameter n must be greater than or equal to one." )
_a = 2
_a = 0
if n == 2:
return 2
while n > 2:
while n % i != 0:
i += 1
_a = i
while n % i == 0:
_a = n // i
i += 1
return int(lowercase )
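# Added self-contained reference (not in the original): the same trial-division
# idea in its usual compact form, checked against Project Euler's worked example
# (the largest prime factor of 13195 is 29).
def _largest_prime_factor_demo(n: int) -> int:
    i = 2
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
    return n

assert _largest_prime_factor_demo(13195) == 29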
if __name__ == "__main__":
print(f"""{solution() = }""")
| 346 | 0 |
'''simple docstring'''
def _lowerCamelCase ( lowercase : int ) -> str:
if number > 0:
raise ValueError("input must be a negative integer" )
_a = len(bin(UpperCamelCase__ )[3:] )
_a = bin(abs(UpperCamelCase__ ) - (1 << binary_number_length) )[3:]
_a = (
(
"1"
+ "0" * (binary_number_length - len(UpperCamelCase__ ))
+ twos_complement_number
)
if number < 0
else "0"
)
return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
| 367 |
'''simple docstring'''
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format='%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=os.environ.get('LOGLEVEL', 'INFO').upper(),
stream=sys.stdout,
)
lowerCAmelCase_ : List[Any] = logging.getLogger(__name__)
lowerCAmelCase_ : List[Any] = {'facebook/bart-base': BartForConditionalGeneration}
lowerCAmelCase_ : int = {'facebook/bart-base': BartTokenizer}
def _lowerCamelCase ( ) -> Union[str, Any]:
_a = argparse.ArgumentParser(description="Export Bart model + Beam Search to ONNX graph." )
parser.add_argument(
"--validation_file" , type=lowercase , default=lowercase , help="A csv or a json file containing the validation data." )
parser.add_argument(
"--max_length" , type=lowercase , default=5 , help="The maximum total input sequence length after tokenization." , )
parser.add_argument(
"--num_beams" , type=lowercase , default=lowercase , help=(
"Number of beams to use for evaluation. This argument will be "
"passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."
) , )
parser.add_argument(
"--model_name_or_path" , type=lowercase , help="Path to pretrained model or model identifier from huggingface.co/models." , required=lowercase , )
parser.add_argument(
"--config_name" , type=lowercase , default=lowercase , help="Pretrained config name or path if not the same as model_name" , )
parser.add_argument(
"--device" , type=lowercase , default="cpu" , help="Device where the model will be run" , )
parser.add_argument("--output_file_path" , type=lowercase , default=lowercase , help="Where to store the final ONNX file." )
_a = parser.parse_args()
return args
def _lowerCamelCase ( lowercase : Any , lowercase : Tuple="cpu" ) -> Optional[Any]:
_a = model_dict[model_name].from_pretrained(lowercase ).to(lowercase )
_a = tokenizer_dict[model_name].from_pretrained(lowercase )
if model_name in ["facebook/bart-base"]:
_a = 0
_a = None
_a = 0
return huggingface_model, tokenizer
def _lowerCamelCase ( lowercase : List[str] , lowercase : Tuple , lowercase : int , lowercase : Any , lowercase : Dict ) -> Any:
model.eval()
_a = None
_a = torch.jit.script(BARTBeamSearchGenerator(lowercase ) )
with torch.no_grad():
_a = "My friends are cool but they eat too many carbs."
_a = tokenizer([ARTICLE_TO_SUMMARIZE] , max_length=1024 , return_tensors="pt" ).to(model.device )
_a = model.generate(
inputs["input_ids"] , attention_mask=inputs["attention_mask"] , num_beams=lowercase , max_length=lowercase , early_stopping=lowercase , decoder_start_token_id=model.config.decoder_start_token_id , )
torch.onnx.export(
lowercase , (
inputs["input_ids"],
inputs["attention_mask"],
num_beams,
max_length,
model.config.decoder_start_token_id,
) , lowercase , opset_version=14 , input_names=["input_ids", "attention_mask", "num_beams", "max_length", "decoder_start_token_id"] , output_names=["output_ids"] , dynamic_axes={
"input_ids": {0: "batch", 1: "seq"},
"output_ids": {0: "batch", 1: "seq_out"},
} , example_outputs=lowercase , )
logger.info("Model exported to {}".format(lowercase ) )
_a = remove_dup_initializers(os.path.abspath(lowercase ) )
logger.info("Deduplicated and optimized model written to {}".format(lowercase ) )
_a = onnxruntime.InferenceSession(lowercase )
_a = ort_sess.run(
lowercase , {
"input_ids": inputs["input_ids"].cpu().numpy(),
"attention_mask": inputs["attention_mask"].cpu().numpy(),
"num_beams": np.array(lowercase ),
"max_length": np.array(lowercase ),
"decoder_start_token_id": np.array(model.config.decoder_start_token_id ),
} , )
np.testing.assert_allclose(summary_ids.cpu().numpy() , ort_out[0] , rtol=1E-3 , atol=1E-3 )
logger.info("Model outputs from torch and ONNX Runtime are similar." )
logger.info("Success." )
def _lowerCamelCase ( ) -> Any:
_a = parse_args()
_a = 5
_a = 4
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO , )
logger.setLevel(logging.INFO )
transformers.utils.logging.set_verbosity_error()
_a = torch.device(args.device )
_a , _a = load_model_tokenizer(args.model_name_or_path , lowercase )
if model.config.decoder_start_token_id is None:
raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined" )
model.to(lowercase )
if args.max_length:
_a = args.max_length
if args.num_beams:
_a = args.num_beams
if args.output_file_path:
_a = args.output_file_path
else:
_a = "BART.onnx"
logger.info("Exporting model to ONNX" )
export_and_validate_model(lowercase , lowercase , lowercase , lowercase , lowercase )
if __name__ == "__main__":
main()
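# Added usage note (hedged; the flag names are the ones defined in parse_args
# above, while the script file name is assumed from the upstream example):
#   python run_onnx_exporter.py --model_name_or_path facebook/bart-base \
#       --max_length 5 --num_beams 4 --device cpu --output_file_path BART.onnx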
| 346 | 0 |
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class __SCREAMING_SNAKE_CASE (unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase__ ( self : Optional[int] , __a : List[str] ):
_a = 3
_a = 2_50
_a = ids_tensor((batch_size, length) , _lowerCamelCase )
_a = torch.ones((batch_size, length) , device=_lowerCamelCase , dtype=torch.float ) / length
return input_ids, scores
def UpperCamelCase__ ( self : List[str] ):
_a , _a = self._get_tensors(5 )
_a = StoppingCriteriaList(
[
MaxLengthCriteria(max_length=10 ),
MaxTimeCriteria(max_time=0.1 ),
] )
self.assertFalse(criteria(_lowerCamelCase , _lowerCamelCase ) )
_a , _a = self._get_tensors(9 )
self.assertFalse(criteria(_lowerCamelCase , _lowerCamelCase ) )
_a , _a = self._get_tensors(10 )
self.assertTrue(criteria(_lowerCamelCase , _lowerCamelCase ) )
def UpperCamelCase__ ( self : Optional[Any] ):
_a = MaxLengthCriteria(max_length=10 )
_a , _a = self._get_tensors(5 )
self.assertFalse(criteria(_lowerCamelCase , _lowerCamelCase ) )
_a , _a = self._get_tensors(9 )
self.assertFalse(criteria(_lowerCamelCase , _lowerCamelCase ) )
_a , _a = self._get_tensors(10 )
self.assertTrue(criteria(_lowerCamelCase , _lowerCamelCase ) )
def UpperCamelCase__ ( self : List[Any] ):
_a = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 )
_a , _a = self._get_tensors(5 )
self.assertFalse(criteria(_lowerCamelCase , _lowerCamelCase ) )
_a , _a = self._get_tensors(9 )
self.assertFalse(criteria(_lowerCamelCase , _lowerCamelCase ) )
_a , _a = self._get_tensors(10 )
self.assertTrue(criteria(_lowerCamelCase , _lowerCamelCase ) )
_a = StoppingCriteriaList([criteria] )
self.assertEqual(criteria_list.max_length , 10 )
def UpperCamelCase__ ( self : Any ):
_a , _a = self._get_tensors(5 )
_a = MaxTimeCriteria(max_time=0.1 )
self.assertFalse(criteria(_lowerCamelCase , _lowerCamelCase ) )
_a = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 )
self.assertTrue(criteria(_lowerCamelCase , _lowerCamelCase ) )
def UpperCamelCase__ ( self : Optional[int] ):
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 10 )
with self.assertWarns(_lowerCamelCase ):
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 11 )
_a = validate_stopping_criteria(StoppingCriteriaList() , 11 )
self.assertEqual(len(_lowerCamelCase ) , 1 )
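# Added context (hedged): outside of these tests, the criteria are passed to
# text generation and consulted after every decoding step, e.g.
#   model.generate(input_ids, stopping_criteria=StoppingCriteriaList([MaxLengthCriteria(20)]))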
| 368 |
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
lowerCAmelCase_ : Dict = abspath(join(dirname(dirname(dirname(__file__))), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def _lowerCamelCase ( lowercase : str ) -> Optional[int]:
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(lowercase )
def _lowerCamelCase ( lowercase : Dict ) -> str:
from transformers.testing_utils import pytest_terminal_summary_main
_a = terminalreporter.config.getoption("--make-reports" )
if make_reports:
pytest_terminal_summary_main(lowercase , id=lowercase )
| 346 | 0 |
'''simple docstring'''
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
lowerCAmelCase_ : Optional[Any] = logging.get_logger(__name__)
lowerCAmelCase_ : str = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
lowerCAmelCase_ : List[Any] = {
"vocab_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
lowerCAmelCase_ : List[Any] = {
"vocab_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-ctx_encoder-single-nq-base": 5_12,
"facebook/dpr-ctx_encoder-multiset-base": 5_12,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-question_encoder-single-nq-base": 5_12,
"facebook/dpr-question_encoder-multiset-base": 5_12,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-reader-single-nq-base": 5_12,
"facebook/dpr-reader-multiset-base": 5_12,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
"facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class DPRContextEncoderTokenizer(BertTokenizer ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class DPRQuestionEncoderTokenizer(BertTokenizer ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
DPRSpanPrediction = collections.namedtuple(
    'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 'start_index', 'end_index', 'text']
)
DPRReaderOutput = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits'])
lowerCAmelCase_ : List[str] = R"\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. 
Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer's default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n "
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING )
class CustomDPRReaderTokenizerMixin:
"""simple docstring"""
    def __call__( self , questions , titles = None , texts = None , padding = False , truncation = False , max_length = None , return_tensors = None , return_attention_mask = None , **kwargs ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions , padding=padding , truncation=truncation , max_length=max_length , return_tensors=return_tensors , return_attention_mask=return_attention_mask , **kwargs , )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions , text_pair , padding=padding , truncation=truncation , max_length=max_length , return_tensors=return_tensors , return_attention_mask=return_attention_mask , **kwargs , )
        titles = titles if not isinstance(titles , str ) else [titles]
        texts = texts if not isinstance(texts , str ) else [texts]
        n_passages = len(titles )
        questions = questions if not isinstance(questions , str ) else [questions] * n_passages
        if len(titles ) != len(texts ):
            raise ValueError(
                f'There should be as many titles than texts but got {len(titles )} titles and {len(texts )} texts.' )
        encoded_question_and_titles = super().__call__(questions , titles , padding=False , truncation=False )["input_ids"]
        encoded_texts = super().__call__(texts , add_special_tokens=False , padding=False , truncation=False )["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles , encoded_texts )
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs , padding=padding , max_length=max_length , return_tensors=return_tensors )
    def decode_best_spans( self , reader_input , reader_output , num_spans = 16 , max_answer_length = 64 , num_spans_per_passage = 4 , ) -> List[DPRSpanPrediction]:
        input_ids = reader_input["input_ids"]
        start_logits , end_logits , relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits )
        sorted_docs = sorted(range(n_passages ) , reverse=True , key=relevance_logits.__getitem__ )
        nbest_spans_predictions = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id] )
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id , 2 ) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id )
            else:
                sequence_len = len(sequence_ids )
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=max_answer_length , top_spans=num_spans_per_passage , )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=doc_id , start_index=start_index , end_index=end_index , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
            if len(nbest_spans_predictions ) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans( self , start_logits , end_logits , max_answer_length , top_spans , ) -> List[DPRSpanPrediction]:
        scores = []
        for start_index, start_score in enumerate(start_logits ):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
                scores.append(((start_index, start_index + answer_length), start_score + end_score) )
        scores = sorted(scores , key=lambda x : x[1] , reverse=True )
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            if start_index > end_index:
                raise ValueError(f'Wrong span indices: [{start_index}:{end_index}]' )
            length = end_index - start_index + 1
            if length > max_answer_length:
                raise ValueError(f'Span is too long: {length} > {max_answer_length}' )
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals ):
                continue
            chosen_span_intervals.append((start_index, end_index) )
            if len(chosen_span_intervals ) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING )
class DPRReaderTokenizer(CustomDPRReaderTokenizerMixin , BertTokenizer ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ['input_ids', 'attention_mask']
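# Usage sketch: encode a question against a retrieved passage and decode the best
# answer spans. Mirrors the reader docstring example; assumes the
# "facebook/dpr-reader-single-nq-base" checkpoint is available.
if __name__ == "__main__":
    from transformers import DPRReader

    tokenizer = DPRReaderTokenizer.from_pretrained("facebook/dpr-reader-single-nq-base" )
    model = DPRReader.from_pretrained("facebook/dpr-reader-single-nq-base" )
    encoded_inputs = tokenizer(
        questions="What is love?" ,
        titles="Haddaway" ,
        texts="'What Is Love' is a song recorded by the artist Haddaway" ,
        return_tensors="pt" ,
    )
    outputs = model(**encoded_inputs )
    predicted_spans = tokenizer.decode_best_spans(encoded_inputs , outputs )
    print(predicted_spans[0].text )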
| 369 |
'''simple docstring'''
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.t5.modeling_t5 import T5Block, T5Config, T5LayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class SpectrogramNotesEncoder(ModelMixin , ConfigMixin , ModuleUtilsMixin ):
"""simple docstring"""
@register_to_config
    def __init__( self , max_length: int , vocab_size: int , d_model: int , dropout_rate: float , num_layers: int , num_heads: int , d_kv: int , d_ff: int , feed_forward_proj: str , is_decoder: bool = False , ):
        super().__init__()
        self.token_embedder = nn.Embedding(vocab_size , d_model )
        self.position_encoding = nn.Embedding(max_length , d_model )
        self.position_encoding.weight.requires_grad = False
        self.dropout_pre = nn.Dropout(p=dropout_rate )
        t5config = T5Config(
            vocab_size=vocab_size , d_model=d_model , num_heads=num_heads , d_kv=d_kv , d_ff=d_ff , dropout_rate=dropout_rate , feed_forward_proj=feed_forward_proj , is_decoder=is_decoder , is_encoder_decoder=False , )
        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers ):
            lyr = T5Block(t5config )
            self.encoders.append(lyr )
        self.layer_norm = T5LayerNorm(d_model )
        self.dropout_post = nn.Dropout(p=dropout_rate )
    def forward( self , encoder_input_tokens , encoder_inputs_mask ):
        x = self.token_embedder(encoder_input_tokens )
        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length , device=encoder_input_tokens.device )
        x += self.position_encoding(inputs_positions )
        x = self.dropout_pre(x )
        # invert the attention mask for the T5 blocks
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask , input_shape )
        for lyr in self.encoders:
            x = lyr(x , extended_attention_mask )[0]
        x = self.layer_norm(x )
        return self.dropout_post(x ), encoder_inputs_mask
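# Minimal shape-check sketch for the encoder above. All config values here are
# illustrative assumptions, not the settings of any released checkpoint.
if __name__ == "__main__":
    encoder = SpectrogramNotesEncoder(
        max_length=2048 , vocab_size=1536 , d_model=768 , dropout_rate=0.1 , num_layers=2 , num_heads=6 , d_kv=64 , d_ff=2048 , feed_forward_proj="gated-gelu" , )
    tokens = torch.randint(0 , 1536 , (1, 16) )
    mask = torch.ones((1, 16) , dtype=torch.long )
    x , out_mask = encoder(tokens , mask )
    print(x.shape )  # expected: torch.Size([1, 16, 768])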
| 346 | 0 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
PNDMScheduler,
StableDiffusionLDMaDPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import nightly, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
enable_full_determinism()
class StableDiffusionLDMaDPipelineFastTests(unittest.TestCase ):
    """simple docstring"""
    pipeline_class = StableDiffusionLDMaDPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components( self ):
        torch.manual_seed(0 )
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
        scheduler = DDIMScheduler(
            beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=False , set_alpha_to_one=False , )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=6 , out_channels=6 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        components = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
    def get_dummy_inputs( self , device , seed=0 ):
        if str(device ).startswith("mps" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
    def test_stable_diffusion_ddim( self ):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        ldmad_pipe = StableDiffusionLDMaDPipeline(**components )
        ldmad_pipe = ldmad_pipe.to(device )
        ldmad_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        output = ldmad_pipe(**inputs )
        rgb , depth = output.rgb, output.depth
        image_slice_rgb = rgb[0, -3:, -3:, -1]
        image_slice_depth = depth[0, -3:, -1]
assert rgb.shape == (1, 64, 64, 3)
assert depth.shape == (1, 64, 64)
        expected_slice_rgb = np.array(
            [0.37338176, 0.70247, 0.74203193, 0.51643604, 0.58256793, 0.60932136, 0.4181095, 0.48355877, 0.46535262] )
        expected_slice_depth = np.array([103.46727, 85.812004, 87.849236] )
assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb ).max() < 1e-2
assert np.abs(image_slice_depth.flatten() - expected_slice_depth ).max() < 1e-2
    def test_stable_diffusion_prompt_embeds( self ):
        components = self.get_dummy_components()
        ldmad_pipe = StableDiffusionLDMaDPipeline(**components )
        ldmad_pipe = ldmad_pipe.to(torch_device )
        ldmad_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(torch_device )
        inputs["prompt"] = 3 * [inputs["prompt"]]
        # forward
        output = ldmad_pipe(**inputs )
        rgb_slice_1 , depth_slice_1 = output.rgb, output.depth
        rgb_slice_1 = rgb_slice_1[0, -3:, -3:, -1]
        depth_slice_1 = depth_slice_1[0, -3:, -1]
        inputs = self.get_dummy_inputs(torch_device )
        prompt = 3 * [inputs.pop("prompt" )]
        text_inputs = ldmad_pipe.tokenizer(
            prompt , padding="max_length" , max_length=ldmad_pipe.tokenizer.model_max_length , truncation=True , return_tensors="pt" , )
        text_inputs = text_inputs["input_ids"].to(torch_device )
        prompt_embeds = ldmad_pipe.text_encoder(text_inputs )[0]
        inputs["prompt_embeds"] = prompt_embeds
        # forward
        output = ldmad_pipe(**inputs )
        rgb_slice_2 , depth_slice_2 = output.rgb, output.depth
        rgb_slice_2 = rgb_slice_2[0, -3:, -3:, -1]
        depth_slice_2 = depth_slice_2[0, -3:, -1]
        assert np.abs(rgb_slice_1.flatten() - rgb_slice_2.flatten() ).max() < 1e-4
        assert np.abs(depth_slice_1.flatten() - depth_slice_2.flatten() ).max() < 1e-4
    def test_stable_diffusion_negative_prompt( self ):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True )
        ldmad_pipe = StableDiffusionLDMaDPipeline(**components )
        ldmad_pipe = ldmad_pipe.to(device )
        ldmad_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        negative_prompt = "french fries"
        output = ldmad_pipe(**inputs , negative_prompt=negative_prompt )
        rgb , depth = output.rgb, output.depth
        rgb_slice = rgb[0, -3:, -3:, -1]
        depth_slice = depth[0, -3:, -1]
assert rgb.shape == (1, 64, 64, 3)
assert depth.shape == (1, 64, 64)
        expected_slice_rgb = np.array(
            [0.37044, 0.71811503, 0.7223251, 0.48603675, 0.5638391, 0.6364948, 0.42833704, 0.4901315, 0.47926217] )
        expected_slice_depth = np.array([107.84738, 84.62802, 89.962135] )
assert np.abs(rgb_slice.flatten() - expected_slice_rgb ).max() < 1e-2
assert np.abs(depth_slice.flatten() - expected_slice_depth ).max() < 1e-2
@slow
@require_torch_gpu
class StableDiffusionLDMaDPipelineSlowTests(unittest.TestCase ):
"""simple docstring"""
    def tearDown( self ):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def get_inputs( self , device , generator_device="cpu" , dtype=torch.float32 , seed=0 ):
        generator = torch.Generator(device=generator_device ).manual_seed(seed )
        latents = np.random.RandomState(seed ).standard_normal((1, 4, 64, 64) )
        latents = torch.from_numpy(latents ).to(device=device , dtype=dtype )
        inputs = {
"prompt": "a photograph of an astronaut riding a horse",
"latents": latents,
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
    def test_ldm3d_stable_diffusion( self ):
        ldmad_pipe = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d" )
        ldmad_pipe = ldmad_pipe.to(torch_device )
        ldmad_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_inputs(torch_device )
        output = ldmad_pipe(**inputs )
        rgb , depth = output.rgb, output.depth
        rgb_slice = rgb[0, -3:, -3:, -1].flatten()
        depth_slice = depth[0, -3:, -1].flatten()
        assert rgb.shape == (1, 512, 512, 3)
        assert depth.shape == (1, 512, 512)
        expected_slice_rgb = np.array(
            [0.53805465, 0.56707305, 0.5486515, 0.57012236, 0.5814511, 0.56253487, 0.54843014, 0.55092263, 0.6459706] )
        expected_slice_depth = np.array(
            [0.9263781, 0.6678672, 0.5486515, 0.92202145, 0.67831135, 0.56253487, 0.9241694, 0.7551478, 0.6459706] )
assert np.abs(rgb_slice - expected_slice_rgb ).max() < 3e-3
assert np.abs(depth_slice - expected_slice_depth ).max() < 3e-3
@nightly
@require_torch_gpu
class StableDiffusionLDMaDPipelineNightlyTests(unittest.TestCase ):
"""simple docstring"""
    def tearDown( self ):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def get_inputs( self , device , generator_device="cpu" , dtype=torch.float32 , seed=0 ):
        generator = torch.Generator(device=generator_device ).manual_seed(seed )
        latents = np.random.RandomState(seed ).standard_normal((1, 4, 64, 64) )
        latents = torch.from_numpy(latents ).to(device=device , dtype=dtype )
        inputs = {
"prompt": "a photograph of an astronaut riding a horse",
"latents": latents,
"generator": generator,
"num_inference_steps": 50,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
    def test_ldm3d( self ):
        ldmad_pipe = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d" ).to(torch_device )
        ldmad_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_inputs(torch_device )
        output = ldmad_pipe(**inputs )
        rgb , depth = output.rgb, output.depth
        expected_rgb_mean = 0.495586
        expected_rgb_std = 0.33795515
        expected_depth_mean = 112.48518
        expected_depth_std = 98.489746
assert np.abs(expected_rgb_mean - rgb.mean() ) < 1e-3
assert np.abs(expected_rgb_std - rgb.std() ) < 1e-3
assert np.abs(expected_depth_mean - depth.mean() ) < 1e-3
assert np.abs(expected_depth_std - depth.std() ) < 1e-3
    def test_ldm3d_v2( self ):
        ldmad_pipe = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d-4c" ).to(torch_device )
        ldmad_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_inputs(torch_device )
        output = ldmad_pipe(**inputs )
        rgb , depth = output.rgb, output.depth
        expected_rgb_mean = 0.4194127
        expected_rgb_std = 0.35375586
        expected_depth_mean = 0.5638502
        expected_depth_std = 0.34686103
        assert rgb.shape == (1, 512, 512, 3)
        assert depth.shape == (1, 512, 512, 1)
assert np.abs(expected_rgb_mean - rgb.mean() ) < 1e-3
assert np.abs(expected_rgb_std - rgb.std() ) < 1e-3
assert np.abs(expected_depth_mean - depth.mean() ) < 1e-3
assert np.abs(expected_depth_std - depth.std() ) < 1e-3
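# Usage sketch mirroring what these tests exercise: text-to-(rgb, depth) generation
# with the LDM3D pipeline, assuming the "Intel/ldm3d" checkpoint.
if __name__ == "__main__":
    ldmad_pipe = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d" )
    ldmad_pipe = ldmad_pipe.to("cuda" if torch.cuda.is_available() else "cpu" )
    output = ldmad_pipe("a photograph of an astronaut riding a horse" , num_inference_steps=25 )
    rgb , depth = output.rgb , output.depth
    rgb[0].save("astronaut_rgb.png" )    # default output_type is PIL
    depth[0].save("astronaut_depth.png" )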
| 370 |
'''simple docstring'''
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters( model ):
    model_parameters = filter(lambda p : p.requires_grad , model.parameters() )
    params = sum([np.prod(p.size() ) for p in model_parameters] )
    return params
logger = logging.getLogger(__name__)
def get_checkpoint_callback( output_dir , metric ):
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    elif metric == "loss":
        exp = "{val_avg_loss:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f'seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this'
            " function." )
    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir , filename=exp , monitor=f'val_{metric}' , mode="max" , save_top_k=1 , every_n_epochs=1 , )
    return checkpoint_callback
def get_early_stopping_callback( metric , patience ):
    return EarlyStopping(
        monitor=f'val_{metric}' , mode="min" if "loss" in metric else "max" , patience=patience , verbose=True , )
class Seq2SeqLoggingCallback(pl.Callback ):
"""simple docstring"""
    def on_batch_end( self , trainer , pl_module ):
        lrs = {f'lr_group_{i}': param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
        pl_module.logger.log_metrics(lrs )
@rank_zero_only
    def _write_logs( self , trainer: pl.Trainer , pl_module: pl.LightningModule , type_path: str , save_generations=True ):
        logger.info(f'***** {type_path} results at step {trainer.global_step:05d} *****' )
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]} )
        # Log results
        od = Path(pl_module.hparams.output_dir )
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f'{type_path}_results/{trainer.global_step:05d}.txt'
            generations_file = od / f'{type_path}_generations/{trainer.global_step:05d}.txt'
        results_file.parent.mkdir(exist_ok=True )
        generations_file.parent.mkdir(exist_ok=True )
        with open(results_file , "a+" ) as writer:
            for key in sorted(metrics ):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val , torch.Tensor ):
                    val = val.item()
                msg = f'{key}: {val:.6f}\n'
                writer.write(msg )
        if not save_generations:
            return
        if "preds" in metrics:
            content = "\n".join(metrics["preds"] )
            generations_file.open("w+" ).write(content )
@rank_zero_only
    def on_train_start( self , trainer , pl_module ):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()
        n_trainable_pars = count_trainable_parameters(pl_module )
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6} )
@rank_zero_only
    def on_test_end( self , trainer: pl.Trainer , pl_module: pl.LightningModule ):
        save_json(pl_module.metrics , pl_module.metrics_save_path )
        return self._write_logs(trainer , pl_module , "test" )
@rank_zero_only
    def on_validation_end( self , trainer: pl.Trainer , pl_module ):
        save_json(pl_module.metrics , pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
| 346 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {'ctrl': 'https://huggingface.co/ctrl/resolve/main/config.json'}
class CTRLConfig(PretrainedConfig ):
"""simple docstring"""
    model_type = 'ctrl'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
'max_position_embeddings': 'n_positions',
'hidden_size': 'n_embd',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
    def __init__( self , vocab_size=246534 , n_positions=256 , n_embd=1280 , dff=8192 , n_layer=48 , n_head=16 , resid_pdrop=0.1 , embd_pdrop=0.1 , layer_norm_epsilon=1e-6 , initializer_range=0.02 , use_cache=True , **kwargs , ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        super().__init__(**kwargs )
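# Quick sanity sketch: the default config, with the renamed attributes exposed
# through `attribute_map`.
if __name__ == "__main__":
    config = CTRLConfig()
    print(config.hidden_size == config.n_embd )          # True, via attribute_map
    print(config.num_hidden_layers == config.n_layer )   # True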
| 371 |
'''simple docstring'''
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse('9.1.0'):
    PIL_INTERPOLATION = {
'linear': PIL.Image.Resampling.BILINEAR,
'bilinear': PIL.Image.Resampling.BILINEAR,
'bicubic': PIL.Image.Resampling.BICUBIC,
'lanczos': PIL.Image.Resampling.LANCZOS,
'nearest': PIL.Image.Resampling.NEAREST,
}
else:
    PIL_INTERPOLATION = {
'linear': PIL.Image.LINEAR,
'bilinear': PIL.Image.BILINEAR,
'bicubic': PIL.Image.BICUBIC,
'lanczos': PIL.Image.LANCZOS,
'nearest': PIL.Image.NEAREST,
}
def pt_to_pil( images ):
    images = (images / 2 + 0.5).clamp(0 , 1 )
    images = images.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
    images = numpy_to_pil(images )
    return images
def numpy_to_pil( images ):
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("uint8" )
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze() , mode="L" ) for image in images]
    else:
        pil_images = [Image.fromarray(image ) for image in images]
    return pil_images
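# Usage sketch: convert a batch of numpy images in [0, 1] to PIL. The random
# array is only illustrative input.
if __name__ == "__main__":
    import numpy as np

    batch = np.random.rand(2 , 64 , 64 , 3 )  # NHWC floats in [0, 1]
    pils = numpy_to_pil(batch )
    print(len(pils ) , pils[0].size )  # 2 (64, 64)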
| 346 | 0 |
'''simple docstring'''
from manim import *
class Stage5(Scene ):
    """simple docstring"""
    def construct( self ):
_a = Rectangle(height=0.5 , width=0.5 )
_a = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
_a = Rectangle(height=0.25 , width=0.25 )
_a = [mem.copy() for i in range(6 )]
_a = [mem.copy() for i in range(6 )]
_a = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
_a = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
_a = VGroup(__lowercase , __lowercase ).arrange(__lowercase , buff=0 )
_a = Text("CPU" , font_size=24 )
_a = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__lowercase )
_a = [mem.copy() for i in range(4 )]
_a = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
_a = Text("GPU" , font_size=24 )
_a = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
gpu.move_to([-1, -1, 0] )
self.add(__lowercase )
_a = [mem.copy() for i in range(6 )]
_a = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
_a = Text("Model" , font_size=24 )
_a = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
model.move_to([3, -1.0, 0] )
self.add(__lowercase )
_a = []
_a = []
for i, rect in enumerate(__lowercase ):
_a = fill.copy().set_fill(__lowercase , opacity=0.8 )
target.move_to(__lowercase )
model_arr.append(__lowercase )
_a = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(__lowercase , opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(__lowercase )
self.add(*__lowercase , *__lowercase )
_a = [meta_mem.copy() for i in range(6 )]
_a = [meta_mem.copy() for i in range(6 )]
_a = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
_a = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
_a = VGroup(__lowercase , __lowercase ).arrange(__lowercase , buff=0 )
_a = Text("Disk" , font_size=24 )
_a = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
disk.move_to([-4, -1.25, 0] )
self.add(__lowercase , __lowercase )
_a = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_a = MarkupText(
f'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(__lowercase , __lowercase )
_a = MarkupText(
f'<span fgcolor=\'{BLUE}\'>●</span> Checkpoint' , font_size=18 , )
blue_text.next_to(__lowercase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(__lowercase )
_a = MarkupText(
f'Now watch as an input is passed through the model\nand how the memory is utilized and handled.' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(__lowercase ) )
_a = Square(0.3 )
input.set_fill(__lowercase , opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] , __lowercase , buff=0.5 )
self.play(Write(__lowercase ) )
input.generate_target()
input.target.next_to(model_arr[0] , direction=__lowercase , buff=0.02 )
self.play(MoveToTarget(__lowercase ) )
self.play(FadeOut(__lowercase ) )
_a = Arrow(start=__lowercase , end=__lowercase , color=__lowercase , buff=0.5 )
a.next_to(model_arr[0].get_left() , __lowercase , buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
_a = MarkupText(
f'As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(__lowercase , run_time=3 ) )
        _a = {"run_time": 1, "fade_in": True, "fade_out": True, "buff": 0.02}
self.play(
Write(__lowercase ) , Circumscribe(model_arr[0] , color=__lowercase , **__lowercase ) , Circumscribe(model_cpu_arr[0] , color=__lowercase , **__lowercase ) , Circumscribe(gpu_rect[0] , color=__lowercase , **__lowercase ) , )
self.play(MoveToTarget(model_cpu_arr[0] ) )
_a = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 , __lowercase , buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
_a = AnimationGroup(
FadeOut(__lowercase , run_time=0.5 ) , MoveToTarget(__lowercase , run_time=0.5 ) , FadeIn(__lowercase , run_time=0.5 ) , lag_ratio=0.2 )
self.play(__lowercase )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
_a = 0.7
self.play(
Circumscribe(model_arr[i] , **__lowercase ) , Circumscribe(cpu_left_col_base[i] , **__lowercase ) , Circumscribe(cpu_left_col_base[i + 1] , color=__lowercase , **__lowercase ) , Circumscribe(gpu_rect[0] , color=__lowercase , **__lowercase ) , Circumscribe(model_arr[i + 1] , color=__lowercase , **__lowercase ) , )
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
else:
self.play(
MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 )
self.play(
Circumscribe(model_arr[-1] , color=__lowercase , **__lowercase ) , Circumscribe(cpu_left_col_base[-1] , color=__lowercase , **__lowercase ) , Circumscribe(gpu_rect[0] , color=__lowercase , **__lowercase ) , )
self.play(MoveToTarget(model_cpu_arr[i] ) )
_a = a_c
_a = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 )
self.play(
FadeOut(__lowercase ) , FadeOut(__lowercase , run_time=0.5 ) , )
_a = MarkupText(f'Inference on a model too large for GPU memory\nis successfully completed.' , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(__lowercase , run_time=3 ) , MoveToTarget(__lowercase ) )
self.wait()
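# Rendering note: a scene like the one above is rendered from the command line,
# e.g. `manim -pql this_file.py Stage5` (preview, low quality); the class name
# must match the Scene subclass defined above.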
| 350 |
'''simple docstring'''
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope="session" )
def _lowerCamelCase ( ) -> Optional[int]:
_a = 10
_a = datasets.Features(
{
"tokens": datasets.Sequence(datasets.Value("string" ) ),
"labels": datasets.Sequence(datasets.ClassLabel(names=["negative", "positive"] ) ),
"answers": datasets.Sequence(
{
"text": datasets.Value("string" ),
"answer_start": datasets.Value("int32" ),
} ),
"id": datasets.Value("int64" ),
} )
_a = datasets.Dataset.from_dict(
{
"tokens": [["foo"] * 5] * n,
"labels": [[1] * 5] * n,
"answers": [{"answer_start": [97], "text": ["1976"]}] * 10,
"id": list(range(lowercase ) ),
} , features=lowercase , )
return dataset
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Tuple , lowercase : int ) -> Optional[Any]:
_a = str(tmp_path_factory.mktemp("data" ) / "file.arrow" )
dataset.map(cache_file_name=lowercase )
return filename
# FILE_CONTENT + files
FILE_CONTENT = """\
    Text data.
    Second line of data."""
@pytest.fixture(scope="session" )
def text_file( tmp_path_factory ):
    filename = tmp_path_factory.mktemp("data" ) / "file.txt"
    data = FILE_CONTENT
    with open(filename , "w" ) as f:
        f.write(data )
    return filename
@pytest.fixture(scope="session" )
def bz2_file( tmp_path_factory ):
    import bz2
    path = tmp_path_factory.mktemp("data" ) / "file.txt.bz2"
    data = bytes(FILE_CONTENT , "utf-8" )
    with bz2.open(path , "wb" ) as f:
        f.write(data )
    return path
@pytest.fixture(scope="session" )
def gz_file( tmp_path_factory ):
    import gzip
    path = str(tmp_path_factory.mktemp("data" ) / "file.txt.gz" )
    data = bytes(FILE_CONTENT , "utf-8" )
    with gzip.open(path , "wb" ) as f:
        f.write(data )
    return path
@pytest.fixture(scope="session" )
def lz4_file( tmp_path_factory ):
    if datasets.config.LZ4_AVAILABLE:
        import lz4.frame
    path = tmp_path_factory.mktemp("data" ) / "file.txt.lz4"
    data = bytes(FILE_CONTENT , "utf-8" )
    with lz4.frame.open(path , "wb" ) as f:
        f.write(data )
    return path
@pytest.fixture(scope="session" )
def seven_zip_file( tmp_path_factory , text_file ):
    if datasets.config.PY7ZR_AVAILABLE:
        import py7zr
    path = tmp_path_factory.mktemp("data" ) / "file.txt.7z"
    with py7zr.SevenZipFile(path , "w" ) as archive:
        archive.write(text_file , arcname=os.path.basename(text_file ) )
    return path
@pytest.fixture(scope="session" )
def tar_file( tmp_path_factory , text_file ):
    import tarfile
    path = tmp_path_factory.mktemp("data" ) / "file.txt.tar"
    with tarfile.TarFile(path , "w" ) as f:
        f.add(text_file , arcname=os.path.basename(text_file ) )
    return path
@pytest.fixture(scope="session" )
def xz_file( tmp_path_factory ):
    import lzma
    path = tmp_path_factory.mktemp("data" ) / "file.txt.xz"
    data = bytes(FILE_CONTENT , "utf-8" )
    with lzma.open(path , "wb" ) as f:
        f.write(data )
    return path
@pytest.fixture(scope="session" )
def zip_file( tmp_path_factory , text_file ):
    import zipfile
    path = tmp_path_factory.mktemp("data" ) / "file.txt.zip"
    with zipfile.ZipFile(path , "w" ) as f:
        f.write(text_file , arcname=os.path.basename(text_file ) )
    return path
@pytest.fixture(scope="session" )
def zstd_file( tmp_path_factory ):
    if datasets.config.ZSTANDARD_AVAILABLE:
        import zstandard as zstd
    path = tmp_path_factory.mktemp("data" ) / "file.txt.zst"
    data = bytes(FILE_CONTENT , "utf-8" )
    with zstd.open(path , "wb" ) as f:
        f.write(data )
    return path
@pytest.fixture(scope="session" )
def xml_file( tmp_path_factory ):
    filename = tmp_path_factory.mktemp("data" ) / "file.xml"
    data = textwrap.dedent(
"\\n <?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n <tmx version=\"1.4\">\n <header segtype=\"sentence\" srclang=\"ca\" />\n <body>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>" )
with open(lowercase , "w" ) as f:
f.write(lowercase )
return filename
DATA = [
{'col_1': '0', 'col_2': 0, 'col_3': 0.0},
{'col_1': '1', 'col_2': 1, 'col_3': 1.0},
{'col_1': '2', 'col_2': 2, 'col_3': 2.0},
{'col_1': '3', 'col_2': 3, 'col_3': 3.0},
]
DATA2 = [
{'col_1': '4', 'col_2': 4, 'col_3': 4.0},
{'col_1': '5', 'col_2': 5, 'col_3': 5.0},
]
DATA_DICT_OF_LISTS = {
'col_1': ['0', '1', '2', '3'],
'col_2': [0, 1, 2, 3],
'col_3': [0.0, 1.0, 2.0, 3.0],
}
DATA_312 = [
{'col_3': 0.0, 'col_1': '0', 'col_2': 0},
{'col_3': 1.0, 'col_1': '1', 'col_2': 1},
]
DATA_STR = [
{'col_1': 's0', 'col_2': 0, 'col_3': 0.0},
{'col_1': 's1', 'col_2': 1, 'col_3': 1.0},
{'col_1': 's2', 'col_2': 2, 'col_3': 2.0},
{'col_1': 's3', 'col_2': 3, 'col_3': 3.0},
]
@pytest.fixture(scope="session" )
def _lowerCamelCase ( ) -> List[str]:
return DATA_DICT_OF_LISTS
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Union[str, Any] ) -> str:
_a = datasets.Dataset.from_dict(lowercase )
_a = str(tmp_path_factory.mktemp("data" ) / "dataset.arrow" )
dataset.map(cache_file_name=lowercase )
return path
@pytest.fixture(scope="session" )
def sqlite_path( tmp_path_factory ):
    path = str(tmp_path_factory.mktemp("data" ) / "dataset.sqlite" )
    with contextlib.closing(sqlite3.connect(path ) ) as con:
        cur = con.cursor()
        cur.execute("CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)" )
        for item in DATA:
            cur.execute("INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)" , tuple(item.values() ) )
        con.commit()
    return path
@pytest.fixture(scope="session" )
def csv_path( tmp_path_factory ):
    path = str(tmp_path_factory.mktemp("data" ) / "dataset.csv" )
    with open(path , "w" , newline="" ) as f:
        writer = csv.DictWriter(f , fieldnames=["col_1", "col_2", "col_3"] )
        writer.writeheader()
        for item in DATA:
            writer.writerow(item )
    return path
@pytest.fixture(scope="session" )
def csv2_path( tmp_path_factory ):
    path = str(tmp_path_factory.mktemp("data" ) / "dataset2.csv" )
    with open(path , "w" , newline="" ) as f:
        writer = csv.DictWriter(f , fieldnames=["col_1", "col_2", "col_3"] )
        writer.writeheader()
        for item in DATA:
            writer.writerow(item )
    return path
return path
@pytest.fixture(scope="session" )
def bz2_csv_path( csv_path , tmp_path_factory ):
    import bz2
    path = tmp_path_factory.mktemp("data" ) / "dataset.csv.bz2"
    with open(csv_path , "rb" ) as f:
        data = f.read()
    # data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(path , "wb" ) as f:
        f.write(data )
    return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Optional[int] , lowercase : Any , lowercase : Any ) -> List[str]:
_a = tmp_path_factory.mktemp("data" ) / "dataset.csv.zip"
with zipfile.ZipFile(lowercase , "w" ) as f:
f.write(lowercase , arcname=os.path.basename(lowercase ) )
f.write(lowercase , arcname=os.path.basename(lowercase ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Dict , lowercase : Any , lowercase : List[Any] ) -> Dict:
_a = tmp_path_factory.mktemp("data" ) / "dataset.csv.zip"
with zipfile.ZipFile(lowercase , "w" ) as f:
f.write(lowercase , arcname=os.path.basename(csv_path.replace(".csv" , ".CSV" ) ) )
f.write(lowercase , arcname=os.path.basename(csva_path.replace(".csv" , ".CSV" ) ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Any , lowercase : Optional[Any] , lowercase : int ) -> int:
_a = tmp_path_factory.mktemp("data" ) / "dataset_with_dir.csv.zip"
with zipfile.ZipFile(lowercase , "w" ) as f:
f.write(lowercase , arcname=os.path.join("main_dir" , os.path.basename(lowercase ) ) )
f.write(lowercase , arcname=os.path.join("main_dir" , os.path.basename(lowercase ) ) )
return path
@pytest.fixture(scope="session" )
def parquet_path( tmp_path_factory ):
    path = str(tmp_path_factory.mktemp("data" ) / "dataset.parquet" )
    schema = pa.schema(
        {
            "col_1": pa.string(),
            "col_2": pa.int64(),
            "col_3": pa.float64(),
        } )
    with open(path , "wb" ) as f:
        writer = pq.ParquetWriter(f , schema=schema )
        pa_table = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(DATA ) )] for k in DATA[0]} , schema=schema )
        writer.write_table(pa_table )
        writer.close()
    return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : str ) -> Union[str, Any]:
_a = str(tmp_path_factory.mktemp("data" ) / "dataset.json" )
_a = {"data": DATA}
with open(lowercase , "w" ) as f:
json.dump(lowercase , lowercase )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : int ) -> Union[str, Any]:
_a = str(tmp_path_factory.mktemp("data" ) / "dataset.json" )
_a = {"data": DATA_DICT_OF_LISTS}
with open(lowercase , "w" ) as f:
json.dump(lowercase , lowercase )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Optional[int] ) -> str:
_a = str(tmp_path_factory.mktemp("data" ) / "dataset.jsonl" )
with open(lowercase , "w" ) as f:
for item in DATA:
f.write(json.dumps(lowercase ) + "\n" )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : int ) -> List[str]:
_a = str(tmp_path_factory.mktemp("data" ) / "dataset2.jsonl" )
with open(lowercase , "w" ) as f:
for item in DATA:
f.write(json.dumps(lowercase ) + "\n" )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Optional[Any] ) -> Optional[Any]:
_a = str(tmp_path_factory.mktemp("data" ) / "dataset_312.jsonl" )
with open(lowercase , "w" ) as f:
for item in DATA_312:
f.write(json.dumps(lowercase ) + "\n" )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : str ) -> int:
_a = str(tmp_path_factory.mktemp("data" ) / "dataset-str.jsonl" )
with open(lowercase , "w" ) as f:
for item in DATA_STR:
f.write(json.dumps(lowercase ) + "\n" )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : List[str] , lowercase : Dict ) -> Tuple:
import gzip
_a = str(tmp_path_factory.mktemp("data" ) / "dataset.txt.gz" )
with open(lowercase , "rb" ) as orig_file:
with gzip.open(lowercase , "wb" ) as zipped_file:
zipped_file.writelines(lowercase )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Union[str, Any] , lowercase : List[Any] ) -> List[Any]:
import gzip
_a = str(tmp_path_factory.mktemp("data" ) / "dataset.jsonl.gz" )
with open(lowercase , "rb" ) as orig_file:
with gzip.open(lowercase , "wb" ) as zipped_file:
zipped_file.writelines(lowercase )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Optional[int] , lowercase : List[Any] , lowercase : int ) -> str:
_a = tmp_path_factory.mktemp("data" ) / "dataset.jsonl.zip"
with zipfile.ZipFile(lowercase , "w" ) as f:
f.write(lowercase , arcname=os.path.basename(lowercase ) )
f.write(lowercase , arcname=os.path.basename(lowercase ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Union[str, Any] , lowercase : Optional[int] , lowercase : int , lowercase : List[Any] ) -> Optional[int]:
_a = tmp_path_factory.mktemp("data" ) / "dataset_nested.jsonl.zip"
with zipfile.ZipFile(lowercase , "w" ) as f:
f.write(lowercase , arcname=os.path.join("nested" , os.path.basename(lowercase ) ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Optional[int] , lowercase : List[str] , lowercase : str ) -> Optional[Any]:
_a = tmp_path_factory.mktemp("data" ) / "dataset_with_dir.jsonl.zip"
with zipfile.ZipFile(lowercase , "w" ) as f:
f.write(lowercase , arcname=os.path.join("main_dir" , os.path.basename(lowercase ) ) )
f.write(lowercase , arcname=os.path.join("main_dir" , os.path.basename(lowercase ) ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Tuple , lowercase : Any , lowercase : Optional[int] ) -> int:
_a = tmp_path_factory.mktemp("data" ) / "dataset.jsonl.tar"
with tarfile.TarFile(lowercase , "w" ) as f:
f.add(lowercase , arcname=os.path.basename(lowercase ) )
f.add(lowercase , arcname=os.path.basename(lowercase ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : str , lowercase : List[str] , lowercase : Union[str, Any] , lowercase : Union[str, Any] ) -> Optional[Any]:
_a = tmp_path_factory.mktemp("data" ) / "dataset_nested.jsonl.tar"
with tarfile.TarFile(lowercase , "w" ) as f:
f.add(lowercase , arcname=os.path.join("nested" , os.path.basename(lowercase ) ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : int ) -> str:
_a = ["0", "1", "2", "3"]
_a = str(tmp_path_factory.mktemp("data" ) / "dataset.txt" )
with open(lowercase , "w" ) as f:
for item in data:
f.write(item + "\n" )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : str ) -> Dict:
_a = ["0", "1", "2", "3"]
_a = str(tmp_path_factory.mktemp("data" ) / "dataset2.txt" )
with open(lowercase , "w" ) as f:
for item in data:
f.write(item + "\n" )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Union[str, Any] ) -> Dict:
_a = ["0", "1", "2", "3"]
_a = tmp_path_factory.mktemp("data" ) / "dataset.abc"
with open(lowercase , "w" ) as f:
for item in data:
f.write(item + "\n" )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Any , lowercase : Union[str, Any] , lowercase : Any ) -> Optional[Any]:
_a = tmp_path_factory.mktemp("data" ) / "dataset.text.zip"
with zipfile.ZipFile(lowercase , "w" ) as f:
f.write(lowercase , arcname=os.path.basename(lowercase ) )
f.write(lowercase , arcname=os.path.basename(lowercase ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Dict , lowercase : List[str] , lowercase : List[str] ) -> Union[str, Any]:
_a = tmp_path_factory.mktemp("data" ) / "dataset_with_dir.text.zip"
with zipfile.ZipFile(lowercase , "w" ) as f:
f.write(lowercase , arcname=os.path.join("main_dir" , os.path.basename(lowercase ) ) )
f.write(lowercase , arcname=os.path.join("main_dir" , os.path.basename(lowercase ) ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Tuple , lowercase : int , lowercase : str ) -> int:
_a = tmp_path_factory.mktemp("data" ) / "dataset.ext.zip"
with zipfile.ZipFile(lowercase , "w" ) as f:
f.write(lowercase , arcname=os.path.basename("unsupported.ext" ) )
f.write(lowercase , arcname=os.path.basename("unsupported_2.ext" ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : List[Any] ) -> Any:
_a = "\n".join(["First", "Second\u2029with Unicode new line", "Third"] )
_a = str(tmp_path_factory.mktemp("data" ) / "dataset_with_unicode_new_lines.txt" )
with open(lowercase , "w" , encoding="utf-8" ) as f:
f.write(lowercase )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( ) -> Optional[Any]:
return os.path.join("tests" , "features" , "data" , "test_image_rgb.jpg" )
@pytest.fixture(scope="session" )
def _lowerCamelCase ( ) -> Optional[int]:
return os.path.join("tests" , "features" , "data" , "test_audio_44100.wav" )
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Any , lowercase : str ) -> Dict:
_a = tmp_path_factory.mktemp("data" ) / "dataset.img.zip"
with zipfile.ZipFile(lowercase , "w" ) as f:
f.write(lowercase , arcname=os.path.basename(lowercase ) )
f.write(lowercase , arcname=os.path.basename(lowercase ).replace(".jpg" , "2.jpg" ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : str ) -> str:
_a = tmp_path_factory.mktemp("data_dir" )
(data_dir / "subdir").mkdir()
with open(data_dir / "subdir" / "train.txt" , "w" ) as f:
f.write("foo\n" * 10 )
with open(data_dir / "subdir" / "test.txt" , "w" ) as f:
f.write("bar\n" * 10 )
# hidden file
with open(data_dir / "subdir" / ".test.txt" , "w" ) as f:
f.write("bar\n" * 10 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / ".subdir" / "train.txt" , "w" ) as f:
f.write("foo\n" * 10 )
with open(data_dir / ".subdir" / "test.txt" , "w" ) as f:
f.write("bar\n" * 10 )
return data_dir
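# Usage sketch: pytest injects these session-scoped fixtures by name, so a test
# module in the same suite could consume them like this (illustrative only):
#
#     def test_csv_has_header(csv_path):
#         with open(csv_path, newline="") as f:
#             assert f.readline().strip() == "col_1,col_2,col_3"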
| 346 | 0 |
'''simple docstring'''
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def main():
    parser = ArgumentParser("Transformers CLI tool" , usage="transformers-cli <command> [<args>]" )
    commands_parser = parser.add_subparsers(help="transformers-cli command helpers" )
    # Register commands
    ConvertCommand.register_subcommand(commands_parser )
    DownloadCommand.register_subcommand(commands_parser )
    EnvironmentCommand.register_subcommand(commands_parser )
    RunCommand.register_subcommand(commands_parser )
    ServeCommand.register_subcommand(commands_parser )
    UserCommands.register_subcommand(commands_parser )
    AddNewModelCommand.register_subcommand(commands_parser )
    AddNewModelLikeCommand.register_subcommand(commands_parser )
    LfsCommands.register_subcommand(commands_parser )
    PTtoTFCommand.register_subcommand(commands_parser )
    # Let's go
    args = parser.parse_args()
    if not hasattr(args , "func" ):
        parser.print_help()
        exit(1 )
    # Run
    service = args.func(args )
    service.run()
if __name__ == "__main__":
main()
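# Invocation note: this module backs the `transformers-cli` console script, so the
# commands registered above are normally reached as e.g. `transformers-cli env`
# or `transformers-cli download <model>` rather than by importing `main` directly.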
| 351 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutXLMProcessor(ProcessorMixin ):
    """simple docstring"""
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'LayoutLMv2ImageProcessor'
    tokenizer_class = ('LayoutXLMTokenizer', 'LayoutXLMTokenizerFast')
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead." , FutureWarning , )
            feature_extractor = kwargs.pop("feature_extractor" )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`." )
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`." )
        super().__init__(image_processor , tokenizer )
    def __call__( self , images , text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , boxes: Union[List[List[int]], List[List[List[int]]]] = None , word_labels: Optional[Union[List[int], List[List[int]]]] = None , add_special_tokens: bool = True , padding: Union[bool, str, PaddingStrategy] = False , truncation: Union[bool, str, TruncationStrategy] = None , max_length: Optional[int] = None , stride: int = 0 , pad_to_multiple_of: Optional[int] = None , return_token_type_ids: Optional[bool] = None , return_attention_mask: Optional[bool] = None , return_overflowing_tokens: bool = False , return_special_tokens_mask: bool = False , return_offsets_mapping: bool = False , return_length: bool = False , verbose: bool = True , return_tensors: Optional[Union[str, TensorType]] = None , **kwargs , ) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes "
                "if you initialized the image processor with apply_ocr set to True." )
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True." )
        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError("You cannot return overflowing tokens without returning the offsets mapping." )
        # first, apply the image processor
        features = self.image_processor(images=images , return_tensors=return_tensors )
        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text , str ):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]
        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features["boxes"] , word_labels=word_labels , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        # add pixel values
        images = features.pop("pixel_values" )
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images , encoded_inputs["overflow_to_sample_mapping"] )
        encoded_inputs["image"] = images
        return encoded_inputs
    def get_overflowing_images( self , images , overflow_to_sample_mapping ):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx] )
        if len(images_with_overflow ) != len(overflow_to_sample_mapping ):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f' {len(images_with_overflow )} and {len(overflow_to_sample_mapping )}' )
        return images_with_overflow
    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def model_input_names( self ):
        return ["input_ids", "bbox", "attention_mask", "image"]
    @property
    def feature_extractor_class( self ):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , FutureWarning , )
        return self.image_processor_class
    @property
    def feature_extractor( self ):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , FutureWarning , )
        return self.image_processor
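# Usage sketch: OCR-based preprocessing of a document image. Assumes the
# "microsoft/layoutxlm-base" checkpoint, a local "document.png" (hypothetical
# file), and pytesseract installed since `apply_ocr` defaults to True.
if __name__ == "__main__":
    from PIL import Image

    processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base" )
    image = Image.open("document.png" ).convert("RGB" )
    encoding = processor(image , return_tensors="pt" )
    print(encoding.keys() )  # input_ids, attention_mask, bbox, image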
| 346 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
logger = logging.get_logger(__name__)
IMAGEGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai/imagegpt-small": "",
    "openai/imagegpt-medium": "",
    "openai/imagegpt-large": "",
}
class ImageGPTConfig(PretrainedConfig ):
"""simple docstring"""
__a ="""imagegpt"""
__a =["""past_key_values"""]
__a ={
"""hidden_size""": """n_embd""",
"""max_position_embeddings""": """n_positions""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
    def __init__( self , vocab_size=512 + 1 , n_positions=32 * 32 , n_embd=512 , n_layer=24 , n_head=8 , n_inner=None , activation_function="quick_gelu" , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , scale_attn_weights=True , use_cache=True , tie_word_embeddings=False , scale_attn_by_inverse_layer_idx=False , reorder_and_upcast_attn=False , **kwargs , ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.tie_word_embeddings = tie_word_embeddings
        super().__init__(tie_word_embeddings=tie_word_embeddings , **kwargs )
class ImageGPTOnnxConfig (OnnxConfig ):
    @property
    def inputs( self ):
return OrderedDict(
[
("input_ids", {0: "batch", 1: "sequence"}),
] )
    def generate_dummy_inputs( self , preprocessor , batch_size: int = 1 , seq_length: int = -1 , is_pair: bool = False , framework = None , num_channels: int = 3 , image_width: int = 32 , image_height: int = 32 , ):
        input_image = self._generate_dummy_images(batch_size , num_channels , image_height , image_width )
        inputs = dict(preprocessor(images=input_image , return_tensors=framework ) )
        return inputs
| 352 |
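Assuming this is the ImageGPTConfig that ships with transformers, a short usage sketch of the attribute_map aliases defined above:

from transformers import ImageGPTConfig

config = ImageGPTConfig()
assert config.hidden_size == config.n_embd        # alias resolved via attribute_map
assert config.num_hidden_layers == config.n_layer
print(config.vocab_size)  # 513: 512 color clusters plus 1 start-of-sequence token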
'''simple docstring'''
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '▁'
VOCAB_FILES_NAMES = {
'vocab_file': 'vocab.json',
'spm_file': 'sentencepiece.bpe.model',
}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/s2t-small-librispeech-asr': (
'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json'
),
},
'spm_file': {
'facebook/s2t-small-librispeech-asr': (
'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model'
)
},
}
MAX_MODEL_INPUT_SIZES = {
'facebook/s2t-small-librispeech-asr': 10_24,
}
MUSTC_LANGS = ['pt', 'fr', 'ru', 'nl', 'ro', 'it', 'es', 'de']
LANGUAGES = {'mustc': MUSTC_LANGS}
class Speech2TextTokenizer (PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = MAX_MODEL_INPUT_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    prefix_tokens: List[int] = []
    def __init__( self , vocab_file , spm_file , bos_token="<s>" , eos_token="</s>" , pad_token="<pad>" , unk_token="<unk>" , do_upper_case=False , do_lower_case=False , tgt_lang=None , lang_codes=None , sp_model_kwargs: Optional[Dict[str, Any]] = None , **kwargs , ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , pad_token=pad_token , do_upper_case=do_upper_case , do_lower_case=do_lower_case , tgt_lang=tgt_lang , lang_codes=lang_codes , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.do_upper_case = do_upper_case
        self.do_lower_case = do_lower_case
        self.encoder = load_json(vocab_file )
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file , self.sp_model_kwargs )
        if lang_codes is not None:
            self.lang_codes = lang_codes
            self.langs = LANGUAGES[lang_codes]
            self.lang_tokens = [f'<lang:{lang}>' for lang in self.langs]
            self.lang_code_to_id = {lang: self.sp_model.PieceToId(f'<lang:{lang}>' ) for lang in self.langs}
            self._additional_special_tokens = self.lang_tokens
            self._tgt_lang = tgt_lang if tgt_lang is not None else self.langs[0]
            self.set_tgt_lang_special_tokens(self._tgt_lang )
        else:
            self.lang_code_to_id = {}
@property
    def vocab_size( self ) -> int:
        return len(self.encoder )
@property
    def tgt_lang( self ) -> str:
        return self._tgt_lang
@tgt_lang.setter
    def tgt_lang( self , new_tgt_lang ) -> None:
        self._tgt_lang = new_tgt_lang
        self.set_tgt_lang_special_tokens(new_tgt_lang )
    def set_tgt_lang_special_tokens( self , tgt_lang: str ) -> None:
        lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [lang_code_id]
    def _tokenize( self , text: str ) -> List[str]:
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id( self , token ):
        return self.encoder.get(token , self.encoder[self.unk_token] )
    def _convert_id_to_token( self , index: int ) -> str:
        return self.decoder.get(index , self.unk_token )
    def convert_tokens_to_string( self , tokens ) -> str:
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                decoded = self.sp_model.decode(current_sub_tokens )
                out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
        decoded = self.sp_model.decode(current_sub_tokens )
        out_string += decoded.upper() if self.do_upper_case else decoded
        return out_string.strip()
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + [self.eos_token_id]
    def get_special_tokens_mask( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None , already_has_special_tokens: bool = False ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        prefix_ones = [1] * len(self.prefix_tokens )
        suffix_ones = [1]
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0 )) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0 )) + ([0] * len(token_ids_1 )) + suffix_ones
    def get_vocab( self ) -> Dict:
        vocab = self.encoder.copy()
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __getstate__( self ) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__( self , d: Dict ) -> None:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            self.sp_model_kwargs = {}
        self.sp_model = load_spm(self.spm_file , self.sp_model_kwargs )
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ):
        save_dir = Path(save_directory )
        assert save_dir.is_dir(), f'{save_directory} should be a directory'
        vocab_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
        )
        spm_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
        )
        save_json(self.encoder , vocab_save_path )
        if os.path.abspath(self.spm_file ) != os.path.abspath(spm_save_path ) and os.path.isfile(self.spm_file ):
            copyfile(self.spm_file , spm_save_path )
        elif not os.path.isfile(self.spm_file ):
            with open(spm_save_path , "wb" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (str(vocab_save_path ), str(spm_save_path ))
def load_spm( path: str , sp_model_kwargs: Dict[str, Any] ) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs )
    spm.Load(str(path ) )
    return spm
def load_json( path: str ) -> Union[Dict, List]:
    with open(path , "r" ) as f:
        return json.load(f )
def save_json( data , path: str ) -> None:
    with open(path , "w" ) as f:
        json.dump(data , f , indent=2 )
| 346 | 0 |
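A quick round-trip check for the save_json/load_json helpers restored above; it uses only the standard library:

import os
import tempfile

vocab = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
with tempfile.TemporaryDirectory() as tmp:
    path = os.path.join(tmp, "vocab.json")
    save_json(vocab, path)        # writes indented JSON
    assert load_json(path) == vocab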
'''simple docstring'''
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class DEISMultistepSchedulerTest (SchedulerCommonTest ):
    scheduler_classes = (DEISMultistepScheduler,)
    forward_default_kwargs = (('num_inference_steps', 25),)
    def get_scheduler_config( self , **kwargs ):
        config = {
            "num_train_timesteps": 10_00,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
        }
        config.update(**kwargs )
        return config
    def check_over_configs( self , time_step=0 , **config ):
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop("num_inference_steps" , None )
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config )
            scheduler = scheduler_class(**scheduler_config )
            scheduler.set_timesteps(num_inference_steps )
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname )
                new_scheduler = scheduler_class.from_pretrained(tmpdirname )
                new_scheduler.set_timesteps(num_inference_steps )
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]
            output , new_output = sample, sample
            for t in range(time_step , time_step + scheduler.config.solver_order + 1 ):
                output = scheduler.step(residual , t , output , **kwargs ).prev_sample
                new_output = new_scheduler.step(residual , t , new_output , **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def UpperCamelCase__ ( self : List[str] ):
pass
    def check_over_forward( self , time_step=0 , **forward_kwargs ):
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop("num_inference_steps" , None )
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config )
            scheduler.set_timesteps(num_inference_steps )
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname )
                new_scheduler = scheduler_class.from_pretrained(tmpdirname )
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps )
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]
            output = scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            new_output = new_scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
    def full_loop( self , scheduler=None , **config ):
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config )
            scheduler = scheduler_class(**scheduler_config )
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config )
        scheduler = scheduler_class(**scheduler_config )
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps )
        for i, t in enumerate(scheduler.timesteps ):
            residual = model(sample , t )
            sample = scheduler.step(residual , t , sample ).prev_sample
        return sample
    def UpperCamelCase__ ( self : Any ):
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop("num_inference_steps" , None )
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config )
            sample = self.dummy_sample
            residual = 0.1 * sample
            if num_inference_steps is not None and hasattr(scheduler , "set_timesteps" ):
                scheduler.set_timesteps(num_inference_steps )
            elif num_inference_steps is not None and not hasattr(scheduler , "set_timesteps" ):
                scheduler.num_inference_steps = num_inference_steps
            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]
            output_0 = scheduler.step(residual , time_step_0 , sample , **kwargs ).prev_sample
            output_1 = scheduler.step(residual , time_step_1 , sample , **kwargs ).prev_sample
            self.assertEqual(output_0.shape , sample.shape )
            self.assertEqual(output_0.shape , output_1.shape )
    def UpperCamelCase__ ( self : Optional[Any] ):
        # make sure that iterating over schedulers with same config names gives same results
        # for defaults
        scheduler = DEISMultistepScheduler(**self.get_scheduler_config() )
        sample = self.full_loop(scheduler=scheduler )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 0.23916 ) < 1e-3
        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config )
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config )
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config )
        scheduler = DEISMultistepScheduler.from_config(scheduler.config )
        sample = self.full_loop(scheduler=scheduler )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 0.23916 ) < 1e-3
def UpperCamelCase__ ( self : Tuple ):
for timesteps in [25, 50, 1_00, 9_99, 10_00]:
            self.check_over_configs(num_train_timesteps=timesteps )
    def UpperCamelCase__ ( self : Optional[int] ):
        self.check_over_configs(thresholding=False )
        for order in [1, 2, 3]:
            for solver_type in ["logrho"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True , prediction_type=prediction_type , sample_max_value=threshold , algorithm_type="deis" , solver_order=order , solver_type=solver_type , )
def UpperCamelCase__ ( self : Tuple ):
for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type )
def UpperCamelCase__ ( self : Optional[int] ):
for algorithm_type in ["deis"]:
for solver_type in ["logrho"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=order , solver_type=solver_type , prediction_type=prediction_type , algorithm_type=algorithm_type , )
                        sample = self.full_loop(
                            solver_order=order , solver_type=solver_type , prediction_type=prediction_type , algorithm_type=algorithm_type , )
                        assert not torch.isnan(sample ).any(), "Samples have nan numbers"
def UpperCamelCase__ ( self : Any ):
        self.check_over_configs(lower_order_final=True )
        self.check_over_configs(lower_order_final=False )
def UpperCamelCase__ ( self : List[str] ):
for num_inference_steps in [1, 2, 3, 5, 10, 50, 1_00, 9_99, 10_00]:
            self.check_over_forward(num_inference_steps=num_inference_steps , time_step=0 )
def UpperCamelCase__ ( self : List[Any] ):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_mean.item() - 0.23916 ) < 1e-3
def UpperCamelCase__ ( self : Tuple ):
        sample = self.full_loop(prediction_type="v_prediction" )
        result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_mean.item() - 0.091 ) < 1e-3
def UpperCamelCase__ ( self : List[str] ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True , dynamic_thresholding_ratio=0 )
        scheduler = scheduler_class(**scheduler_config )
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps )
        for i, t in enumerate(scheduler.timesteps ):
            residual = model(sample , t )
            sample = scheduler.step(residual , t , sample ).prev_sample
        assert sample.dtype == torch.float16
| 353 |
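The tests above all drive the same core loop. A hedged standalone sketch of it, with a constant model output standing in for a real UNet prediction (requires torch and diffusers):

import torch
from diffusers import DEISMultistepScheduler

scheduler = DEISMultistepScheduler(num_train_timesteps=1000, solver_order=2)
scheduler.set_timesteps(10)
sample = torch.randn(1, 3, 8, 8)
for t in scheduler.timesteps:
    model_output = 0.1 * sample  # fake epsilon prediction
    sample = scheduler.step(model_output, t, sample).prev_sample
print(sample.shape)  # torch.Size([1, 3, 8, 8])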
'''simple docstring'''
from manim import *
class __SCREAMING_SNAKE_CASE (Scene ):
"""simple docstring"""
    def construct( self ):
        mem = Rectangle(height=0.5 , width=0.5 )
        fill = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
        cpu_left_col_base = [mem.copy() for i in range(6 )]
        cpu_right_col_base = [mem.copy() for i in range(6 )]
_a = VGroup(*__a ).arrange(__a , buff=0 )
_a = VGroup(*__a ).arrange(__a , buff=0 )
_a = VGroup(__a , __a ).arrange(__a , buff=0 )
_a = Text("CPU" , font_size=24 )
        cpu = Group(__a , __a ).arrange(__a , buff=0.5 , aligned_edge=__a )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__a )
_a = [mem.copy() for i in range(4 )]
_a = VGroup(*__a ).arrange(__a , buff=0 )
_a = Text("GPU" , font_size=24 )
        gpu = Group(__a , __a ).arrange(__a , buff=0.5 , aligned_edge=__a )
gpu.move_to([-1, -1, 0] )
self.add(__a )
_a = [mem.copy() for i in range(6 )]
_a = VGroup(*__a ).arrange(__a , buff=0 )
_a = Text("Model" , font_size=24 )
        model = Group(__a , __a ).arrange(__a , buff=0.5 , aligned_edge=__a )
model.move_to([3, -1.0, 0] )
self.add(__a )
        cpu_targs = []
for i, rect in enumerate(__a ):
rect.set_stroke(__a )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
            cpu_target = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(YELLOW , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__a )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=__a , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=__a , buff=0.0 )
self.add(__a )
cpu_targs.append(__a )
_a = [mem.copy() for i in range(6 )]
_a = VGroup(*__a ).arrange(__a , buff=0 )
_a = Text("Loaded Checkpoint" , font_size=24 )
        checkpoint = Group(__a , __a ).arrange(__a , aligned_edge=__a , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
        key = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
        key_text = MarkupText(
f'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(__a , __a )
        blue_text = MarkupText(
f'<span fgcolor=\'{BLUE}\'>●</span> Checkpoint' , font_size=18 , )
blue_text.next_to(__a , DOWN * 2.4 , aligned_edge=key_text.get_left() )
        step_a = MarkupText(
f'Next, a <i><span fgcolor="{BLUE}">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor="{BLUE}">single shard</span>.' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(__a ) , Write(__a ) )
self.play(Write(__a , run_time=1 ) , Create(__a , run_time=1 ) )
        first_animations = []
        second_animations = []
for i, rect in enumerate(__a ):
            target = fill.copy().set_fill(BLUE , opacity=0.7 )
target.move_to(__a )
first_animations.append(GrowFromCenter(__a , run_time=1 ) )
            cpu_target = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(__a , run_time=1.5 ) )
self.play(*__a )
self.play(*__a )
self.wait()
| 346 | 0 |
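The scene above builds its memory diagrams by arranging copied Rectangles into VGroups. A minimal, self-contained Manim sketch of the same building blocks (class name and labels are illustrative):

from manim import DOWN, RIGHT, Rectangle, Scene, Text, VGroup

class MiniMemoryDiagram(Scene):
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        blocks = VGroup(*[mem.copy() for _ in range(4)]).arrange(RIGHT, buff=0)
        label = Text("GPU", font_size=24).next_to(blocks, DOWN)
        self.add(blocks, label)

# render with: manim -pql this_file.py MiniMemoryDiagram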
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class OnnxRuntimeModel (metaclass=DummyObject ):
    _backends = ['onnx']
def __init__( self : List[str] , *__a : Tuple , **__a : Optional[Any] ):
requires_backends(self , ["onnx"] )
@classmethod
def UpperCamelCase__ ( cls : str , *__a : Tuple , **__a : Union[str, Any] ):
requires_backends(cls , ["onnx"] )
@classmethod
def UpperCamelCase__ ( cls : Optional[Any] , *__a : Optional[int] , **__a : int ):
requires_backends(cls , ["onnx"] )
| 354 |
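These placeholders let the package import cleanly when the onnx extra is missing, while any attempt to actually use the class fails fast with install instructions. A sketch of the pattern, assuming the diffusers-style DummyObject/requires_backends utilities the snippet imports (the class name is illustrative):

from diffusers.utils import DummyObject, requires_backends

class NeedsOnnx(metaclass=DummyObject):
    _backends = ["onnx"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["onnx"])

# NeedsOnnx() raises an ImportError explaining how to install the onnx backend.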
'''simple docstring'''
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
client = WebClient(token=os.environ['CI_SLACK_BOT_TOKEN'])
def handle_test_results( test_results ):
    expressions = test_results.split(" " )
    failed = 0
    success = 0
    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if "=" in expressions[-1] else expressions[-1]
    for i, expression in enumerate(expressions ):
if "failed" in expression:
failed += int(expressions[i - 1] )
if "passed" in expression:
success += int(expressions[i - 1] )
return failed, success, time_spent
def extract_first_line_failure( failures_short_lines ):
    failures = {}
    file = None
    in_error = False
    for line in failures_short_lines.split("\n" ):
        if re.search(r"_ \[doctest\]" , line ):
            in_error = True
            file = line.split(" " )[2]
        elif in_error and not line.split(" " )[0].isdigit():
            failures[file] = line
            in_error = False
return failures
class Message:
    def __init__( self , title: str , doc_test_results: Dict ):
        self.title = title
        self._time_spent = doc_test_results["time_spent"].split("," )[0]
        self.n_success = doc_test_results["success"]
        self.n_failures = doc_test_results["failures"]
        self.n_tests = self.n_success + self.n_failures
        # Failures and success of the modeling tests
        self.doc_test_results = doc_test_results
@property
    def time( self ) -> str:
        time_spent = [self._time_spent]
        total_secs = 0
        for time in time_spent:
            time_parts = time.split(":" )
            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(time_parts ) == 1:
                time_parts = [0, 0, time_parts[0]]
            hours , minutes , seconds = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
            total_secs += hours * 36_00 + minutes * 60 + seconds
        hours , minutes , seconds = total_secs // 36_00, (total_secs % 36_00) // 60, total_secs % 60
        return f'{int(hours )}h{int(minutes )}m{int(seconds )}s'
@property
    def header( self ):
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
    def no_failures( self ):
return {
"type": "section",
"text": {
"type": "plain_text",
"text": f'🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.',
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
@property
    def failures( self ):
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
f'There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in'
f' {self.time}.'
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
@property
    def category_failures( self ):
        line_length = 40
        category_failures = {k: v["failed"] for k, v in doc_test_results.items() if isinstance(v , dict )}
        report = ""
        for category, failures in category_failures.items():
            if len(failures ) == 0:
                continue
            if report != "":
                report += "\n\n"
            report += f'*{category} failures*:'.ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
            report += "`"
            report += "`\n`".join(failures )
            report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": f'The following examples had failures:\n\n\n{report}\n',
},
}
@property
    def payload( self ) -> str:
        blocks = [self.header]
if self.n_failures > 0:
blocks.append(self.failures )
if self.n_failures > 0:
blocks.extend([self.category_failures] )
if self.n_failures == 0:
blocks.append(self.no_failures )
return json.dumps(__a )
@staticmethod
    def error_out( ):
        payload = [
{
"type": "section",
"text": {
"type": "plain_text",
"text": "There was an issue running the tests.",
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
]
print("Sending the following payload" )
print(json.dumps({"blocks": json.loads(__a )} ) )
client.chat_postMessage(
channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"] , text="There was an issue running the tests." , blocks=__a , )
    def post( self ):
        print("Sending the following payload" )
        print(json.dumps({"blocks": json.loads(self.payload )} ) )
        text = f'{self.n_failures} failures out of {self.n_tests} tests,' if self.n_failures else "All tests passed."
        self.thread_ts = client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"] , blocks=self.payload , text=text , )
    def get_reply_blocks( self , job_name , job_link , failures , text ):
        failures_text = ""
        for key, value in failures.items():
            value = value[:2_00] + " [Truncated]" if len(value ) > 2_50 else value
            failures_text += f'*{key}*\n_{value}_\n\n'
        title = job_name
        content = {"type": "section", "text": {"type": "mrkdwn", "text": text}}
        if job_link is not None:
            content["accessory"] = {
                "type": "button",
                "text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True},
                "url": job_link,
            }
return [
{"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
content,
{"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
]
    def post_reply( self ):
        if self.thread_ts is None:
            raise ValueError("Can only post reply if a post has been made." )
        job_link = self.doc_test_results.pop("job_link" )
        self.doc_test_results.pop("failures" )
        self.doc_test_results.pop("success" )
        self.doc_test_results.pop("time_spent" )
        sorted_dict = sorted(self.doc_test_results.items() , key=lambda t : t[0] )
        for job, job_result in sorted_dict:
            if len(job_result["failures"] ):
                text = f'*Num failures* :{len(job_result["failed"] )} \n'
                failures = job_result["failures"]
                blocks = self.get_reply_blocks(job , job_link , failures , text=text )
                print("Sending the following reply" )
                print(json.dumps({"blocks": blocks} ) )
                client.chat_postMessage(
                    channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"] , text=f'Results for {job}' , blocks=blocks , thread_ts=self.thread_ts["ts"] , )
                time.sleep(1 )
def get_job_links( ):
    run_id = os.environ["GITHUB_RUN_ID"]
    url = f'https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100'
    result = requests.get(url ).json()
    jobs = {}
    try:
        jobs.update({job["name"]: job["html_url"] for job in result["jobs"]} )
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100 )
        for i in range(pages_to_iterate_over ):
            result = requests.get(url + f'&page={i + 2}' ).json()
            jobs.update({job["name"]: job["html_url"] for job in result["jobs"]} )
        return jobs
    except Exception as e:
        print("Unknown error, could not fetch links." , e )
        return {}
def retrieve_artifact( name: str ) -> Dict:
    _artifact = {}
    if os.path.exists(name ):
        files = os.listdir(name )
        for file in files:
            try:
                with open(os.path.join(name , file ) , encoding="utf-8" ) as f:
                    _artifact[file.split(".")[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(f'Could not open {os.path.join(name , file )}.' ) from e
    return _artifact
def retrieve_available_artifacts( ):
    class Artifact:
        def __init__( self , name: str ):
            self.name = name
            self.paths = []
        def __str__( self ):
            return self.name
        def add_path( self , path: str ):
            self.paths.append({"name": self.name, "path": path} )
    _available_artifacts = {}
    directories = filter(os.path.isdir , os.listdir() )
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name )
        _available_artifacts[artifact_name].add_path(artifact_name )
    return _available_artifacts
if __name__ == "__main__":
    github_actions_job_links = get_job_links()
    available_artifacts = retrieve_available_artifacts()
    docs = collections.OrderedDict(
[
('*.py', 'API Examples'),
('*.md', 'MD Examples'),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
    doc_test_results = {
v: {
'failed': [],
'failures': {},
}
for v in docs.values()
}
# Link to the GitHub Action job
    doc_test_results['job_link'] = github_actions_job_links.get('run_doctests')
    artifact_path = available_artifacts['doc_tests_gpu_test_reports'].paths[0]
    artifact = retrieve_artifact(artifact_path['name'])
if "stats" in artifact:
        failed, success, time_spent = handle_test_results(artifact['stats'])
        doc_test_results['failures'] = failed
        doc_test_results['success'] = success
        doc_test_results['time_spent'] = time_spent[1:-1] + ', '
        all_failures = extract_first_line_failure(artifact['failures_short'])
for line in artifact["summary_short"].split('\n'):
if re.search('FAILED', line):
                line = line.replace('FAILED ', '')
                line = line.split()[0].replace('\n', '')
                if "::" in line:
                    file_path , test = line.split('::')
                else:
                    file_path , test = line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
                        category = docs[file_regex]
doc_test_results[category]["failed"].append(test)
                        failure = all_failures[test] if test in all_failures else 'N/A'
                        doc_test_results[category]['failures'][test] = failure
break
    message = Message('🤗 Results of the doc tests.', doc_test_results)
message.post()
message.post_reply()
| 346 | 0 |
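A worked example of handle_test_results above on a typical pytest summary string:

summary = "= 3 failed, 97 passed in 0:01:02 ="
failed, success, time_spent = handle_test_results(summary)
assert (failed, success) == (3, 97)
print(time_spent)  # '0:01:02', taken from expressions[-2] because the line ends with '='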