# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Fine-pruning Masked BERT on sequence classification on GLUE.""" import argparse import glob import json import logging import os import random import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset from torch.utils.data.distributed import DistributedSampler from tqdm import tqdm, trange from emmental import MaskedBertConfig, MaskedBertForSequenceClassification from transformers import ( WEIGHTS_NAME, AdamW, BertConfig, BertForSequenceClassification, BertTokenizer, get_linear_schedule_with_warmup, ) from transformers import glue_compute_metrics as compute_metrics from transformers import glue_convert_examples_to_features as convert_examples_to_features from transformers import glue_output_modes as output_modes from transformers import glue_processors as processors try: from torch.utils.tensorboard import SummaryWriter except ImportError: from tensorboardX import SummaryWriter logger = logging.getLogger(__name__) MODEL_CLASSES = { "bert": (BertConfig, BertForSequenceClassification, BertTokenizer), "masked_bert": (MaskedBertConfig, MaskedBertForSequenceClassification, BertTokenizer), } def set_seed(args): random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) if args.n_gpu > 0: torch.cuda.manual_seed_all(args.seed) def schedule_threshold( step: int, total_step: int, warmup_steps: int, initial_threshold: float, final_threshold: float, initial_warmup: int, final_warmup: int, final_lambda: float, ): if step <= initial_warmup * warmup_steps: threshold = initial_threshold elif step > (total_step - final_warmup * warmup_steps): threshold = final_threshold else: spars_warmup_steps = initial_warmup * warmup_steps spars_schedu_steps = (final_warmup + initial_warmup) * warmup_steps mul_coeff = 1 - (step - spars_warmup_steps) / (total_step - spars_schedu_steps) threshold = final_threshold + (initial_threshold - final_threshold) * (mul_coeff ** 3) regu_lambda = final_lambda * threshold / final_threshold return threshold, regu_lambda def regularization(model: nn.Module, mode: str): regu, counter = 0, 0 for name, param in model.named_parameters(): if "mask_scores" in name: if mode == "l1": regu += torch.norm(torch.sigmoid(param), p=1) / param.numel() elif mode == "l0": regu += torch.sigmoid(param - 2 / 3 * np.log(0.1 / 1.1)).sum() / param.numel() else: ValueError("Don't know this mode.") counter += 1 return regu / counter def train(args, train_dataset, model, tokenizer, teacher=None): """ Train the model """ if args.local_rank in [-1, 0]: tb_writer = SummaryWriter(log_dir=args.output_dir) args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu) train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset) 
train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size) if args.max_steps > 0: t_total = args.max_steps args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1 else: t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs # Prepare optimizer and schedule (linear warmup and decay) no_decay = ["bias", "LayerNorm.weight"] optimizer_grouped_parameters = [ { "params": [p for n, p in model.named_parameters() if "mask_score" in n and p.requires_grad], "lr": args.mask_scores_learning_rate, }, { "params": [ p for n, p in model.named_parameters() if "mask_score" not in n and p.requires_grad and not any(nd in n for nd in no_decay) ], "lr": args.learning_rate, "weight_decay": args.weight_decay, }, { "params": [ p for n, p in model.named_parameters() if "mask_score" not in n and p.requires_grad and any(nd in n for nd in no_decay) ], "lr": args.learning_rate, "weight_decay": 0.0, }, ] optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon) scheduler = get_linear_schedule_with_warmup( optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total ) # Check if saved optimizer or scheduler states exist if os.path.isfile(os.path.join(args.model_name_or_path, "optimizer.pt")) and os.path.isfile( os.path.join(args.model_name_or_path, "scheduler.pt") ): # Load in optimizer and scheduler states optimizer.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "optimizer.pt"))) scheduler.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "scheduler.pt"))) if args.fp16: try: from apex import amp except ImportError: raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.") model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level) # multi-gpu training (should be after apex fp16 initialization) if args.n_gpu > 1: model = torch.nn.DataParallel(model) # Distributed training (should be after apex fp16 initialization) if args.local_rank != -1: model = torch.nn.parallel.DistributedDataParallel( model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True, ) # Train! logger.info("***** Running training *****") logger.info(" Num examples = %d", len(train_dataset)) logger.info(" Num Epochs = %d", args.num_train_epochs) logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size) logger.info( " Total train batch size (w. 
parallel, distributed & accumulation) = %d", args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1), ) logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps) logger.info(" Total optimization steps = %d", t_total) # Distillation if teacher is not None: logger.info(" Training with distillation") global_step = 0 # Global TopK if args.global_topk: threshold_mem = None epochs_trained = 0 steps_trained_in_current_epoch = 0 # Check if continuing training from a checkpoint if os.path.exists(args.model_name_or_path): # set global_step to global_step of last saved checkpoint from model path try: global_step = int(args.model_name_or_path.split("-")[-1].split("/")[0]) except ValueError: global_step = 0 epochs_trained = global_step // (len(train_dataloader) // args.gradient_accumulation_steps) steps_trained_in_current_epoch = global_step % (len(train_dataloader) // args.gradient_accumulation_steps) logger.info(" Continuing training from checkpoint, will skip to saved global_step") logger.info(" Continuing training from epoch %d", epochs_trained) logger.info(" Continuing training from global step %d", global_step) logger.info(" Will skip the first %d steps in the first epoch", steps_trained_in_current_epoch) tr_loss, logging_loss = 0.0, 0.0 model.zero_grad() train_iterator = trange( epochs_trained, int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0], ) set_seed(args) # Added here for reproducibility for _ in train_iterator: epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0]) for step, batch in enumerate(epoch_iterator): # Skip past any already trained steps if resuming training if steps_trained_in_current_epoch > 0: steps_trained_in_current_epoch -= 1 continue model.train() batch = tuple(t.to(args.device) for t in batch) threshold, regu_lambda = schedule_threshold( step=global_step, total_step=t_total, warmup_steps=args.warmup_steps, final_threshold=args.final_threshold, initial_threshold=args.initial_threshold, final_warmup=args.final_warmup, initial_warmup=args.initial_warmup, final_lambda=args.final_lambda, ) # Global TopK if args.global_topk: if threshold == 1.0: threshold = -1e2 # Or an indefinitely low quantity else: if (threshold_mem is None) or (global_step % args.global_topk_frequency_compute == 0): # Sort all the values to get the global topK concat = torch.cat( [param.view(-1) for name, param in model.named_parameters() if "mask_scores" in name] ) n = concat.numel() kth = max(n - (int(n * threshold) + 1), 1) threshold_mem = concat.kthvalue(kth).values.item() threshold = threshold_mem else: threshold = threshold_mem inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]} if args.model_type != "distilbert": inputs["token_type_ids"] = ( batch[2] if args.model_type in ["bert", "masked_bert", "xlnet", "albert"] else None ) # XLM, DistilBERT, RoBERTa, and XLM-RoBERTa don't use segment_ids if "masked" in args.model_type: inputs["threshold"] = threshold outputs = model(**inputs) loss, logits_stu = outputs # model outputs are always tuple in transformers (see doc) # Distillation loss if teacher is not None: if "token_type_ids" not in inputs: inputs["token_type_ids"] = None if args.teacher_type == "xlm" else batch[2] with torch.no_grad(): (logits_tea,) = teacher( input_ids=inputs["input_ids"], token_type_ids=inputs["token_type_ids"], attention_mask=inputs["attention_mask"], ) loss_logits = ( F.kl_div( 
input=F.log_softmax(logits_stu / args.temperature, dim=-1), target=F.softmax(logits_tea / args.temperature, dim=-1), reduction="batchmean", ) * (args.temperature ** 2) ) loss = args.alpha_distil * loss_logits + args.alpha_ce * loss # Regularization if args.regularization is not None: regu_ = regularization(model=model, mode=args.regularization) loss = loss + regu_lambda * regu_ if args.n_gpu > 1: loss = loss.mean() # mean() to average on multi-gpu parallel training if args.gradient_accumulation_steps > 1: loss = loss / args.gradient_accumulation_steps if args.fp16: with amp.scale_loss(loss, optimizer) as scaled_loss: scaled_loss.backward() else: loss.backward() tr_loss += loss.item() if (step + 1) % args.gradient_accumulation_steps == 0 or ( # last step in epoch but step is always smaller than gradient_accumulation_steps len(epoch_iterator) <= args.gradient_accumulation_steps and (step + 1) == len(epoch_iterator) ): if args.fp16: torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm) else: torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm) if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0: tb_writer.add_scalar("threshold", threshold, global_step) for name, param in model.named_parameters(): if not param.requires_grad: continue tb_writer.add_scalar("parameter_mean/" + name, param.data.mean(), global_step) tb_writer.add_scalar("parameter_std/" + name, param.data.std(), global_step) tb_writer.add_scalar("parameter_min/" + name, param.data.min(), global_step) tb_writer.add_scalar("parameter_max/" + name, param.data.max(), global_step) tb_writer.add_scalar("grad_mean/" + name, param.grad.data.mean(), global_step) tb_writer.add_scalar("grad_std/" + name, param.grad.data.std(), global_step) if args.regularization is not None and "mask_scores" in name: if args.regularization == "l1": perc = (torch.sigmoid(param) > threshold).sum().item() / param.numel() elif args.regularization == "l0": perc = (torch.sigmoid(param - 2 / 3 * np.log(0.1 / 1.1))).sum().item() / param.numel() tb_writer.add_scalar("retained_weights_perc/" + name, perc, global_step) optimizer.step() scheduler.step() # Update learning rate schedule model.zero_grad() global_step += 1 if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0: logs = {} if ( args.local_rank == -1 and args.evaluate_during_training ): # Only evaluate when single GPU otherwise metrics may not average well results = evaluate(args, model, tokenizer) for key, value in results.items(): eval_key = "eval_{}".format(key) logs[eval_key] = value loss_scalar = (tr_loss - logging_loss) / args.logging_steps learning_rate_scalar = scheduler.get_lr() logs["learning_rate"] = learning_rate_scalar[0] if len(learning_rate_scalar) > 1: for idx, lr in enumerate(learning_rate_scalar[1:]): logs[f"learning_rate/{idx+1}"] = lr logs["loss"] = loss_scalar if teacher is not None: logs["loss/distil"] = loss_logits.item() if args.regularization is not None: logs["loss/regularization"] = regu_.item() if (teacher is not None) or (args.regularization is not None): if (teacher is not None) and (args.regularization is not None): logs["loss/instant_ce"] = ( loss.item() - regu_lambda * logs["loss/regularization"] - args.alpha_distil * logs["loss/distil"] ) / args.alpha_ce elif teacher is not None: logs["loss/instant_ce"] = ( loss.item() - args.alpha_distil * logs["loss/distil"] ) / args.alpha_ce else: logs["loss/instant_ce"] = loss.item() - regu_lambda * 
logs["loss/regularization"] logging_loss = tr_loss for key, value in logs.items(): tb_writer.add_scalar(key, value, global_step) print(json.dumps({**logs, **{"step": global_step}})) if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0: # Save model checkpoint output_dir = os.path.join(args.output_dir, "checkpoint-{}".format(global_step)) if not os.path.exists(output_dir): os.makedirs(output_dir) model_to_save = ( model.module if hasattr(model, "module") else model ) # Take care of distributed/parallel training model_to_save.save_pretrained(output_dir) tokenizer.save_pretrained(output_dir) torch.save(args, os.path.join(output_dir, "training_args.bin")) logger.info("Saving model checkpoint to %s", output_dir) torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt")) torch.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt")) logger.info("Saving optimizer and scheduler states to %s", output_dir) if args.max_steps > 0 and global_step > args.max_steps: epoch_iterator.close() break if args.max_steps > 0 and global_step > args.max_steps: train_iterator.close() break if args.local_rank in [-1, 0]: tb_writer.close() return global_step, tr_loss / global_step def evaluate(args, model, tokenizer, prefix=""): # Loop to handle MNLI double evaluation (matched, mis-matched) eval_task_names = ("mnli", "mnli-mm") if args.task_name == "mnli" else (args.task_name,) eval_outputs_dirs = (args.output_dir, args.output_dir + "/MM") if args.task_name == "mnli" else (args.output_dir,) results = {} for eval_task, eval_output_dir in zip(eval_task_names, eval_outputs_dirs): eval_dataset = load_and_cache_examples(args, eval_task, tokenizer, evaluate=True) if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]: os.makedirs(eval_output_dir) args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu) # Note that DistributedSampler samples randomly eval_sampler = SequentialSampler(eval_dataset) eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size) # multi-gpu eval if args.n_gpu > 1 and not isinstance(model, torch.nn.DataParallel): model = torch.nn.DataParallel(model) # Eval! 
logger.info("***** Running evaluation {} *****".format(prefix)) logger.info(" Num examples = %d", len(eval_dataset)) logger.info(" Batch size = %d", args.eval_batch_size) eval_loss = 0.0 nb_eval_steps = 0 preds = None out_label_ids = None # Global TopK if args.global_topk: threshold_mem = None for batch in tqdm(eval_dataloader, desc="Evaluating"): model.eval() batch = tuple(t.to(args.device) for t in batch) with torch.no_grad(): inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]} if args.model_type != "distilbert": inputs["token_type_ids"] = ( batch[2] if args.model_type in ["bert", "masked_bert", "xlnet", "albert"] else None ) # XLM, DistilBERT, RoBERTa, and XLM-RoBERTa don't use segment_ids if "masked" in args.model_type: inputs["threshold"] = args.final_threshold if args.global_topk: if threshold_mem is None: concat = torch.cat( [param.view(-1) for name, param in model.named_parameters() if "mask_scores" in name] ) n = concat.numel() kth = max(n - (int(n * args.final_threshold) + 1), 1) threshold_mem = concat.kthvalue(kth).values.item() inputs["threshold"] = threshold_mem outputs = model(**inputs) tmp_eval_loss, logits = outputs[:2] eval_loss += tmp_eval_loss.mean().item() nb_eval_steps += 1 if preds is None: preds = logits.detach().cpu().numpy() out_label_ids = inputs["labels"].detach().cpu().numpy() else: preds = np.append(preds, logits.detach().cpu().numpy(), axis=0) out_label_ids = np.append(out_label_ids, inputs["labels"].detach().cpu().numpy(), axis=0) eval_loss = eval_loss / nb_eval_steps if args.output_mode == "classification": from scipy.special import softmax probs = softmax(preds, axis=-1) entropy = np.exp((-probs * np.log(probs)).sum(axis=-1).mean()) preds = np.argmax(preds, axis=1) elif args.output_mode == "regression": preds = np.squeeze(preds) result = compute_metrics(eval_task, preds, out_label_ids) results.update(result) if entropy is not None: result["eval_avg_entropy"] = entropy output_eval_file = os.path.join(eval_output_dir, prefix, "eval_results.txt") with open(output_eval_file, "w") as writer: logger.info("***** Eval results {} *****".format(prefix)) for key in sorted(result.keys()): logger.info(" %s = %s", key, str(result[key])) writer.write("%s = %s\n" % (key, str(result[key]))) return results def load_and_cache_examples(args, task, tokenizer, evaluate=False): if args.local_rank not in [-1, 0] and not evaluate: torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache processor = processors[task]() output_mode = output_modes[task] # Load data features from cache or dataset file cached_features_file = os.path.join( args.data_dir, "cached_{}_{}_{}_{}".format( "dev" if evaluate else "train", list(filter(None, args.model_name_or_path.split("/"))).pop(), str(args.max_seq_length), str(task), ), ) if os.path.exists(cached_features_file) and not args.overwrite_cache: logger.info("Loading features from cached file %s", cached_features_file) features = torch.load(cached_features_file) else: logger.info("Creating features from dataset file at %s", args.data_dir) label_list = processor.get_labels() if task in ["mnli", "mnli-mm"] and args.model_type in ["roberta", "xlmroberta"]: # HACK(label indices are swapped in RoBERTa pretrained model) label_list[1], label_list[2] = label_list[2], label_list[1] examples = ( processor.get_dev_examples(args.data_dir) if evaluate else processor.get_train_examples(args.data_dir) ) features = convert_examples_to_features( examples, 
tokenizer, max_length=args.max_seq_length, label_list=label_list, output_mode=output_mode, ) if args.local_rank in [-1, 0]: logger.info("Saving features into cached file %s", cached_features_file) torch.save(features, cached_features_file) if args.local_rank == 0 and not evaluate: torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache # Convert to Tensors and build dataset all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long) all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long) all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long) if output_mode == "classification": all_labels = torch.tensor([f.label for f in features], dtype=torch.long) elif output_mode == "regression": all_labels = torch.tensor([f.label for f in features], dtype=torch.float) dataset = TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels) return dataset def main(): parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--data_dir", default=None, type=str, required=True, help="The input data dir. Should contain the .tsv files (or other data files) for the task.", ) parser.add_argument( "--model_type", default=None, type=str, required=True, help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()), ) parser.add_argument( "--model_name_or_path", default=None, type=str, required=True, help="Path to pretrained model or model identifier from huggingface.co/models", ) parser.add_argument( "--task_name", default=None, type=str, required=True, help="The name of the task to train selected in the list: " + ", ".join(processors.keys()), ) parser.add_argument( "--output_dir", default=None, type=str, required=True, help="The output directory where the model predictions and checkpoints will be written.", ) # Other parameters parser.add_argument( "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name", ) parser.add_argument( "--tokenizer_name", default="", type=str, help="Pretrained tokenizer name or path if not the same as model_name", ) parser.add_argument( "--cache_dir", default="", type=str, help="Where do you want to store the pre-trained models downloaded from huggingface.co", ) parser.add_argument( "--max_seq_length", default=128, type=int, help="The maximum total input sequence length after tokenization. 
Sequences longer " "than this will be truncated, sequences shorter will be padded.", ) parser.add_argument("--do_train", action="store_true", help="Whether to run training.") parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.") parser.add_argument( "--evaluate_during_training", action="store_true", help="Run evaluation during training at each logging step.", ) parser.add_argument( "--do_lower_case", action="store_true", help="Set this flag if you are using an uncased model.", ) parser.add_argument( "--per_gpu_train_batch_size", default=8, type=int, help="Batch size per GPU/CPU for training.", ) parser.add_argument( "--per_gpu_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.", ) parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.") # Pruning parameters parser.add_argument( "--mask_scores_learning_rate", default=1e-2, type=float, help="The Adam initial learning rate of the mask scores.", ) parser.add_argument( "--initial_threshold", default=1.0, type=float, help="Initial value of the threshold (for scheduling)." ) parser.add_argument( "--final_threshold", default=0.7, type=float, help="Final value of the threshold (for scheduling)." ) parser.add_argument( "--initial_warmup", default=1, type=int, help="Run `initial_warmup` * `warmup_steps` steps of threshold warmup during which threshold stays" "at its `initial_threshold` value (sparsity schedule).", ) parser.add_argument( "--final_warmup", default=2, type=int, help="Run `final_warmup` * `warmup_steps` steps of threshold cool-down during which threshold stays" "at its final_threshold value (sparsity schedule).", ) parser.add_argument( "--pruning_method", default="topK", type=str, help="Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning, sigmoied_threshold = Soft movement pruning).", ) parser.add_argument( "--mask_init", default="constant", type=str, help="Initialization method for the mask scores. Choices: constant, uniform, kaiming.", ) parser.add_argument( "--mask_scale", default=0.0, type=float, help="Initialization parameter for the chosen initialization method." ) parser.add_argument("--regularization", default=None, help="Add L0 or L1 regularization to the mask scores.") parser.add_argument( "--final_lambda", default=0.0, type=float, help="Regularization intensity (used in conjunction with `regularization`.", ) parser.add_argument("--global_topk", action="store_true", help="Global TopK on the Scores.") parser.add_argument( "--global_topk_frequency_compute", default=25, type=int, help="Frequency at which we compute the TopK global threshold.", ) # Distillation parameters (optional) parser.add_argument( "--teacher_type", default=None, type=str, help="Teacher type. Teacher tokenizer and student (model) tokenizer must output the same tokenization. Only for distillation.", ) parser.add_argument( "--teacher_name_or_path", default=None, type=str, help="Path to the already fine-tuned teacher model. Only for distillation.", ) parser.add_argument( "--alpha_ce", default=0.5, type=float, help="Cross entropy loss linear weight. Only for distillation." ) parser.add_argument( "--alpha_distil", default=0.5, type=float, help="Distillation loss linear weight. Only for distillation." ) parser.add_argument( "--temperature", default=2.0, type=float, help="Distillation temperature. Only for distillation." 
) parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", ) parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.") parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.") parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") parser.add_argument( "--num_train_epochs", default=3.0, type=float, help="Total number of training epochs to perform.", ) parser.add_argument( "--max_steps", default=-1, type=int, help="If > 0: set total number of training steps to perform. Override num_train_epochs.", ) parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.") parser.add_argument("--logging_steps", type=int, default=50, help="Log every X updates steps.") parser.add_argument("--save_steps", type=int, default=50, help="Save checkpoint every X updates steps.") parser.add_argument( "--eval_all_checkpoints", action="store_true", help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number", ) parser.add_argument("--no_cuda", action="store_true", help="Avoid using CUDA when available") parser.add_argument( "--overwrite_output_dir", action="store_true", help="Overwrite the content of the output directory", ) parser.add_argument( "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets", ) parser.add_argument("--seed", type=int, default=42, help="random seed for initialization") parser.add_argument( "--fp16", action="store_true", help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit", ) parser.add_argument( "--fp16_opt_level", type=str, default="O1", help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']." "See details at https://nvidia.github.io/apex/amp.html", ) parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") args = parser.parse_args() # Regularization if args.regularization == "null": args.regularization = None if ( os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and not args.overwrite_output_dir ): raise ValueError( f"Output directory ({args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to overcome." 
) # Setup CUDA, GPU & distributed training if args.local_rank == -1 or args.no_cuda: device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count() else: # Initializes the distributed backend which will take care of synchronizing nodes/GPUs torch.cuda.set_device(args.local_rank) device = torch.device("cuda", args.local_rank) torch.distributed.init_process_group(backend="nccl") args.n_gpu = 1 args.device = device # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN, ) logger.warning( "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16, ) # Set seed set_seed(args) # Prepare GLUE task args.task_name = args.task_name.lower() if args.task_name not in processors: raise ValueError("Task not found: %s" % (args.task_name)) processor = processors[args.task_name]() args.output_mode = output_modes[args.task_name] label_list = processor.get_labels() num_labels = len(label_list) # Load pretrained model and tokenizer if args.local_rank not in [-1, 0]: torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab args.model_type = args.model_type.lower() config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type] config = config_class.from_pretrained( args.config_name if args.config_name else args.model_name_or_path, num_labels=num_labels, finetuning_task=args.task_name, cache_dir=args.cache_dir if args.cache_dir else None, pruning_method=args.pruning_method, mask_init=args.mask_init, mask_scale=args.mask_scale, ) tokenizer = tokenizer_class.from_pretrained( args.tokenizer_name if args.tokenizer_name else args.model_name_or_path, cache_dir=args.cache_dir if args.cache_dir else None, do_lower_case=args.do_lower_case, ) model = model_class.from_pretrained( args.model_name_or_path, from_tf=bool(".ckpt" in args.model_name_or_path), config=config, cache_dir=args.cache_dir if args.cache_dir else None, ) if args.teacher_type is not None: assert args.teacher_name_or_path is not None assert args.alpha_distil > 0.0 assert args.alpha_distil + args.alpha_ce > 0.0 teacher_config_class, teacher_model_class, _ = MODEL_CLASSES[args.teacher_type] teacher_config = teacher_config_class.from_pretrained(args.teacher_name_or_path) teacher = teacher_model_class.from_pretrained( args.teacher_name_or_path, from_tf=False, config=teacher_config, cache_dir=args.cache_dir if args.cache_dir else None, ) teacher.to(args.device) else: teacher = None if args.local_rank == 0: torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab model.to(args.device) logger.info("Training/evaluation parameters %s", args) # Training if args.do_train: train_dataset = load_and_cache_examples(args, args.task_name, tokenizer, evaluate=False) global_step, tr_loss = train(args, train_dataset, model, tokenizer, teacher=teacher) logger.info(" global_step = %s, average loss = %s", global_step, tr_loss) # Saving best-practices: if you use defaults names for the model, you can reload it using from_pretrained() if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0): logger.info("Saving model checkpoint to %s", args.output_dir) # Save a trained model, configuration and 
tokenizer using `save_pretrained()`. # They can then be reloaded using `from_pretrained()` model_to_save = ( model.module if hasattr(model, "module") else model ) # Take care of distributed/parallel training model_to_save.save_pretrained(args.output_dir) tokenizer.save_pretrained(args.output_dir) # Good practice: save your training arguments together with the trained model torch.save(args, os.path.join(args.output_dir, "training_args.bin")) # Load a trained model and vocabulary that you have fine-tuned model = model_class.from_pretrained(args.output_dir) tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case) model.to(args.device) # Evaluation results = {} if args.do_eval and args.local_rank in [-1, 0]: tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case) checkpoints = [args.output_dir] if args.eval_all_checkpoints: checkpoints = list( os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + "/**/" + WEIGHTS_NAME, recursive=True)) ) logger.info("Evaluate the following checkpoints: %s", checkpoints) for checkpoint in checkpoints: global_step = checkpoint.split("-")[-1] if len(checkpoints) > 1 else "" prefix = checkpoint.split("/")[-1] if checkpoint.find("checkpoint") != -1 else "" model = model_class.from_pretrained(checkpoint) model.to(args.device) result = evaluate(args, model, tokenizer, prefix=prefix) result = dict((k + "_{}".format(global_step), v) for k, v in result.items()) results.update(result) return results if __name__ == "__main__": main()
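For orientation, here is a hypothetical invocation of the script above. Every flag shown is defined in the argument parser in `main()`, but the task, paths and hyperparameter values are illustrative placeholders rather than recommended settings:

```bash
# Hypothetical example: fine-prune a masked BERT on MNLI with soft movement pruning.
# All flags exist in the argparse section above; the values are illustrative only.
python masked_run_glue.py \
    --data_dir /path/to/glue/MNLI \
    --model_type masked_bert \
    --model_name_or_path bert-base-uncased \
    --task_name mnli \
    --output_dir /tmp/mnli_pruned \
    --do_train --do_eval --do_lower_case \
    --per_gpu_train_batch_size 32 \
    --learning_rate 3e-5 \
    --mask_scores_learning_rate 1e-2 \
    --num_train_epochs 6 \
    --warmup_steps 5400 \
    --initial_threshold 0 --final_threshold 0.1 \
    --initial_warmup 1 --final_warmup 2 \
    --pruning_method sigmoied_threshold --mask_init constant --mask_scale 0. \
    --regularization l1 --final_lambda 400.
```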
AdaMix/examples/research_projects/movement-pruning/masked_run_glue.py/0
{ "file_path": "AdaMix/examples/research_projects/movement-pruning/masked_run_glue.py", "repo_id": "AdaMix", "token_count": 18214 }
# Intro

Authors: @patrickvonplaten and @lhoestq

Aimed at tackling knowledge-intensive NLP tasks (think tasks a human wouldn't be expected to solve without access to external knowledge sources), RAG models are seq2seq models with access to a retrieval mechanism providing relevant context documents at training and evaluation time.

A RAG model encapsulates two core components: a question encoder and a generator. During a forward pass, we encode the input with the question encoder and pass it to the retriever to extract relevant context documents. The documents are then prepended to the input. Such contextualized inputs are passed to the generator.

Read more about RAG at https://arxiv.org/abs/2005.11401.

# Finetuning

Our finetuning logic is based on scripts from [`examples/seq2seq`](https://github.com/huggingface/transformers/tree/master/examples/seq2seq). We accept training data in the same format as specified there - we expect a directory consisting of 6 text files:
```bash
train.source
train.target
val.source
val.target
test.source
test.target
```

A sample finetuning command (run `./examples/research_projects/rag/finetune_rag.py --help` to list all available options):

```bash
python examples/research_projects/rag/finetune_rag.py \
    --data_dir $DATA_DIR \
    --output_dir $OUTPUT_DIR \
    --model_name_or_path $MODEL_NAME_OR_PATH \
    --model_type rag_sequence \
    --fp16 \
    --gpus 8
```

We publish two `base` models which can serve as a starting point for finetuning on downstream tasks (use them as `model_name_or_path`):
- [`facebook/rag-sequence-base`](https://huggingface.co/facebook/rag-sequence-base) - a base for finetuning `RagSequenceForGeneration` models,
- [`facebook/rag-token-base`](https://huggingface.co/facebook/rag-token-base) - a base for finetuning `RagTokenForGeneration` models.

The `base` models initialize the question encoder with [`facebook/dpr-question_encoder-single-nq-base`](https://huggingface.co/facebook/dpr-question_encoder-single-nq-base) and the generator with [`facebook/bart-large`](https://huggingface.co/facebook/bart-large).

If you would like to initialize finetuning with a base model using different question encoder and generator architectures, you can build it with a consolidation script, e.g.:
```
python examples/research_projects/rag/consolidate_rag_checkpoint.py \
    --model_type rag_sequence \
    --generator_name_or_path facebook/bart-large-cnn \
    --question_encoder_name_or_path facebook/dpr-question_encoder-single-nq-base \
    --dest path/to/checkpoint
```
You will then be able to pass `path/to/checkpoint` as `model_name_or_path` to the `finetune_rag.py` script.

## Document Retrieval

When running distributed fine-tuning, each training worker needs to retrieve contextual documents for its input by querying an index loaded into memory. RAG provides two implementations for document retrieval, one using the [`torch.distributed`](https://pytorch.org/docs/stable/distributed.html) communication package and the other using [`Ray`](https://docs.ray.io/en/master/). This option can be configured with the `--distributed_retriever` flag, which can be set to either `pytorch` or `ray`. By default this flag is set to `pytorch`.

For the PyTorch implementation, only training worker 0 loads the index into CPU memory, and a gather/scatter pattern is used to collect the inputs from the other training workers and send back the corresponding document embeddings.

For the Ray implementation, the index is loaded in *separate* process(es). The training workers randomly select which retriever worker to query.
To use Ray for distributed retrieval, you have to set the `--distributed_retriever` arg to `ray`. To configure the number of retrieval workers (the number of processes that load the index), you can set the `--num_retrieval_workers` flag. Also make sure to start the Ray cluster before running fine-tuning.

```bash
# Start a single-node Ray cluster.
ray start --head

python examples/research_projects/rag/finetune_rag.py \
    --data_dir $DATA_DIR \
    --output_dir $OUTPUT_DIR \
    --model_name_or_path $MODEL_NAME_OR_PATH \
    --model_type rag_sequence \
    --fp16 \
    --gpus 8 \
    --distributed_retriever ray \
    --num_retrieval_workers 4

# Stop the Ray cluster once fine-tuning has finished.
ray stop
```

Using Ray can lead to retrieval speedups in multi-GPU settings since multiple processes load the index rather than just the rank 0 training worker. Using Ray also allows you to load the index on GPU, since the index is loaded in separate processes from the model, whereas with PyTorch distributed retrieval both are loaded in the same process, potentially leading to GPU OOM.

# Evaluation

Our evaluation script enables two modes of evaluation (controlled by the `eval_mode` argument): `e2e` - end-to-end evaluation, which returns EM (exact match) and F1 scores calculated for the downstream task, and `retrieval` - which returns precision@k of the documents retrieved for the provided inputs.

The evaluation script expects paths to two files:
- `evaluation_set` - a path to a file specifying the evaluation dataset, a single input per line.
- `gold_data_path` - a path to a file containing ground truth answers for datapoints from the `evaluation_set`, a single output per line. Check below for expected formats of the gold data files.

## Retrieval evaluation

For `retrieval` evaluation, we expect a gold data file where each line consists of a tab-separated list of document titles constituting positive contexts for the respective datapoint from the `evaluation_set`. E.g. given a question `who sings does he love me with reba` in the `evaluation_set`, a respective ground truth line could look as follows:
```
Does He Love You	Does He Love You	Red Sandy Spika dress of Reba McEntire	Greatest Hits Volume Two (Reba McEntire album)	Shoot for the Moon (album)
```

We demonstrate how to evaluate retrieval against DPR evaluation data. You can download the respective files from the links listed [here](https://github.com/facebookresearch/DPR/blob/master/data/download_data.py#L39-L45).

1. Download and unzip the gold data file. We use the `biencoder-nq-dev` from https://dl.fbaipublicfiles.com/dpr/data/retriever/biencoder-nq-dev.json.gz.
   ```bash
   wget https://dl.fbaipublicfiles.com/dpr/data/retriever/biencoder-nq-dev.json.gz && gzip -d biencoder-nq-dev.json.gz
   ```
2. Parse the unzipped file using `parse_dpr_relevance_data.py`:
   ```bash
   mkdir output # or wherever you want to save this
   python examples/research_projects/rag/parse_dpr_relevance_data.py \
       --src_path biencoder-nq-dev.json \
       --evaluation_set output/biencoder-nq-dev.questions \
       --gold_data_path output/biencoder-nq-dev.pages
   ```
3. Run evaluation:
   ```bash
   python examples/research_projects/rag/eval_rag.py \
       --model_name_or_path facebook/rag-sequence-nq \
       --model_type rag_sequence \
       --evaluation_set output/biencoder-nq-dev.questions \
       --gold_data_path output/biencoder-nq-dev.pages \
       --predictions_path output/retrieval_preds.tsv \
       --eval_mode retrieval \
       --k 1
   ```
   ```bash
   # EXPLANATION
   python examples/research_projects/rag/eval_rag.py \
       --model_name_or_path facebook/rag-sequence-nq \  # model name or path of the model we're evaluating
       --model_type rag_sequence \  # RAG model type (rag_token or rag_sequence)
       --evaluation_set output/biencoder-nq-dev.questions \  # an input dataset for evaluation
       --gold_data_path output/biencoder-nq-dev.pages \  # a dataset containing ground truth answers for samples from the evaluation_set
       --predictions_path output/retrieval_preds.tsv \  # name of file where predictions will be stored
       --eval_mode retrieval \  # indicates whether we're performing retrieval evaluation or e2e evaluation
       --k 1  # parameter k for the precision@k metric
   ```

## End-to-end evaluation

We support two formats of the gold data file (controlled by the `gold_data_mode` parameter):
- `qa` - where a single line has the following format: `input [tab] output_list`, e.g.:
  ```
  who is the owner of reading football club	['Xiu Li Dai', 'Dai Yongge', 'Dai Xiuli', 'Yongge Dai']
  ```
- `ans` - where a single line contains a single expected answer, e.g.:
  ```
  Xiu Li Dai
  ```

Predictions of the model for the samples from the `evaluation_set` will be saved under the path specified by the `predictions_path` parameter. If this path already exists, the script will use the saved predictions to calculate metrics. Add the `--recalculate` parameter to force the script to perform inference from scratch.

An example e2e evaluation run could look as follows:
```bash
python examples/research_projects/rag/eval_rag.py \
    --model_name_or_path facebook/rag-sequence-nq \
    --model_type rag_sequence \
    --evaluation_set path/to/test.source \
    --gold_data_path path/to/gold_data \
    --predictions_path path/to/e2e_preds.txt \
    --eval_mode e2e \
    --gold_data_mode qa \
    --n_docs 5 \  # You can experiment with retrieving different number of documents at evaluation time
    --print_predictions \
    --recalculate \  # adding this parameter will force recalculating predictions even if predictions_path already exists
```

# Use your own knowledge source

By default, RAG uses the English Wikipedia as a knowledge source, known as the `wiki_dpr` dataset. With `use_own_knowledge_dataset.py` you can build your own knowledge source, *e.g.* for RAG.

For instance, if documents are serialized as tab-separated csv files with the columns "title" and "text", one can use `use_own_knowledge_dataset.py` as follows:
```bash
python examples/research_projects/rag/use_own_knowledge_dataset.py \
    --csv_path path/to/my_csv \
    --output_dir path/to/my_knowledge_dataset \
```

The created outputs in `path/to/my_knowledge_dataset` can then be used to finetune RAG as follows:
```bash
python examples/research_projects/rag/finetune_rag.py \
    --data_dir $DATA_DIR \
    --output_dir $OUTPUT_DIR \
    --model_name_or_path $MODEL_NAME_OR_PATH \
    --model_type rag_sequence \
    --fp16 \
    --gpus 8 \
    --index_name custom \
    --passages_path path/to/my_knowledge_dataset \
    --index_path path/to/my_knowledge_dataset_hnsw_index.faiss
```
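For reference, a minimal Python sketch of loading the artifacts produced by `use_own_knowledge_dataset.py` into a RAG model at inference time; the paths and the question are illustrative placeholders:

```python
from transformers import RagRetriever, RagSequenceForGeneration, RagTokenizer

# Placeholder paths: they should point to the dataset and FAISS index written
# by use_own_knowledge_dataset.py (see the command above).
passages_path = "path/to/my_knowledge_dataset"
index_path = "path/to/my_knowledge_dataset_hnsw_index.faiss"

retriever = RagRetriever.from_pretrained(
    "facebook/rag-sequence-nq", index_name="custom", passages_path=passages_path, index_path=index_path
)
model = RagSequenceForGeneration.from_pretrained("facebook/rag-sequence-nq", retriever=retriever)
tokenizer = RagTokenizer.from_pretrained("facebook/rag-sequence-nq")

# Encode a question, retrieve supporting passages, and generate an answer.
input_ids = tokenizer.question_encoder("What does Moses' rod turn into ?", return_tensors="pt")["input_ids"]
generated = model.generate(input_ids)
print(tokenizer.batch_decode(generated, skip_special_tokens=True)[0])
```

This mirrors steps 3 and 4 of `use_own_knowledge_dataset.py` shown further below, only with the saved paths instead of an in-memory dataset.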
AdaMix/examples/research_projects/rag/README.md/0
{ "file_path": "AdaMix/examples/research_projects/rag/README.md", "repo_id": "AdaMix", "token_count": 3258 }
import logging import os from dataclasses import dataclass, field from functools import partial from pathlib import Path from tempfile import TemporaryDirectory from typing import List, Optional import torch from datasets import Features, Sequence, Value, load_dataset import faiss from transformers import ( DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser, RagRetriever, RagSequenceForGeneration, RagTokenizer, ) logger = logging.getLogger(__name__) torch.set_grad_enabled(False) device = "cuda" if torch.cuda.is_available() else "cpu" def split_text(text: str, n=100, character=" ") -> List[str]: """Split the text every ``n``-th occurrence of ``character``""" text = text.split(character) return [character.join(text[i : i + n]).strip() for i in range(0, len(text), n)] def split_documents(documents: dict) -> dict: """Split documents into passages""" titles, texts = [], [] for title, text in zip(documents["title"], documents["text"]): if text is not None: for passage in split_text(text): titles.append(title if title is not None else "") texts.append(passage) return {"title": titles, "text": texts} def embed(documents: dict, ctx_encoder: DPRContextEncoder, ctx_tokenizer: DPRContextEncoderTokenizerFast) -> dict: """Compute the DPR embeddings of document passages""" input_ids = ctx_tokenizer( documents["title"], documents["text"], truncation=True, padding="longest", return_tensors="pt" )["input_ids"] embeddings = ctx_encoder(input_ids.to(device=device), return_dict=True).pooler_output return {"embeddings": embeddings.detach().cpu().numpy()} def main( rag_example_args: "RagExampleArguments", processing_args: "ProcessingArguments", index_hnsw_args: "IndexHnswArguments", ): ###################################### logger.info("Step 1 - Create the dataset") ###################################### # The dataset needed for RAG must have three columns: # - title (string): title of the document # - text (string): text of a passage of the document # - embeddings (array of dimension d): DPR representation of the passage # Let's say you have documents in tab-separated csv files with columns "title" and "text" assert os.path.isfile(rag_example_args.csv_path), "Please provide a valid path to a csv file" # You can load a Dataset object this way dataset = load_dataset( "csv", data_files=[rag_example_args.csv_path], split="train", delimiter="\t", column_names=["title", "text"] ) # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files # Then split the documents into passages of 100 words dataset = dataset.map(split_documents, batched=True, num_proc=processing_args.num_proc) # And compute the embeddings ctx_encoder = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name).to(device=device) ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name) new_features = Features( {"text": Value("string"), "title": Value("string"), "embeddings": Sequence(Value("float32"))} ) # optional, save as float32 instead of float64 to save space dataset = dataset.map( partial(embed, ctx_encoder=ctx_encoder, ctx_tokenizer=ctx_tokenizer), batched=True, batch_size=processing_args.batch_size, features=new_features, ) # And finally save your dataset passages_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset") dataset.save_to_disk(passages_path) # from datasets import load_from_disk # dataset = load_from_disk(passages_path) # to reload the dataset 
###################################### logger.info("Step 2 - Index the dataset") ###################################### # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search index = faiss.IndexHNSWFlat(index_hnsw_args.d, index_hnsw_args.m, faiss.METRIC_INNER_PRODUCT) dataset.add_faiss_index("embeddings", custom_index=index) # And save the index index_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset_hnsw_index.faiss") dataset.get_index("embeddings").save(index_path) # dataset.load_faiss_index("embeddings", index_path) # to reload the index ###################################### logger.info("Step 3 - Load RAG") ###################################### # Easy way to load the model retriever = RagRetriever.from_pretrained( rag_example_args.rag_model_name, index_name="custom", indexed_dataset=dataset ) model = RagSequenceForGeneration.from_pretrained(rag_example_args.rag_model_name, retriever=retriever) tokenizer = RagTokenizer.from_pretrained(rag_example_args.rag_model_name) # For distributed fine-tuning you'll need to provide the paths instead, as the dataset and the index are loaded separately. # retriever = RagRetriever.from_pretrained(rag_model_name, index_name="custom", passages_path=passages_path, index_path=index_path) ###################################### logger.info("Step 4 - Have fun") ###################################### question = rag_example_args.question or "What does Moses' rod turn into ?" input_ids = tokenizer.question_encoder(question, return_tensors="pt")["input_ids"] generated = model.generate(input_ids) generated_string = tokenizer.batch_decode(generated, skip_special_tokens=True)[0] logger.info("Q: " + question) logger.info("A: " + generated_string) @dataclass class RagExampleArguments: csv_path: str = field( default=str(Path(__file__).parent / "test_data" / "my_knowledge_dataset.csv"), metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"}, ) question: Optional[str] = field( default=None, metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."}, ) rag_model_name: str = field( default="facebook/rag-sequence-nq", metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"}, ) dpr_ctx_encoder_model_name: str = field( default="facebook/dpr-ctx_encoder-multiset-base", metadata={ "help": "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or 'facebook/dpr-ctx_encoder-multiset-base'" }, ) output_dir: Optional[str] = field( default=None, metadata={"help": "Path to a directory where the dataset passages and the index will be saved"}, ) @dataclass class ProcessingArguments: num_proc: Optional[int] = field( default=None, metadata={ "help": "The number of processes to use to split the documents into passages. Default is single process." }, ) batch_size: int = field( default=16, metadata={ "help": "The batch size to use when computing the passages embeddings using the DPR context encoder." }, ) @dataclass class IndexHnswArguments: d: int = field( default=768, metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."}, ) m: int = field( default=128, metadata={ "help": "The number of bi-directional links created for every new element during the HNSW index construction." 
}, ) if __name__ == "__main__": logging.basicConfig(level=logging.WARNING) logger.setLevel(logging.INFO) parser = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments)) rag_example_args, processing_args, index_hnsw_args = parser.parse_args_into_dataclasses() with TemporaryDirectory() as tmp_dir: rag_example_args.output_dir = rag_example_args.output_dir or tmp_dir main(rag_example_args, processing_args, index_hnsw_args)
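To make the expected input concrete, here is a minimal sketch that writes a headerless, tab-separated file with the "title" and "text" columns this script loads; the file name and row contents are made up for illustration:

```python
import csv

# Hypothetical two-column knowledge source; the script reads it with
# load_dataset("csv", ..., delimiter="\t", column_names=["title", "text"]).
rows = [
    ("Moses' rod", "According to the Book of Exodus, the rod of Moses turned into a serpent ..."),
    ("Aaron's rod", "Aaron's rod refers to any of the staves carried by Moses' brother, Aaron ..."),
]
with open("my_knowledge_dataset.csv", "w", newline="") as f:
    writer = csv.writer(f, delimiter="\t")
    writer.writerows(rows)
```

Pointing `--csv_path` at such a file is all the script needs; it then splits each document into 100-word passages and embeds them with the DPR context encoder, as implemented above.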
AdaMix/examples/research_projects/rag/use_own_knowledge_dataset.py/0
{ "file_path": "AdaMix/examples/research_projects/rag/use_own_knowledge_dataset.py", "repo_id": "AdaMix", "token_count": 2920 }
#!/usr/bin/env bash
export PYTHONPATH="../":"${PYTHONPATH}"

# From appendix C of paper https://arxiv.org/abs/1912.08777
# Set --gradient_accumulation_steps so that effective batch size is 256 (2*128, 4*64, 8*32, 16*16)
python finetune.py \
    --learning_rate=1e-4 \
    --do_train \
    --do_predict \
    --n_val 1000 \
    --val_check_interval 0.25 \
    --max_source_length 512 --max_target_length 56 \
    --freeze_embeds --label_smoothing 0.1 --adafactor --task summarization_xsum \
    "$@"
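A hypothetical call of this wrapper is sketched below. Everything after the script name is forwarded verbatim to `finetune.py` via `"$@"`; the argument names and values are assumptions about that companion script (chosen so that 16 × 16 gives the effective batch size of 256 mentioned above), not documented options:

```bash
# Hypothetical invocation; all extra flags are passed straight through to finetune.py.
./finetune_pegasus_xsum.sh \
    --model_name_or_path google/pegasus-large \
    --data_dir $XSUM_DIR \
    --output_dir pegasus_xsum_ft \
    --train_batch_size 16 \
    --gradient_accumulation_steps 16 \
    --gpus 1
```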
AdaMix/examples/research_projects/seq2seq-distillation/finetune_pegasus_xsum.sh/0
{ "file_path": "AdaMix/examples/research_projects/seq2seq-distillation/finetune_pegasus_xsum.sh", "repo_id": "AdaMix", "token_count": 208 }
#!/usr/bin/env python3 from dataclasses import dataclass, field from typing import Any, Dict, List, Optional, Union import datasets import numpy as np import torch import torch.nn as nn from packaging import version import soundfile as sf from transformers import ( HfArgumentParser, Trainer, TrainingArguments, Wav2Vec2ForCTC, Wav2Vec2Processor, is_apex_available, ) if is_apex_available(): from apex import amp if version.parse(torch.__version__) >= version.parse("1.6"): _is_native_amp_available = True from torch.cuda.amp import autocast @dataclass class ModelArguments: """ Arguments pertaining to which model/config/tokenizer we are going to fine-tune from. """ model_name_or_path: str = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) cache_dir: Optional[str] = field( default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, ) freeze_feature_extractor: Optional[bool] = field( default=True, metadata={"help": "Whether to freeze the feature extractor layers of the model."} ) @dataclass class DataTrainingArguments: """ Arguments pertaining to what data we are going to input our model for training and eval. Using `HfArgumentParser` we can turn this class into argparse arguments to be able to specify them on the command line. """ dataset_name: str = field( default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."} ) dataset_config_name: Optional[str] = field( default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} ) train_split_name: Optional[str] = field( default="train", metadata={ "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'" }, ) overwrite_cache: bool = field( default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."} ) preprocessing_num_workers: Optional[int] = field( default=None, metadata={"help": "The number of processes to use for the preprocessing."}, ) @dataclass class DataCollatorCTCWithPadding: """ Data collator that will dynamically pad the inputs received. Args: processor (:class:`~transformers.Wav2Vec2Processor`) The processor used for proccessing the data. padding (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.PaddingStrategy`, `optional`, defaults to :obj:`True`): Select a strategy to pad the returned sequences (according to the model's padding side and padding index) among: * :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). * :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the maximum acceptable input length for the model if that argument is not provided. * :obj:`False` or :obj:`'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different lengths). max_length (:obj:`int`, `optional`): Maximum length of the ``input_values`` of the returned list and optionally padding length (see above). max_length_labels (:obj:`int`, `optional`): Maximum length of the ``labels`` returned list and optionally padding length (see above). pad_to_multiple_of (:obj:`int`, `optional`): If set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >= 7.5 (Volta). 
""" processor: Wav2Vec2Processor padding: Union[bool, str] = True max_length: Optional[int] = None max_length_labels: Optional[int] = None pad_to_multiple_of: Optional[int] = None pad_to_multiple_of_labels: Optional[int] = None def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]: # split inputs and labels since they have to be of different lenghts and need # different padding methods input_features = [{"input_values": feature["input_values"]} for feature in features] label_features = [{"input_ids": feature["labels"]} for feature in features] batch = self.processor.pad( input_features, padding=self.padding, max_length=self.max_length, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors="pt", ) with self.processor.as_target_processor(): labels_batch = self.processor.pad( label_features, padding=self.padding, max_length=self.max_length_labels, pad_to_multiple_of=self.pad_to_multiple_of_labels, return_tensors="pt", ) # replace padding with -100 to ignore loss correctly labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100) batch["labels"] = labels return batch class CTCTrainer(Trainer): def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor: """ Perform a training step on a batch of inputs. Subclass and override to inject custom behavior. Args: model (:obj:`nn.Module`): The model to train. inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`): The inputs and targets of the model. The dictionary will be unpacked before being fed to the model. Most models expect the targets under the argument :obj:`labels`. Check your model's documentation for all accepted arguments. Return: :obj:`torch.Tensor`: The tensor with training loss on this batch. """ model.train() inputs = self._prepare_inputs(inputs) if self.use_amp: with autocast(): loss = self.compute_loss(model, inputs) else: loss = self.compute_loss(model, inputs) if self.args.n_gpu > 1: if model.module.config.ctc_loss_reduction == "mean": loss = loss.mean() elif model.module.config.ctc_loss_reduction == "sum": loss = loss.sum() / (inputs["labels"] >= 0).sum() else: raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']") if self.args.gradient_accumulation_steps > 1: loss = loss / self.args.gradient_accumulation_steps if self.use_amp: self.scaler.scale(loss).backward() elif self.use_apex: with amp.scale_loss(loss, self.optimizer) as scaled_loss: scaled_loss.backward() elif self.deepspeed: self.deepspeed.backward(loss) else: loss.backward() return loss.detach() def main(): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. 
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) model_args, data_args, training_args = parser.parse_args_into_dataclasses() model = Wav2Vec2ForCTC.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir) processor = Wav2Vec2Processor.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir) train_dataset = datasets.load_dataset( data_args.dataset_name, data_args.dataset_config_name, split=data_args.train_split_name ) val_dataset = datasets.load_dataset(data_args.dataset_name, data_args.dataset_config_name, split="validation") wer_metric = datasets.load_metric("wer") def map_to_array(batch): speech_array, sampling_rate = sf.read(batch["file"]) batch["speech"] = speech_array batch["sampling_rate"] = sampling_rate return batch train_dataset = train_dataset.map(map_to_array, remove_columns=["file"]) val_dataset = val_dataset.map(map_to_array, remove_columns=["file"]) def prepare_dataset(batch): # check that all files have the correct sampling rate assert ( len(set(batch["sampling_rate"])) == 1 ), f"Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}." batch["input_values"] = processor(batch["speech"], sampling_rate=batch["sampling_rate"][0]).input_values with processor.as_target_processor(): batch["labels"] = processor(batch["text"]).input_ids return batch train_dataset = train_dataset.map( prepare_dataset, batch_size=training_args.per_device_train_batch_size, batched=True, num_proc=data_args.preprocessing_num_workers, ) val_dataset = val_dataset.map( prepare_dataset, batch_size=training_args.per_device_train_batch_size, batched=True, num_proc=data_args.preprocessing_num_workers, ) data_collator = DataCollatorCTCWithPadding(processor=processor, padding=True) def compute_metrics(pred): pred_logits = pred.predictions pred_ids = np.argmax(pred_logits, axis=-1) pred.label_ids[pred.label_ids == -100] = processor.tokenizer.pad_token_id pred_str = processor.batch_decode(pred_ids) # we do not want to group tokens when computing the metrics label_str = processor.batch_decode(pred.label_ids, group_tokens=False) wer = wer_metric.compute(predictions=pred_str, references=label_str) return {"wer": wer} if model_args.freeze_feature_extractor: model.freeze_feature_extractor() trainer = CTCTrainer( model=model, data_collator=data_collator, args=training_args, compute_metrics=compute_metrics, train_dataset=train_dataset, eval_dataset=val_dataset, tokenizer=processor.feature_extractor, ) trainer.train() if __name__ == "__main__": main()
AdaMix/examples/research_projects/wav2vec2/run_asr.py/0
{ "file_path": "AdaMix/examples/research_projects/wav2vec2/run_asr.py", "repo_id": "AdaMix", "token_count": 4278 }
46
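A minimal usage sketch (not part of the file above) of the DataCollatorCTCWithPadding class it defines, assuming that class is in scope and that a public Wav2Vec2 checkpoint such as "facebook/wav2vec2-base-960h" (not referenced in the file itself) can be downloaded; the audio values and transcripts are placeholders.

from transformers import Wav2Vec2Processor

processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
data_collator = DataCollatorCTCWithPadding(processor=processor, padding=True)

# Two dummy utterances of different lengths; labels are character ids from the CTC tokenizer.
features = [
    {"input_values": [0.1] * 16000, "labels": processor.tokenizer("HELLO").input_ids},
    {"input_values": [0.2] * 8000, "labels": processor.tokenizer("WORLD").input_ids},
]
batch = data_collator(features)
print(batch["input_values"].shape)  # both utterances padded to the longest one
print(batch["labels"])              # label padding replaced by -100 so the CTC loss ignores it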
[tool.black]
line-length = 119
target-version = ['py35']
AdaMix/pyproject.toml/0
{ "file_path": "AdaMix/pyproject.toml", "repo_id": "AdaMix", "token_count": 21 }
47
#!/usr/bin/env python # Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # this script builds a small sample spm file tests/fixtures/test_sentencepiece_no_bos.model, with features needed by pegasus # 1. pip install sentencepiece # # 2. wget https://raw.githubusercontent.com/google/sentencepiece/master/data/botchan.txt # 3. build import sentencepiece as spm # pegasus: # 1. no bos # 2. eos_id is 1 # 3. unk_id is 2 # build a sample spm file accordingly spm.SentencePieceTrainer.train('--input=botchan.txt --model_prefix=test_sentencepiece_no_bos --bos_id=-1 --unk_id=2 --eos_id=1 --vocab_size=1000') # 4. now update the fixture # mv test_sentencepiece_no_bos.model ../../tests/fixtures/
AdaMix/scripts/pegasus/build_test_sample_spm_no_bos.py/0
{ "file_path": "AdaMix/scripts/pegasus/build_test_sample_spm_no_bos.py", "repo_id": "AdaMix", "token_count": 393 }
48
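A short sketch of how the resulting fixture could be sanity-checked, assuming the training command above has been run and test_sentencepiece_no_bos.model is in the working directory.

import sentencepiece as spm

sp = spm.SentencePieceProcessor()
sp.load("test_sentencepiece_no_bos.model")
print(sp.bos_id())  # -1: no BOS token is reserved (pegasus style)
print(sp.eos_id())  # 1
print(sp.unk_id())  # 2
print(sp.encode_as_ids("the quick brown fox"))  # ids below 1000, given --vocab_size=1000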
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os import shutil from argparse import ArgumentParser, Namespace from pathlib import Path from typing import List from ..utils import logging from . import BaseTransformersCLICommand try: from cookiecutter.main import cookiecutter _has_cookiecutter = True except ImportError: _has_cookiecutter = False logger = logging.get_logger(__name__) # pylint: disable=invalid-name def add_new_model_command_factory(args: Namespace): return AddNewModelCommand(args.testing, args.testing_file, path=args.path) class AddNewModelCommand(BaseTransformersCLICommand): @staticmethod def register_subcommand(parser: ArgumentParser): add_new_model_parser = parser.add_parser("add-new-model") add_new_model_parser.add_argument("--testing", action="store_true", help="If in testing mode.") add_new_model_parser.add_argument("--testing_file", type=str, help="Configuration file on which to run.") add_new_model_parser.add_argument( "--path", type=str, help="Path to cookiecutter. Should only be used for testing purposes." ) add_new_model_parser.set_defaults(func=add_new_model_command_factory) def __init__(self, testing: bool, testing_file: str, path=None, *args): self._testing = testing self._testing_file = testing_file self._path = path def run(self): if not _has_cookiecutter: raise ImportError( "Model creation dependencies are required to use the `add_new_model` command. Install them by running " "the folowing at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n" ) # Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory directories = [directory for directory in os.listdir() if "cookiecutter-template-" == directory[:22]] if len(directories) > 0: raise ValueError( "Several directories starting with `cookiecutter-template-` in current working directory. " "Please clean your directory by removing all folders startign with `cookiecutter-template-` or " "change your working directory." 
) path_to_transformer_root = ( Path(__file__).parent.parent.parent.parent if self._path is None else Path(self._path).parent.parent ) path_to_cookiecutter = path_to_transformer_root / "templates" / "adding_a_new_model" # Execute cookiecutter if not self._testing: cookiecutter(str(path_to_cookiecutter)) else: with open(self._testing_file, "r") as configuration_file: testing_configuration = json.load(configuration_file) cookiecutter( str(path_to_cookiecutter if self._path is None else self._path), no_input=True, extra_context=testing_configuration, ) directory = [directory for directory in os.listdir() if "cookiecutter-template-" in directory[:22]][0] # Retrieve configuration with open(directory + "/configuration.json", "r") as configuration_file: configuration = json.load(configuration_file) lowercase_model_name = configuration["lowercase_modelname"] pytorch_or_tensorflow = configuration["generate_tensorflow_and_pytorch"] os.remove(f"{directory}/configuration.json") output_pytorch = "PyTorch" in pytorch_or_tensorflow output_tensorflow = "TensorFlow" in pytorch_or_tensorflow model_dir = f"{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}" os.makedirs(model_dir, exist_ok=True) shutil.move( f"{directory}/__init__.py", f"{model_dir}/__init__.py", ) shutil.move( f"{directory}/configuration_{lowercase_model_name}.py", f"{model_dir}/configuration_{lowercase_model_name}.py", ) def remove_copy_lines(path): with open(path, "r") as f: lines = f.readlines() with open(path, "w") as f: for line in lines: if "# Copied from transformers." not in line: f.write(line) if output_pytorch: if not self._testing: remove_copy_lines(f"{directory}/modeling_{lowercase_model_name}.py") shutil.move( f"{directory}/modeling_{lowercase_model_name}.py", f"{model_dir}/modeling_{lowercase_model_name}.py", ) shutil.move( f"{directory}/test_modeling_{lowercase_model_name}.py", f"{path_to_transformer_root}/tests/test_modeling_{lowercase_model_name}.py", ) else: os.remove(f"{directory}/modeling_{lowercase_model_name}.py") os.remove(f"{directory}/test_modeling_{lowercase_model_name}.py") if output_tensorflow: if not self._testing: remove_copy_lines(f"{directory}/modeling_tf_{lowercase_model_name}.py") shutil.move( f"{directory}/modeling_tf_{lowercase_model_name}.py", f"{model_dir}/modeling_tf_{lowercase_model_name}.py", ) shutil.move( f"{directory}/test_modeling_tf_{lowercase_model_name}.py", f"{path_to_transformer_root}/tests/test_modeling_tf_{lowercase_model_name}.py", ) else: os.remove(f"{directory}/modeling_tf_{lowercase_model_name}.py") os.remove(f"{directory}/test_modeling_tf_{lowercase_model_name}.py") shutil.move( f"{directory}/{lowercase_model_name}.rst", f"{path_to_transformer_root}/docs/source/model_doc/{lowercase_model_name}.rst", ) shutil.move( f"{directory}/tokenization_{lowercase_model_name}.py", f"{model_dir}/tokenization_{lowercase_model_name}.py", ) shutil.move( f"{directory}/tokenization_fast_{lowercase_model_name}.py", f"{model_dir}/tokenization_{lowercase_model_name}_fast.py", ) from os import fdopen, remove from shutil import copymode, move from tempfile import mkstemp def replace(original_file: str, line_to_copy_below: str, lines_to_copy: List[str]): # Create temp file fh, abs_path = mkstemp() line_found = False with fdopen(fh, "w") as new_file: with open(original_file) as old_file: for line in old_file: new_file.write(line) if line_to_copy_below in line: line_found = True for line_to_copy in lines_to_copy: new_file.write(line_to_copy) if not line_found: raise ValueError(f"Line 
{line_to_copy_below} was not found in file.") # Copy the file permissions from the old file to the new file copymode(original_file, abs_path) # Remove original file remove(original_file) # Move new file move(abs_path, original_file) def skip_units(line): return ("generating PyTorch" in line and not output_pytorch) or ( "generating TensorFlow" in line and not output_tensorflow ) def replace_in_files(path_to_datafile): with open(path_to_datafile) as datafile: lines_to_copy = [] skip_file = False skip_snippet = False for line in datafile: if "# To replace in: " in line and "##" not in line: file_to_replace_in = line.split('"')[1] skip_file = skip_units(line) elif "# Below: " in line and "##" not in line: line_to_copy_below = line.split('"')[1] skip_snippet = skip_units(line) elif "# End." in line and "##" not in line: if not skip_file and not skip_snippet: replace(file_to_replace_in, line_to_copy_below, lines_to_copy) lines_to_copy = [] elif "# Replace with" in line and "##" not in line: lines_to_copy = [] elif "##" not in line: lines_to_copy.append(line) remove(path_to_datafile) replace_in_files(f"{directory}/to_replace_{lowercase_model_name}.py") os.rmdir(directory)
AdaMix/src/transformers/commands/add_new_model.py/0
{ "file_path": "AdaMix/src/transformers/commands/add_new_model.py", "repo_id": "AdaMix", "token_count": 4355 }
49
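A hedged sketch of how the command above is wired into an argparse CLI, mirroring in simplified form what the transformers-cli entry point does; the testing file name is a placeholder.

from argparse import ArgumentParser
from transformers.commands.add_new_model import AddNewModelCommand

parser = ArgumentParser("transformers-cli", usage="transformers-cli <command> [<args>]")
subparsers = parser.add_subparsers(help="transformers-cli command helpers")
AddNewModelCommand.register_subcommand(subparsers)

args = parser.parse_args(["add-new-model", "--testing", "--testing_file", "fixtures/new_model.json"])
command = args.func(args)  # AddNewModelCommand(testing=True, testing_file="fixtures/new_model.json", path=None)
# command.run() would then drive cookiecutter to scaffold the new model files.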
# coding=utf-8 # Copyright 2019-present, the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import io import os from os.path import expanduser from typing import Dict, List, Optional, Tuple from tqdm import tqdm import requests ENDPOINT = "https://huggingface.co" class RepoObj: """ HuggingFace git-based system, data structure that represents a file belonging to the current user. """ def __init__(self, filename: str, lastModified: str, commit: str, size: int, **kwargs): self.filename = filename self.lastModified = lastModified self.commit = commit self.size = size class ModelSibling: """ Data structure that represents a public file inside a model, accessible from huggingface.co """ def __init__(self, rfilename: str, **kwargs): self.rfilename = rfilename # filename relative to the model root for k, v in kwargs.items(): setattr(self, k, v) class ModelInfo: """ Info about a public model accessible from huggingface.co """ def __init__( self, modelId: Optional[str] = None, # id of model tags: List[str] = [], pipeline_tag: Optional[str] = None, siblings: Optional[List[Dict]] = None, # list of files that constitute the model **kwargs ): self.modelId = modelId self.tags = tags self.pipeline_tag = pipeline_tag self.siblings = [ModelSibling(**x) for x in siblings] if siblings is not None else None for k, v in kwargs.items(): setattr(self, k, v) class HfApi: def __init__(self, endpoint=None): self.endpoint = endpoint if endpoint is not None else ENDPOINT def login(self, username: str, password: str) -> str: """ Call HF API to sign in a user and get a token if credentials are valid. Outputs: token if credentials are valid Throws: requests.exceptions.HTTPError if credentials are invalid """ path = "{}/api/login".format(self.endpoint) r = requests.post(path, json={"username": username, "password": password}) r.raise_for_status() d = r.json() return d["token"] def whoami(self, token: str) -> Tuple[str, List[str]]: """ Call HF API to know "whoami" """ path = "{}/api/whoami".format(self.endpoint) r = requests.get(path, headers={"authorization": "Bearer {}".format(token)}) r.raise_for_status() d = r.json() return d["user"], d["orgs"] def logout(self, token: str) -> None: """ Call HF API to log out. """ path = "{}/api/logout".format(self.endpoint) r = requests.post(path, headers={"authorization": "Bearer {}".format(token)}) r.raise_for_status() def model_list(self) -> List[ModelInfo]: """ Get the public list of all the models on huggingface.co """ path = "{}/api/models".format(self.endpoint) r = requests.get(path) r.raise_for_status() d = r.json() return [ModelInfo(**x) for x in d] def list_repos_objs(self, token: str, organization: Optional[str] = None) -> List[RepoObj]: """ HuggingFace git-based system, used for models. Call HF API to list all stored files for user (or one of their organizations). 
""" path = "{}/api/repos/ls".format(self.endpoint) params = {"organization": organization} if organization is not None else None r = requests.get(path, params=params, headers={"authorization": "Bearer {}".format(token)}) r.raise_for_status() d = r.json() return [RepoObj(**x) for x in d] def create_repo( self, token: str, name: str, organization: Optional[str] = None, private: Optional[bool] = None, exist_ok=False, lfsmultipartthresh: Optional[int] = None, ) -> str: """ HuggingFace git-based system, used for models. Call HF API to create a whole repo. Params: private: Whether the model repo should be private (requires a paid huggingface.co account) exist_ok: Do not raise an error if repo already exists lfsmultipartthresh: Optional: internal param for testing purposes. """ path = "{}/api/repos/create".format(self.endpoint) json = {"name": name, "organization": organization, "private": private} if lfsmultipartthresh is not None: json["lfsmultipartthresh"] = lfsmultipartthresh r = requests.post( path, headers={"authorization": "Bearer {}".format(token)}, json=json, ) if exist_ok and r.status_code == 409: return "" r.raise_for_status() d = r.json() return d["url"] def delete_repo(self, token: str, name: str, organization: Optional[str] = None): """ HuggingFace git-based system, used for models. Call HF API to delete a whole repo. CAUTION(this is irreversible). """ path = "{}/api/repos/delete".format(self.endpoint) r = requests.delete( path, headers={"authorization": "Bearer {}".format(token)}, json={"name": name, "organization": organization}, ) r.raise_for_status() class TqdmProgressFileReader: """ Wrap an io.BufferedReader `f` (such as the output of `open(…, "rb")`) and override `f.read()` so as to display a tqdm progress bar. see github.com/huggingface/transformers/pull/2078#discussion_r354739608 for implementation details. """ def __init__(self, f: io.BufferedReader): self.f = f self.total_size = os.fstat(f.fileno()).st_size self.pbar = tqdm(total=self.total_size, leave=False) self.read = f.read f.read = self._read def _read(self, n=-1): self.pbar.update(n) return self.read(n) def close(self): self.pbar.close() class HfFolder: path_token = expanduser("~/.huggingface/token") @classmethod def save_token(cls, token): """ Save token, creating folder as needed. """ os.makedirs(os.path.dirname(cls.path_token), exist_ok=True) with open(cls.path_token, "w+") as f: f.write(token) @classmethod def get_token(cls): """ Get token or None if not existent. """ try: with open(cls.path_token, "r") as f: return f.read() except FileNotFoundError: pass @classmethod def delete_token(cls): """ Delete token. Do not fail if token does not exist. """ try: os.remove(cls.path_token) except FileNotFoundError: pass
AdaMix/src/transformers/hf_api.py/0
{ "file_path": "AdaMix/src/transformers/hf_api.py", "repo_id": "AdaMix", "token_count": 3145 }
50
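A small sketch of the API client defined above; the username, password and token are placeholders, and the calls require network access to huggingface.co.

from transformers.hf_api import HfApi, HfFolder

api = HfApi()
models = api.model_list()                 # public models returned by the endpoint above
print(len(models), models[0].modelId)

token = api.login(username="my-user", password="my-password")  # placeholder credentials
HfFolder.save_token(token)                # cached under ~/.huggingface/token
user, orgs = api.whoami(HfFolder.get_token())
print(user, orgs)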
# coding=utf-8 # Copyright 2018 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Auto Config class. """ import re from collections import OrderedDict from ...configuration_utils import PretrainedConfig from ..albert.configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig from ..bart.configuration_bart import BART_PRETRAINED_CONFIG_ARCHIVE_MAP, BartConfig from ..bert.configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig from ..bert_generation.configuration_bert_generation import BertGenerationConfig from ..blenderbot.configuration_blenderbot import BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP, BlenderbotConfig from ..blenderbot_small.configuration_blenderbot_small import ( BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP, BlenderbotSmallConfig, ) from ..camembert.configuration_camembert import CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, CamembertConfig from ..convbert.configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig from ..ctrl.configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig from ..deberta.configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig from ..deberta_v2.configuration_deberta_v2 import DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaV2Config from ..distilbert.configuration_distilbert import DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, DistilBertConfig from ..dpr.configuration_dpr import DPR_PRETRAINED_CONFIG_ARCHIVE_MAP, DPRConfig from ..electra.configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig from ..encoder_decoder.configuration_encoder_decoder import EncoderDecoderConfig from ..flaubert.configuration_flaubert import FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, FlaubertConfig from ..fsmt.configuration_fsmt import FSMT_PRETRAINED_CONFIG_ARCHIVE_MAP, FSMTConfig from ..funnel.configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig from ..gpt2.configuration_gpt2 import GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, GPT2Config from ..ibert.configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig from ..layoutlm.configuration_layoutlm import LAYOUTLM_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMConfig from ..led.configuration_led import LED_PRETRAINED_CONFIG_ARCHIVE_MAP, LEDConfig from ..longformer.configuration_longformer import LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, LongformerConfig from ..lxmert.configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig from ..m2m_100.configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config from ..marian.configuration_marian import MarianConfig from ..mbart.configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig from ..mobilebert.configuration_mobilebert import MobileBertConfig from ..mpnet.configuration_mpnet import MPNET_PRETRAINED_CONFIG_ARCHIVE_MAP, MPNetConfig from ..mt5.configuration_mt5 import MT5Config from ..openai.configuration_openai import OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP, 
OpenAIGPTConfig from ..pegasus.configuration_pegasus import PegasusConfig from ..prophetnet.configuration_prophetnet import PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ProphetNetConfig from ..rag.configuration_rag import RagConfig from ..reformer.configuration_reformer import ReformerConfig from ..retribert.configuration_retribert import RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RetriBertConfig from ..roberta.configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig from ..speech_to_text.configuration_speech_to_text import ( SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig, ) from ..squeezebert.configuration_squeezebert import SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, SqueezeBertConfig from ..t5.configuration_t5 import T5_PRETRAINED_CONFIG_ARCHIVE_MAP, T5Config from ..tapas.configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig from ..transfo_xl.configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig from ..wav2vec2.configuration_wav2vec2 import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, Wav2Vec2Config from ..xlm.configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig from ..xlm_prophetnet.configuration_xlm_prophetnet import ( XLM_PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMProphetNetConfig, ) from ..xlm_roberta.configuration_xlm_roberta import XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMRobertaConfig from ..xlnet.configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig ALL_PRETRAINED_CONFIG_ARCHIVE_MAP = dict( (key, value) for pretrained_map in [ # Add archive maps here SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LED_PRETRAINED_CONFIG_ARCHIVE_MAP, BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BART_PRETRAINED_CONFIG_ARCHIVE_MAP, BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP, MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP, TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, T5_PRETRAINED_CONFIG_ARCHIVE_MAP, XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, FSMT_PRETRAINED_CONFIG_ARCHIVE_MAP, ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LAYOUTLM_PRETRAINED_CONFIG_ARCHIVE_MAP, DPR_PRETRAINED_CONFIG_ARCHIVE_MAP, DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP, SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, XLM_PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP, PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP, MPNET_PRETRAINED_CONFIG_ARCHIVE_MAP, TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ] for key, value, in pretrained_map.items() ) CONFIG_MAPPING = OrderedDict( [ # Add configs here ("speech_to_text", Speech2TextConfig), ("wav2vec2", Wav2Vec2Config), ("m2m_100", M2M100Config), ("convbert", ConvBertConfig), ("led", LEDConfig), ("blenderbot-small", BlenderbotSmallConfig), ("retribert", RetriBertConfig), ("ibert", IBertConfig), ("mt5", MT5Config), ("t5", T5Config), 
("mobilebert", MobileBertConfig), ("distilbert", DistilBertConfig), ("albert", AlbertConfig), ("bert-generation", BertGenerationConfig), ("camembert", CamembertConfig), ("xlm-roberta", XLMRobertaConfig), ("pegasus", PegasusConfig), ("marian", MarianConfig), ("mbart", MBartConfig), ("mpnet", MPNetConfig), ("bart", BartConfig), ("blenderbot", BlenderbotConfig), ("reformer", ReformerConfig), ("longformer", LongformerConfig), ("roberta", RobertaConfig), ("deberta-v2", DebertaV2Config), ("deberta", DebertaConfig), ("flaubert", FlaubertConfig), ("fsmt", FSMTConfig), ("squeezebert", SqueezeBertConfig), ("bert", BertConfig), ("openai-gpt", OpenAIGPTConfig), ("gpt2", GPT2Config), ("transfo-xl", TransfoXLConfig), ("xlnet", XLNetConfig), ("xlm-prophetnet", XLMProphetNetConfig), ("prophetnet", ProphetNetConfig), ("xlm", XLMConfig), ("ctrl", CTRLConfig), ("electra", ElectraConfig), ("encoder-decoder", EncoderDecoderConfig), ("funnel", FunnelConfig), ("lxmert", LxmertConfig), ("dpr", DPRConfig), ("layoutlm", LayoutLMConfig), ("rag", RagConfig), ("tapas", TapasConfig), ] ) MODEL_NAMES_MAPPING = OrderedDict( [ # Add full (and cased) model names here ("speech_to_text", "Speech2Text"), ("wav2vec2", "Wav2Vec2"), ("m2m_100", "M2M100"), ("convbert", "ConvBERT"), ("led", "LED"), ("blenderbot-small", "BlenderbotSmall"), ("retribert", "RetriBERT"), ("ibert", "I-BERT"), ("t5", "T5"), ("mobilebert", "MobileBERT"), ("distilbert", "DistilBERT"), ("albert", "ALBERT"), ("bert-generation", "Bert Generation"), ("camembert", "CamemBERT"), ("xlm-roberta", "XLM-RoBERTa"), ("pegasus", "Pegasus"), ("blenderbot", "Blenderbot"), ("marian", "Marian"), ("mbart", "mBART"), ("bart", "BART"), ("reformer", "Reformer"), ("longformer", "Longformer"), ("roberta", "RoBERTa"), ("flaubert", "FlauBERT"), ("fsmt", "FairSeq Machine-Translation"), ("squeezebert", "SqueezeBERT"), ("bert", "BERT"), ("openai-gpt", "OpenAI GPT"), ("gpt2", "OpenAI GPT-2"), ("transfo-xl", "Transformer-XL"), ("xlnet", "XLNet"), ("xlm", "XLM"), ("ctrl", "CTRL"), ("electra", "ELECTRA"), ("encoder-decoder", "Encoder decoder"), ("funnel", "Funnel Transformer"), ("lxmert", "LXMERT"), ("deberta-v2", "DeBERTa-v2"), ("deberta-densenet", "DeBERTa-DenseNet"), ("deberta", "DeBERTa"), ("layoutlm", "LayoutLM"), ("dpr", "DPR"), ("rag", "RAG"), ("xlm-prophetnet", "XLMProphetNet"), ("prophetnet", "ProphetNet"), ("mt5", "mT5"), ("mpnet", "MPNet"), ("tapas", "TAPAS"), ] ) def _list_model_options(indent, config_to_class=None, use_model_types=True): if config_to_class is None and not use_model_types: raise ValueError("Using `use_model_types=False` requires a `config_to_class` dictionary.") if use_model_types: if config_to_class is None: model_type_to_name = {model_type: config.__name__ for model_type, config in CONFIG_MAPPING.items()} else: model_type_to_name = { model_type: config_to_class[config].__name__ for model_type, config in CONFIG_MAPPING.items() if config in config_to_class } lines = [ f"{indent}- **{model_type}** -- :class:`~transformers.{cls_name}` ({MODEL_NAMES_MAPPING[model_type]} model)" for model_type, cls_name in model_type_to_name.items() ] else: config_to_name = {config.__name__: clas.__name__ for config, clas in config_to_class.items()} config_to_model_name = { config.__name__: MODEL_NAMES_MAPPING[model_type] for model_type, config in CONFIG_MAPPING.items() } lines = [ f"{indent}- :class:`~transformers.{config_name}` configuration class: :class:`~transformers.{cls_name}` ({config_to_model_name[config_name]} model)" for config_name, cls_name in config_to_name.items() 
] return "\n".join(lines) def replace_list_option_in_docstrings(config_to_class=None, use_model_types=True): def docstring_decorator(fn): docstrings = fn.__doc__ lines = docstrings.split("\n") i = 0 while i < len(lines) and re.search(r"^(\s*)List options\s*$", lines[i]) is None: i += 1 if i < len(lines): indent = re.search(r"^(\s*)List options\s*$", lines[i]).groups()[0] if use_model_types: indent = f"{indent} " lines[i] = _list_model_options(indent, config_to_class=config_to_class, use_model_types=use_model_types) docstrings = "\n".join(lines) else: raise ValueError( f"The function {fn} should have an empty 'List options' in its docstring as placeholder, current docstring is:\n{docstrings}" ) fn.__doc__ = docstrings return fn return docstring_decorator class AutoConfig: r""" This is a generic configuration class that will be instantiated as one of the configuration classes of the library when created with the :meth:`~transformers.AutoConfig.from_pretrained` class method. This class cannot be instantiated directly using ``__init__()`` (throws an error). """ def __init__(self): raise EnvironmentError( "AutoConfig is designed to be instantiated " "using the `AutoConfig.from_pretrained(pretrained_model_name_or_path)` method." ) @classmethod def for_model(cls, model_type: str, *args, **kwargs): if model_type in CONFIG_MAPPING: config_class = CONFIG_MAPPING[model_type] return config_class(*args, **kwargs) raise ValueError( "Unrecognized model identifier: {}. Should contain one of {}".format( model_type, ", ".join(CONFIG_MAPPING.keys()) ) ) @classmethod @replace_list_option_in_docstrings() def from_pretrained(cls, pretrained_model_name_or_path, **kwargs): r""" Instantiate one of the configuration classes of the library from a pretrained model configuration. The configuration class to instantiate is selected based on the :obj:`model_type` property of the config object that is loaded, or when it's missing, by falling back to using pattern matching on :obj:`pretrained_model_name_or_path`: List options Args: pretrained_model_name_or_path (:obj:`str` or :obj:`os.PathLike`): Can be either: - A string, the `model id` of a pretrained model configuration hosted inside a model repo on huggingface.co. Valid model ids can be located at the root-level, like ``bert-base-uncased``, or namespaced under a user or organization name, like ``dbmdz/bert-base-german-cased``. - A path to a `directory` containing a configuration file saved using the :meth:`~transformers.PretrainedConfig.save_pretrained` method, or the :meth:`~transformers.PreTrainedModel.save_pretrained` method, e.g., ``./my_model_directory/``. - A path or url to a saved configuration JSON `file`, e.g., ``./my_model_directory/configuration.json``. cache_dir (:obj:`str` or :obj:`os.PathLike`, `optional`): Path to a directory in which a downloaded pretrained model configuration should be cached if the standard cache should not be used. force_download (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not to force the (re-)download the model weights and configuration files and override the cached versions if they exist. resume_download (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not to delete incompletely received files. Will attempt to resume the download if such a file exists. proxies (:obj:`Dict[str, str]`, `optional`): A dictionary of proxy servers to use by protocol or endpoint, e.g., :obj:`{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. 
revision(:obj:`str`, `optional`, defaults to :obj:`"main"`): The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so ``revision`` can be any identifier allowed by git. return_unused_kwargs (:obj:`bool`, `optional`, defaults to :obj:`False`): If :obj:`False`, then this function returns just the final configuration object. If :obj:`True`, then this functions returns a :obj:`Tuple(config, unused_kwargs)` where `unused_kwargs` is a dictionary consisting of the key/value pairs whose keys are not configuration attributes: i.e., the part of ``kwargs`` which has not been used to update ``config`` and is otherwise ignored. kwargs(additional keyword arguments, `optional`): The values in kwargs of any keys which are configuration attributes will be used to override the loaded values. Behavior concerning key/value pairs whose keys are *not* configuration attributes is controlled by the ``return_unused_kwargs`` keyword parameter. Examples:: >>> from transformers import AutoConfig >>> # Download configuration from huggingface.co and cache. >>> config = AutoConfig.from_pretrained('bert-base-uncased') >>> # Download configuration from huggingface.co (user-uploaded) and cache. >>> config = AutoConfig.from_pretrained('dbmdz/bert-base-german-cased') >>> # If configuration file is in a directory (e.g., was saved using `save_pretrained('./test/saved_model/')`). >>> config = AutoConfig.from_pretrained('./test/bert_saved_model/') >>> # Load a specific configuration file. >>> config = AutoConfig.from_pretrained('./test/bert_saved_model/my_configuration.json') >>> # Change some config attributes when loading a pretrained config. >>> config = AutoConfig.from_pretrained('bert-base-uncased', output_attentions=True, foo=False) >>> config.output_attentions True >>> config, unused_kwargs = AutoConfig.from_pretrained('bert-base-uncased', output_attentions=True, foo=False, return_unused_kwargs=True) >>> config.output_attentions True >>> config.unused_kwargs {'foo': False} """ config_dict, _ = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) if "model_type" in config_dict: config_class = CONFIG_MAPPING[config_dict["model_type"]] return config_class.from_dict(config_dict, **kwargs) else: # Fallback: use pattern matching on the string. for pattern, config_class in CONFIG_MAPPING.items(): if pattern in str(pretrained_model_name_or_path): return config_class.from_dict(config_dict, **kwargs) raise ValueError( "Unrecognized model in {}. " "Should have a `model_type` key in its config.json, or contain one of the following strings " "in its name: {}".format(pretrained_model_name_or_path, ", ".join(CONFIG_MAPPING.keys())) )
AdaMix/src/transformers/models/auto/configuration_auto.py/0
{ "file_path": "AdaMix/src/transformers/models/auto/configuration_auto.py", "repo_id": "AdaMix", "token_count": 8527 }
51
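A brief sketch of the two AutoConfig entry points defined above, assuming the package is importable as transformers; the local directory in the comment is an assumption.

from transformers import AutoConfig

config = AutoConfig.for_model("deberta")      # a fresh DebertaConfig with default values
print(type(config).__name__, config.model_type)

# from_pretrained picks the concrete class from the checkpoint's model_type, falling back
# to pattern matching on the name, e.g. for a previously saved local directory:
# config = AutoConfig.from_pretrained("./my-deberta-checkpoint")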
# coding=utf-8 # Copyright 2020 The Facebook AI Research Team Authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ...utils import logging from ..roberta.tokenization_roberta_fast import RobertaTokenizerFast from .tokenization_bart import BartTokenizer logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"} # See all BART models at https://huggingface.co/models?filter=bart PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": { "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json", "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json", "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json", "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json", "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json", "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json", }, "merges_file": { "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt", "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt", "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt", "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt", "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt", "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt", }, "tokenizer_file": { "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json", "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json", "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json", "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json", "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json", "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json", }, } PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { "facebook/bart-base": 1024, "facebook/bart-large": 1024, "facebook/bart-large-mnli": 1024, "facebook/bart-large-cnn": 1024, "facebook/bart-large-xsum": 1024, "yjernite/bart_eli5": 1024, } class BartTokenizerFast(RobertaTokenizerFast): r""" Construct a "fast" BART tokenizer (backed by HuggingFace's `tokenizers` library). :class:`~transformers.BartTokenizerFast` is identical to :class:`~transformers.RobertaTokenizerFast`. Refer to superclass :class:`~transformers.RobertaTokenizerFast` for usage examples and documentation concerning the initialization parameters and other methods. 
""" vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES slow_tokenizer_class = BartTokenizer
AdaMix/src/transformers/models/bart/tokenization_bart_fast.py/0
{ "file_path": "AdaMix/src/transformers/models/bart/tokenization_bart_fast.py", "repo_id": "AdaMix", "token_count": 1510 }
52
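A quick sketch of the fast BART tokenizer defined above, assuming the facebook/bart-base files can be downloaded.

from transformers import BartTokenizerFast

tok = BartTokenizerFast.from_pretrained("facebook/bart-base")
enc = tok("Hello world", return_tensors="pt")
print(enc["input_ids"])  # ids wrapped in <s> ... </s>
print(tok.convert_ids_to_tokens(enc["input_ids"][0].tolist()))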
# coding=utf-8 # Copyright 2018 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert Huggingface Pytorch checkpoint to Tensorflow checkpoint.""" import argparse import os import numpy as np import tensorflow as tf import torch from transformers import BertModel def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str): """ Args: model: BertModel Pytorch model instance to be converted ckpt_dir: Tensorflow model directory model_name: model name Currently supported HF models: - Y BertModel - N BertForMaskedLM - N BertForPreTraining - N BertForMultipleChoice - N BertForNextSentencePrediction - N BertForSequenceClassification - N BertForQuestionAnswering """ tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value") var_map = ( ("layer.", "layer_"), ("word_embeddings.weight", "word_embeddings"), ("position_embeddings.weight", "position_embeddings"), ("token_type_embeddings.weight", "token_type_embeddings"), (".", "/"), ("LayerNorm/weight", "LayerNorm/gamma"), ("LayerNorm/bias", "LayerNorm/beta"), ("weight", "kernel"), ) if not os.path.isdir(ckpt_dir): os.makedirs(ckpt_dir) state_dict = model.state_dict() def to_tf_var_name(name: str): for patt, repl in iter(var_map): name = name.replace(patt, repl) return "bert/{}".format(name) def create_tf_var(tensor: np.ndarray, name: str, session: tf.Session): tf_dtype = tf.dtypes.as_dtype(tensor.dtype) tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer()) session.run(tf.variables_initializer([tf_var])) session.run(tf_var) return tf_var tf.reset_default_graph() with tf.Session() as session: for var_name in state_dict: tf_name = to_tf_var_name(var_name) torch_tensor = state_dict[var_name].numpy() if any([x in var_name for x in tensors_to_transpose]): torch_tensor = torch_tensor.T tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session) tf.keras.backend.set_value(tf_var, torch_tensor) tf_weight = session.run(tf_var) print("Successfully created {}: {}".format(tf_name, np.allclose(tf_weight, torch_tensor))) saver = tf.train.Saver(tf.trainable_variables()) saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt")) def main(raw_args=None): parser = argparse.ArgumentParser() parser.add_argument("--model_name", type=str, required=True, help="model name e.g. 
bert-base-uncased") parser.add_argument( "--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model" ) parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin") parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model") args = parser.parse_args(raw_args) model = BertModel.from_pretrained( pretrained_model_name_or_path=args.model_name, state_dict=torch.load(args.pytorch_model_path), cache_dir=args.cache_dir, ) convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name) if __name__ == "__main__": main()
AdaMix/src/transformers/models/bert/convert_bert_pytorch_checkpoint_to_original_tf.py/0
{ "file_path": "AdaMix/src/transformers/models/bert/convert_bert_pytorch_checkpoint_to_original_tf.py", "repo_id": "AdaMix", "token_count": 1659 }
53
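A hedged sketch of calling the converter above directly, plus the equivalent CLI form from main(); it assumes the function is in scope, the output directory is a placeholder, and a TensorFlow build exposing the v1 Session API is available.

from transformers import BertModel

model = BertModel.from_pretrained("bert-base-uncased")
convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir="./tf_ckpt", model_name="bert-base-uncased")

# CLI form (arguments as defined in main()):
#   python convert_bert_pytorch_checkpoint_to_original_tf.py \
#       --model_name bert-base-uncased \
#       --pytorch_model_path ./pytorch_model.bin \
#       --tf_cache_dir ./tf_ckpt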
# coding=utf-8 # Copyright 2018 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tokenization classes for ConvBERT.""" from ...utils import logging from ..bert.tokenization_bert import BertTokenizer logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"} PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": { "YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt", "YituTech/conv-bert-medium-small": "https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt", "YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt", } } PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { "YituTech/conv-bert-base": 512, "YituTech/conv-bert-medium-small": 512, "YituTech/conv-bert-small": 512, } PRETRAINED_INIT_CONFIGURATION = { "YituTech/conv-bert-base": {"do_lower_case": True}, "YituTech/conv-bert-medium-small": {"do_lower_case": True}, "YituTech/conv-bert-small": {"do_lower_case": True}, } class ConvBertTokenizer(BertTokenizer): r""" Construct a ConvBERT tokenizer. :class:`~transformers.ConvBertTokenizer` is identical to :class:`~transformers.BertTokenizer` and runs end-to-end tokenization: punctuation splitting and wordpiece. Refer to superclass :class:`~transformers.BertTokenizer` for usage examples and documentation concerning parameters. """ vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
AdaMix/src/transformers/models/convbert/tokenization_convbert.py/0
{ "file_path": "AdaMix/src/transformers/models/convbert/tokenization_convbert.py", "repo_id": "AdaMix", "token_count": 812 }
54
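A short sketch of the tokenizer above, assuming the YituTech/conv-bert-base vocabulary can be downloaded.

from transformers import ConvBertTokenizer

tok = ConvBertTokenizer.from_pretrained("YituTech/conv-bert-base")
print(tok.tokenize("Convolutional attention is neat."))
print(tok("Convolutional attention is neat.")["input_ids"])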
# coding=utf-8 # Copyright 2020, Microsoft and the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ DeBERTa model configuration """ from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = { "microsoft/deberta-base": "https://huggingface.co/microsoft/deberta-base/resolve/main/config.json", "microsoft/deberta-large": "https://huggingface.co/microsoft/deberta-large/resolve/main/config.json", "microsoft/deberta-xlarge": "https://huggingface.co/microsoft/deberta-xlarge/resolve/main/config.json", "microsoft/deberta-base-mnli": "https://huggingface.co/microsoft/deberta-base-mnli/resolve/main/config.json", "microsoft/deberta-large-mnli": "https://huggingface.co/microsoft/deberta-large-mnli/resolve/main/config.json", "microsoft/deberta-xlarge-mnli": "https://huggingface.co/microsoft/deberta-xlarge-mnli/resolve/main/config.json", } class DebertaConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a :class:`~transformers.DebertaModel` or a :class:`~transformers.TFDebertaModel`. It is used to instantiate a DeBERTa model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the DeBERTa `microsoft/deberta-base <https://huggingface.co/microsoft/deberta-base>`__ architecture. Configuration objects inherit from :class:`~transformers.PretrainedConfig` and can be used to control the model outputs. Read the documentation from :class:`~transformers.PretrainedConfig` for more information. Arguments: vocab_size (:obj:`int`, `optional`, defaults to 30522): Vocabulary size of the DeBERTa model. Defines the number of different tokens that can be represented by the :obj:`inputs_ids` passed when calling :class:`~transformers.DebertaModel` or :class:`~transformers.TFDebertaModel`. hidden_size (:obj:`int`, `optional`, defaults to 768): Dimensionality of the encoder layers and the pooler layer. num_hidden_layers (:obj:`int`, `optional`, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (:obj:`int`, `optional`, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. intermediate_size (:obj:`int`, `optional`, defaults to 3072): Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder. hidden_act (:obj:`str` or :obj:`Callable`, `optional`, defaults to :obj:`"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, :obj:`"gelu"`, :obj:`"relu"`, :obj:`"silu"`, :obj:`"gelu"`, :obj:`"tanh"`, :obj:`"gelu_fast"`, :obj:`"mish"`, :obj:`"linear"`, :obj:`"sigmoid"` and :obj:`"gelu_new"` are supported. hidden_dropout_prob (:obj:`float`, `optional`, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. 
attention_probs_dropout_prob (:obj:`float`, `optional`, defaults to 0.1): The dropout ratio for the attention probabilities. max_position_embeddings (:obj:`int`, `optional`, defaults to 512): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). type_vocab_size (:obj:`int`, `optional`, defaults to 2): The vocabulary size of the :obj:`token_type_ids` passed when calling :class:`~transformers.DebertaModel` or :class:`~transformers.TFDebertaModel`. initializer_range (:obj:`float`, `optional`, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (:obj:`float`, `optional`, defaults to 1e-12): The epsilon used by the layer normalization layers. relative_attention (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether use relative position encoding. max_relative_positions (:obj:`int`, `optional`, defaults to 1): The range of relative positions :obj:`[-max_position_embeddings, max_position_embeddings]`. Use the same value as :obj:`max_position_embeddings`. pad_token_id (:obj:`int`, `optional`, defaults to 0): The value used to pad input_ids. position_biased_input (:obj:`bool`, `optional`, defaults to :obj:`True`): Whether add absolute position embedding to content embedding. pos_att_type (:obj:`List[str]`, `optional`): The type of relative position attention, it can be a combination of :obj:`["p2c", "c2p", "p2p"]`, e.g. :obj:`["p2c"]`, :obj:`["p2c", "c2p"]`, :obj:`["p2c", "c2p", 'p2p"]`. layer_norm_eps (:obj:`float`, optional, defaults to 1e-12): The epsilon used by the layer normalization layers. """ model_type = "deberta" def __init__( self, vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=0, initializer_range=0.02, layer_norm_eps=1e-7, relative_attention=False, max_relative_positions=-1, pad_token_id=0, position_biased_input=True, pos_att_type=None, pooler_dropout=0, pooler_hidden_act="gelu", **kwargs ): super().__init__(**kwargs) self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.initializer_range = initializer_range self.relative_attention = relative_attention self.max_relative_positions = max_relative_positions self.pad_token_id = pad_token_id self.position_biased_input = position_biased_input # Backwards compatibility if type(pos_att_type) == str: pos_att_type = [x.strip() for x in pos_att_type.lower().split("|")] self.pos_att_type = pos_att_type self.vocab_size = vocab_size self.layer_norm_eps = layer_norm_eps self.pooler_hidden_size = kwargs.get("pooler_hidden_size", hidden_size) self.pooler_dropout = pooler_dropout self.pooler_hidden_act = pooler_hidden_act
AdaMix/src/transformers/models/deberta/configuration_deberta.py/0
{ "file_path": "AdaMix/src/transformers/models/deberta/configuration_deberta.py", "repo_id": "AdaMix", "token_count": 3007 }
55
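A small sketch of instantiating the configuration class above; the values are illustrative, not recommended settings.

from transformers import DebertaConfig

config = DebertaConfig(
    relative_attention=True,
    max_relative_positions=512,
    pos_att_type="c2p|p2c",  # the string form is split into ["c2p", "p2c"] for backwards compatibility
)
print(config.pos_att_type)                              # ['c2p', 'p2c']
print(config.pooler_hidden_size == config.hidden_size)  # True unless pooler_hidden_size is passed explicitly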
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import collections from pathlib import Path import torch from torch.serialization import default_restore_location from .transformers import BertConfig, DPRConfig, DPRContextEncoder, DPRQuestionEncoder, DPRReader CheckpointState = collections.namedtuple( "CheckpointState", ["model_dict", "optimizer_dict", "scheduler_dict", "offset", "epoch", "encoder_params"] ) def load_states_from_checkpoint(model_file: str) -> CheckpointState: print("Reading saved model from %s", model_file) state_dict = torch.load(model_file, map_location=lambda s, l: default_restore_location(s, "cpu")) return CheckpointState(**state_dict) class DPRState: def __init__(self, src_file: Path): self.src_file = src_file def load_dpr_model(self): raise NotImplementedError @staticmethod def from_type(comp_type: str, *args, **kwargs) -> "DPRState": if comp_type.startswith("c"): return DPRContextEncoderState(*args, **kwargs) if comp_type.startswith("q"): return DPRQuestionEncoderState(*args, **kwargs) if comp_type.startswith("r"): return DPRReaderState(*args, **kwargs) else: raise ValueError("Component type must be either 'ctx_encoder', 'question_encoder' or 'reader'.") class DPRContextEncoderState(DPRState): def load_dpr_model(self): model = DPRContextEncoder(DPRConfig(**BertConfig.get_config_dict("bert-base-uncased")[0])) print("Loading DPR biencoder from {}".format(self.src_file)) saved_state = load_states_from_checkpoint(self.src_file) encoder, prefix = model.ctx_encoder, "ctx_model." # Fix changes from https://github.com/huggingface/transformers/commit/614fef1691edb806de976756d4948ecbcd0c0ca3 state_dict = {"bert_model.embeddings.position_ids": model.ctx_encoder.bert_model.embeddings.position_ids} for key, value in saved_state.model_dict.items(): if key.startswith(prefix): key = key[len(prefix) :] if not key.startswith("encode_proj."): key = "bert_model." + key state_dict[key] = value encoder.load_state_dict(state_dict) return model class DPRQuestionEncoderState(DPRState): def load_dpr_model(self): model = DPRQuestionEncoder(DPRConfig(**BertConfig.get_config_dict("bert-base-uncased")[0])) print("Loading DPR biencoder from {}".format(self.src_file)) saved_state = load_states_from_checkpoint(self.src_file) encoder, prefix = model.question_encoder, "question_model." # Fix changes from https://github.com/huggingface/transformers/commit/614fef1691edb806de976756d4948ecbcd0c0ca3 state_dict = {"bert_model.embeddings.position_ids": model.question_encoder.bert_model.embeddings.position_ids} for key, value in saved_state.model_dict.items(): if key.startswith(prefix): key = key[len(prefix) :] if not key.startswith("encode_proj."): key = "bert_model." 
+ key state_dict[key] = value encoder.load_state_dict(state_dict) return model class DPRReaderState(DPRState): def load_dpr_model(self): model = DPRReader(DPRConfig(**BertConfig.get_config_dict("bert-base-uncased")[0])) print("Loading DPR reader from {}".format(self.src_file)) saved_state = load_states_from_checkpoint(self.src_file) # Fix changes from https://github.com/huggingface/transformers/commit/614fef1691edb806de976756d4948ecbcd0c0ca3 state_dict = { "encoder.bert_model.embeddings.position_ids": model.span_predictor.encoder.bert_model.embeddings.position_ids } for key, value in saved_state.model_dict.items(): if key.startswith("encoder.") and not key.startswith("encoder.encode_proj"): key = "encoder.bert_model." + key[len("encoder.") :] state_dict[key] = value model.span_predictor.load_state_dict(state_dict) return model def convert(comp_type: str, src_file: Path, dest_dir: Path): dest_dir = Path(dest_dir) dest_dir.mkdir(exist_ok=True) dpr_state = DPRState.from_type(comp_type, src_file=src_file) model = dpr_state.load_dpr_model() model.save_pretrained(dest_dir) model.from_pretrained(dest_dir) # sanity check if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--type", type=str, help="Type of the component to convert: 'ctx_encoder', 'question_encoder' or 'reader'." ) parser.add_argument( "--src", type=str, help="Path to the dpr checkpoint file. They can be downloaded from the official DPR repo https://github.com/facebookresearch/DPR. Note that in the official repo, both encoders are stored in the 'retriever' checkpoints.", ) parser.add_argument("--dest", type=str, default=None, help="Path to the output PyTorch model directory.") args = parser.parse_args() src_file = Path(args.src) dest_dir = f"converted-{src_file.name}" if args.dest is None else args.dest dest_dir = Path(dest_dir) assert src_file.exists() assert ( args.type is not None ), "Please specify the component type of the DPR model to convert: 'ctx_encoder', 'question_encoder' or 'reader'." convert(args.type, src_file, dest_dir)
AdaMix/src/transformers/models/dpr/convert_dpr_original_checkpoint_to_pytorch.py/0
{ "file_path": "AdaMix/src/transformers/models/dpr/convert_dpr_original_checkpoint_to_pytorch.py", "repo_id": "AdaMix", "token_count": 2397 }
56
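The converter above is normally driven from the command line; a minimal usage sketch follows. The checkpoint filename, output directory, and the pairing with a DPR tokenizer checkpoint are illustrative assumptions, not files shipped with this repository, and the script's package-relative import of `transformers` must resolve in your environment.

# Hypothetical command line for the converter above (paths are placeholders):
#
#   python convert_dpr_original_checkpoint_to_pytorch.py \
#       --type ctx_encoder \
#       --src downloads/dpr_biencoder.cp \
#       --dest converted-dpr-ctx_encoder
#
# Once converted, the output directory loads like any pretrained model,
# mirroring the script's own `from_pretrained` sanity check.
from transformers import DPRContextEncoder, DPRContextEncoderTokenizer

encoder = DPRContextEncoder.from_pretrained("converted-dpr-ctx_encoder")
tokenizer = DPRContextEncoderTokenizer.from_pretrained("facebook/dpr-ctx_encoder-single-nq-base")

inputs = tokenizer("Paris is the capital of France.", return_tensors="pt")
embedding = encoder(**inputs).pooler_output  # shape (1, hidden_size): one passage embedding
print(embedding.shape)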
# coding=utf-8 # Copyright 2019-present CNRS, Facebook Inc. and the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tokenization classes for Flaubert, based on XLM.""" import unicodedata import six from ...utils import logging from ..xlm.tokenization_xlm import XLMTokenizer logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = { "vocab_file": "vocab.json", "merges_file": "merges.txt", } PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": { "flaubert/flaubert_small_cased": "https://huggingface.co/flaubert/flaubert_small_cased/resolve/main/vocab.json", "flaubert/flaubert_base_uncased": "https://huggingface.co/flaubert/flaubert_base_uncased/resolve/main/vocab.json", "flaubert/flaubert_base_cased": "https://huggingface.co/flaubert/flaubert_base_cased/resolve/main/vocab.json", "flaubert/flaubert_large_cased": "https://huggingface.co/flaubert/flaubert_large_cased/resolve/main/vocab.json", }, "merges_file": { "flaubert/flaubert_small_cased": "https://huggingface.co/flaubert/flaubert_small_cased/resolve/main/merges.txt", "flaubert/flaubert_base_uncased": "https://huggingface.co/flaubert/flaubert_base_uncased/resolve/main/merges.txt", "flaubert/flaubert_base_cased": "https://huggingface.co/flaubert/flaubert_base_cased/resolve/main/merges.txt", "flaubert/flaubert_large_cased": "https://huggingface.co/flaubert/flaubert_large_cased/resolve/main/merges.txt", }, } PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { "flaubert/flaubert_small_cased": 512, "flaubert/flaubert_base_uncased": 512, "flaubert/flaubert_base_cased": 512, "flaubert/flaubert_large_cased": 512, } PRETRAINED_INIT_CONFIGURATION = { "flaubert/flaubert_small_cased": {"do_lowercase": False}, "flaubert/flaubert_base_uncased": {"do_lowercase": True}, "flaubert/flaubert_base_cased": {"do_lowercase": False}, "flaubert/flaubert_large_cased": {"do_lowercase": False}, } def convert_to_unicode(text): """ Converts `text` to Unicode (if it's not already), assuming UTF-8 input. """ # six_ensure_text is copied from https://github.com/benjaminp/six def six_ensure_text(s, encoding="utf-8", errors="strict"): if isinstance(s, six.binary_type): return s.decode(encoding, errors) elif isinstance(s, six.text_type): return s else: raise TypeError("not expecting type '%s'" % type(s)) return six_ensure_text(text, encoding="utf-8", errors="ignore") class FlaubertTokenizer(XLMTokenizer): """ Construct a Flaubert tokenizer. Based on Byte-Pair Encoding. The tokenization process is the following: - Moses preprocessing and tokenization. - Normalizing all inputs text. - The arguments ``special_tokens`` and the function ``set_special_tokens``, can be used to add additional symbols (like "__classify__") to a vocabulary. - The argument :obj:`do_lowercase` controls lower casing (automatically set for pretrained vocabularies). This tokenizer inherits from :class:`~transformers.XLMTokenizer`. Please check the superclass for usage examples and documentation regarding arguments. 
""" vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__(self, do_lowercase=False, **kwargs): super().__init__(**kwargs) self.do_lowercase = do_lowercase self.do_lowercase_and_remove_accent = False def preprocess_text(self, text): text = text.replace("``", '"').replace("''", '"') text = convert_to_unicode(text) text = unicodedata.normalize("NFC", text) if self.do_lowercase: text = text.lower() return text def _tokenize(self, text, bypass_tokenizer=False): """ Tokenize a string given language code using Moses. Details of tokenization: - [sacremoses](https://github.com/alvations/sacremoses): port of Moses - Install with `pip install sacremoses` Args: - bypass_tokenizer: Allow users to preprocess and tokenize the sentences externally (default = False) (bool). If True, we only apply BPE. Returns: List of tokens. """ lang = "fr" if lang and self.lang2id and lang not in self.lang2id: logger.error( "Supplied language code not found in lang2id mapping. Please check that your language is supported by the loaded pretrained model." ) if bypass_tokenizer: text = text.split() else: text = self.preprocess_text(text) text = self.moses_pipeline(text, lang=lang) text = self.moses_tokenize(text, lang=lang) split_tokens = [] for token in text: if token: split_tokens.extend([t for t in self.bpe(token).split(" ")]) return split_tokens
AdaMix/src/transformers/models/flaubert/tokenization_flaubert.py/0
{ "file_path": "AdaMix/src/transformers/models/flaubert/tokenization_flaubert.py", "repo_id": "AdaMix", "token_count": 2308 }
57
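As a usage sketch for the tokenizer above: loading a pretrained Flaubert checkpoint pulls the `do_lowercase` setting from `PRETRAINED_INIT_CONFIGURATION`, and `sacremoses` must be installed for the Moses preprocessing step. The sample sentence and printed output are illustrative only.

from transformers import FlaubertTokenizer

tokenizer = FlaubertTokenizer.from_pretrained("flaubert/flaubert_base_uncased")

text = "Bonjour, ceci est une phrase d'exemple."
tokens = tokenizer.tokenize(text)      # Moses pipeline + BPE; lowercased for this checkpoint
encoded = tokenizer(text)              # adds the XLM-style special tokens around the ids

print(tokens)
print(encoded["input_ids"])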
# coding=utf-8 # Copyright 2020, Hugging Face # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Funnel Transformer model configuration """ from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP = { "funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/config.json", "funnel-transformer/small-base": "https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json", "funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/config.json", "funnel-transformer/medium-base": "https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json", "funnel-transformer/intermediate": "https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json", "funnel-transformer/intermediate-base": "https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json", "funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/config.json", "funnel-transformer/large-base": "https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json", "funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json", "funnel-transformer/xlarge-base": "https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json", } class FunnelConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a :class:`~transformers.FunnelModel` or a :class:`~transformers.TFBertModel`. It is used to instantiate a Funnel Transformer model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Funnel Transformer `funnel-transformer/small <https://huggingface.co/funnel-transformer/small>`__ architecture. Configuration objects inherit from :class:`~transformers.PretrainedConfig` and can be used to control the model outputs. Read the documentation from :class:`~transformers.PretrainedConfig` for more information. Args: vocab_size (:obj:`int`, `optional`, defaults to 30522): Vocabulary size of the Funnel transformer. Defines the number of different tokens that can be represented by the :obj:`inputs_ids` passed when calling :class:`~transformers.FunnelModel` or :class:`~transformers.TFFunnelModel`. block_sizes (:obj:`List[int]`, `optional`, defaults to :obj:`[4, 4, 4]`): The sizes of the blocks used in the model. block_repeats (:obj:`List[int]`, `optional`): If passed along, each layer of each block is repeated the number of times indicated. num_decoder_layers (:obj:`int`, `optional`, defaults to 2): The number of layers in the decoder (when not using the base model). d_model (:obj:`int`, `optional`, defaults to 768): Dimensionality of the model's hidden states. n_head (:obj:`int`, `optional`, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. 
d_head (:obj:`int`, `optional`, defaults to 64): Dimensionality of the model's heads. d_inner (:obj:`int`, `optional`, defaults to 3072): Inner dimension in the feed-forward blocks. hidden_act (:obj:`str` or :obj:`callable`, `optional`, defaults to :obj:`"gelu_new"`): The non-linear activation function (function or string) in the encoder and pooler. If string, :obj:`"gelu"`, :obj:`"relu"`, :obj:`"silu"` and :obj:`"gelu_new"` are supported. hidden_dropout (:obj:`float`, `optional`, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_dropout (:obj:`float`, `optional`, defaults to 0.1): The dropout probability for the attention probabilities. activation_dropout (:obj:`float`, `optional`, defaults to 0.0): The dropout probability used between the two layers of the feed-forward blocks. max_position_embeddings (:obj:`int`, `optional`, defaults to 512): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). type_vocab_size (:obj:`int`, `optional`, defaults to 3): The vocabulary size of the :obj:`token_type_ids` passed when calling :class:`~transformers.FunnelModel` or :class:`~transformers.TFFunnelModel`. initializer_range (:obj:`float`, `optional`, defaults to 0.1): The standard deviation of the `uniform initializer` for initializing all weight matrices in attention layers. initializer_std (:obj:`float`, `optional`): The standard deviation of the `normal initializer` for initializing the embedding matrix and the weight of linear layers. Will default to 1 for the embedding matrix and the value given by Xavier initialization for linear layers. layer_norm_eps (:obj:`float`, `optional`, defaults to 1e-9): The epsilon used by the layer normalization layers. pooling_type (:obj:`str`, `optional`, defaults to :obj:`"mean"`): Possible values are ``"mean"`` or ``"max"``. The way pooling is performed at the beginning of each block. attention_type (:obj:`str`, `optional`, defaults to :obj:`"relative_shift"`): Possible values are ``"relative_shift"`` or ``"factorized"``. The former is faster on CPU/GPU while the latter is faster on TPU. separate_cls (:obj:`bool`, `optional`, defaults to :obj:`True`): Whether or not to separate the cls token when applying pooling. truncate_seq (:obj:`bool`, `optional`, defaults to :obj:`False`): When using ``separate_cls``, whether or not to truncate the last token when pooling, to avoid getting a sequence length that is not a multiple of 2. pool_q_only (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not to apply the pooling only to the query or to query, key and values for the attention layers. """ model_type = "funnel" def __init__( self, vocab_size=30522, block_sizes=[4, 4, 4], block_repeats=None, num_decoder_layers=2, d_model=768, n_head=12, d_head=64, d_inner=3072, hidden_act="gelu_new", hidden_dropout=0.1, attention_dropout=0.1, activation_dropout=0.0, max_position_embeddings=512, type_vocab_size=3, initializer_range=0.1, initializer_std=None, layer_norm_eps=1e-9, pooling_type="mean", attention_type="relative_shift", separate_cls=True, truncate_seq=True, pool_q_only=True, **kwargs ): super().__init__(**kwargs) self.vocab_size = vocab_size self.block_sizes = block_sizes self.block_repeats = [1] * len(block_sizes) if block_repeats is None else block_repeats assert len(block_sizes) == len( self.block_repeats ), "`block_sizes` and `block_repeats` should have the same length." 
self.num_decoder_layers = num_decoder_layers self.d_model = d_model self.n_head = n_head self.d_head = d_head self.d_inner = d_inner self.hidden_act = hidden_act self.hidden_dropout = hidden_dropout self.attention_dropout = attention_dropout self.activation_dropout = activation_dropout self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.initializer_range = initializer_range self.initializer_std = initializer_std self.layer_norm_eps = layer_norm_eps assert pooling_type in [ "mean", "max", ], f"Got {pooling_type} for `pooling_type` but only 'mean' and 'max' are supported." self.pooling_type = pooling_type assert attention_type in [ "relative_shift", "factorized", ], f"Got {attention_type} for `attention_type` but only 'relative_shift' and 'factorized' are supported." self.attention_type = attention_type self.separate_cls = separate_cls self.truncate_seq = truncate_seq self.pool_q_only = pool_q_only @property def hidden_size(self): return self.d_model @property def num_attention_heads(self): return self.n_head @property def num_hidden_layers(self): return sum(self.block_sizes) @property def num_blocks(self): return len(self.block_sizes)
AdaMix/src/transformers/models/funnel/configuration_funnel.py/0
{ "file_path": "AdaMix/src/transformers/models/funnel/configuration_funnel.py", "repo_id": "AdaMix", "token_count": 3695 }
58
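A short sketch of how the configuration above is meant to be used: the constructor validates `block_sizes`/`block_repeats` and `pooling_type`, while the properties expose BERT-style aliases. The block sizes and dimensions below are arbitrary illustration values.

from transformers import FunnelConfig

config = FunnelConfig(block_sizes=[2, 2, 2], block_repeats=[1, 2, 2], d_model=512, n_head=8, d_head=64)

print(config.num_blocks)           # 3   -> len(block_sizes)
print(config.num_hidden_layers)    # 6   -> sum(block_sizes)
print(config.hidden_size)          # 512 -> alias for d_model
print(config.num_attention_heads)  # 8   -> alias for n_head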
# coding=utf-8 # Copyright 2018 The Open AI Team Authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tokenization classes for OpenAI GPT.""" import json import os from functools import lru_cache from typing import TYPE_CHECKING, List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = { "vocab_file": "vocab.json", "merges_file": "merges.txt", } PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": { "gpt2": "https://huggingface.co/gpt2/resolve/main/vocab.json", "gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/vocab.json", "gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/vocab.json", "gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/vocab.json", "distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/vocab.json", }, "merges_file": { "gpt2": "https://huggingface.co/gpt2/resolve/main/merges.txt", "gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/merges.txt", "gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/merges.txt", "gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/merges.txt", "distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/merges.txt", }, } PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { "gpt2": 1024, "gpt2-medium": 1024, "gpt2-large": 1024, "gpt2-xl": 1024, "distilgpt2": 1024, } @lru_cache() def bytes_to_unicode(): """ Returns list of utf-8 byte and a mapping to unicode strings. We specifically avoids mapping to whitespace/control characters the bpe code barfs on. The reversible bpe codes work on unicode strings. This means you need a large # of unicode characters in your vocab if you want to avoid UNKs. When you're at something like a 10B token dataset you end up needing around 5K for decent coverage. This is a signficant percentage of your normal, say, 32K bpe vocab. To avoid that, we want lookup tables between utf-8 bytes and unicode strings. """ bs = ( list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1)) ) cs = bs[:] n = 0 for b in range(2 ** 8): if b not in bs: bs.append(b) cs.append(2 ** 8 + n) n += 1 cs = [chr(n) for n in cs] return dict(zip(bs, cs)) def get_pairs(word): """ Return set of symbol pairs in a word. Word is represented as tuple of symbols (symbols being variable-length strings). """ pairs = set() prev_char = word[0] for char in word[1:]: pairs.add((prev_char, char)) prev_char = char return pairs class GPT2Tokenizer(PreTrainedTokenizer): """ Construct a GPT-2 tokenizer. Based on byte-level Byte-Pair-Encoding. 
This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will be encoded differently whether it is at the beginning of the sentence (without space) or not: :: >>> from transformers import GPT2Tokenizer >>> tokenizer = GPT2Tokenizer.from_pretrained("gpt2") >>> tokenizer("Hello world")['input_ids'] [15496, 995] >>> tokenizer(" Hello world")['input_ids'] [18435, 995] You can get around that behavior by passing ``add_prefix_space=True`` when instantiating this tokenizer or when you call it on some text, but since the model was not pretrained this way, it might yield a decrease in performance. .. note:: When used with ``is_split_into_words=True``, this tokenizer will add a space before each word (even the first one). This tokenizer inherits from :class:`~transformers.PreTrainedTokenizer` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (:obj:`str`): Path to the vocabulary file. merges_file (:obj:`str`): Path to the merges file. errors (:obj:`str`, `optional`, defaults to :obj:`"replace"`): Paradigm to follow when decoding bytes to UTF-8. See `bytes.decode <https://docs.python.org/3/library/stdtypes.html#bytes.decode>`__ for more information. unk_token (:obj:`str`, `optional`, defaults to :obj:`<|endoftext|>`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. bos_token (:obj:`str`, `optional`, defaults to :obj:`<|endoftext|>`): The beginning of sequence token. eos_token (:obj:`str`, `optional`, defaults to :obj:`<|endoftext|>`): The end of sequence token. add_prefix_space (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not to add an initial space to the input. This allows to treat the leading word just as any other word. (GPT2 tokenizer detect beginning of words by the preceding space). 
""" vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES model_input_names = ["input_ids", "attention_mask"] def __init__( self, vocab_file, merges_file, errors="replace", unk_token="<|endoftext|>", bos_token="<|endoftext|>", eos_token="<|endoftext|>", add_prefix_space=False, **kwargs ): bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token super().__init__( errors=errors, unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, add_prefix_space=add_prefix_space, **kwargs, ) with open(vocab_file, encoding="utf-8") as vocab_handle: self.encoder = json.load(vocab_handle) self.decoder = {v: k for k, v in self.encoder.items()} self.errors = errors # how to handle errors in decoding self.byte_encoder = bytes_to_unicode() self.byte_decoder = {v: k for k, v in self.byte_encoder.items()} with open(merges_file, encoding="utf-8") as merges_handle: bpe_merges = merges_handle.read().split("\n")[1:-1] bpe_merges = [tuple(merge.split()) for merge in bpe_merges] self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges)))) self.cache = {} self.add_prefix_space = add_prefix_space # Should haved added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""") @property def vocab_size(self): return len(self.encoder) def get_vocab(self): return dict(self.encoder, **self.added_tokens_encoder) def bpe(self, token): if token in self.cache: return self.cache[token] word = tuple(token) pairs = get_pairs(word) if not pairs: return token while True: bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf"))) if bigram not in self.bpe_ranks: break first, second = bigram new_word = [] i = 0 while i < len(word): try: j = word.index(first, i) except ValueError: new_word.extend(word[i:]) break else: new_word.extend(word[i:j]) i = j if word[i] == first and i < len(word) - 1 and word[i + 1] == second: new_word.append(first + second) i += 2 else: new_word.append(word[i]) i += 1 new_word = tuple(new_word) word = new_word if len(word) == 1: break else: pairs = get_pairs(word) word = " ".join(word) self.cache[token] = word return word def _tokenize(self, text): """ Tokenize a string. """ bpe_tokens = [] for token in re.findall(self.pat, text): token = "".join( self.byte_encoder[b] for b in token.encode("utf-8") ) # Maps all our bytes to unicode strings, avoiding controle tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" ")) return bpe_tokens def _convert_token_to_id(self, token): """ Converts a token (str) in an id using the vocab. """ return self.encoder.get(token, self.encoder.get(self.unk_token)) def _convert_id_to_token(self, index): """Converts an index (integer) in a token (str) using the vocab.""" return self.decoder.get(index) def convert_tokens_to_string(self, tokens): """ Converts a sequence of tokens (string) in a single string. 
""" text = "".join(tokens) text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors) return text def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: if not os.path.isdir(save_directory): logger.error("Vocabulary path ({}) should be a directory".format(save_directory)) return vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) merge_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] ) with open(vocab_file, "w", encoding="utf-8") as f: f.write(json.dumps(self.encoder, ensure_ascii=False)) index = 0 with open(merge_file, "w", encoding="utf-8") as writer: writer.write("#version: 0.2\n") for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]): if index != token_index: logger.warning( "Saving vocabulary to {}: BPE merge indices are not consecutive." " Please check that the tokenizer is not corrupted!".format(merge_file) ) index = token_index writer.write(" ".join(bpe_tokens) + "\n") index += 1 return vocab_file, merge_file def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs): add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space) if is_split_into_words or add_prefix_space: text = " " + text return (text, kwargs) def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]: input_ids = [] for is_user, text in conversation.iter_texts(): input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id]) if len(input_ids) > self.model_max_length: input_ids = input_ids[-self.model_max_length :] return input_ids
AdaMix/src/transformers/models/gpt2/tokenization_gpt2.py/0
{ "file_path": "AdaMix/src/transformers/models/gpt2/tokenization_gpt2.py", "repo_id": "AdaMix", "token_count": 5478 }
59
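A sketch reproducing the whitespace behaviour described in the `GPT2Tokenizer` docstring above, plus a vocabulary save round trip; the token ids for the plain and prefixed inputs come from that docstring, and the temporary directory is just for illustration.

import tempfile

from transformers import GPT2Tokenizer

tokenizer = GPT2Tokenizer.from_pretrained("gpt2")

print(tokenizer("Hello world")["input_ids"])    # [15496, 995]
print(tokenizer(" Hello world")["input_ids"])   # [18435, 995] - the leading space changes the first token

# add_prefix_space=True makes the first word tokenize like a mid-sentence word
tokenizer_ps = GPT2Tokenizer.from_pretrained("gpt2", add_prefix_space=True)
print(tokenizer_ps("Hello world")["input_ids"])  # first id should now match the prefixed form above

# save_vocabulary writes vocab.json and merges.txt into the target directory
with tempfile.TemporaryDirectory() as tmp:
    print(tokenizer.save_vocabulary(tmp))        # (.../vocab.json, .../merges.txt)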
# coding=utf-8 # Copyright 2021 Iz Beltagy, Matthew E. Peters, Arman Cohan and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ LED model configuration """ from typing import List, Union from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) LED_PRETRAINED_CONFIG_ARCHIVE_MAP = { "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/config.json", # See all LED models at https://huggingface.co/models?filter=led } class LEDConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a :class:`~transformers.LEDModel`. It is used to instantiate an LED model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the LED `allenai/led-base-16384 <https://huggingface.co/allenai/led-base-16384>`__ architecture. Configuration objects inherit from :class:`~transformers.PretrainedConfig` and can be used to control the model outputs. Read the documentation from :class:`~transformers.PretrainedConfig` for more information. Args: vocab_size (:obj:`int`, `optional`, defaults to 50265): Vocabulary size of the LED model. Defines the number of different tokens that can be represented by the :obj:`inputs_ids` passed when calling :class:`~transformers.LEDModel` or :class:`~transformers.TFLEDModel`. d_model (:obj:`int`, `optional`, defaults to 1024): Dimensionality of the layers and the pooler layer. encoder_layers (:obj:`int`, `optional`, defaults to 12): Number of encoder layers. decoder_layers (:obj:`int`, `optional`, defaults to 12): Number of decoder layers. encoder_attention_heads (:obj:`int`, `optional`, defaults to 16): Number of attention heads for each attention layer in the Transformer encoder. decoder_attention_heads (:obj:`int`, `optional`, defaults to 16): Number of attention heads for each attention layer in the Transformer decoder. decoder_ffn_dim (:obj:`int`, `optional`, defaults to 4096): Dimensionality of the "intermediate" (often named feed-forward) layer in decoder. encoder_ffn_dim (:obj:`int`, `optional`, defaults to 4096): Dimensionality of the "intermediate" (often named feed-forward) layer in decoder. activation_function (:obj:`str` or :obj:`function`, `optional`, defaults to :obj:`"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, :obj:`"gelu"`, :obj:`"relu"`, :obj:`"silu"` and :obj:`"gelu_new"` are supported. dropout (:obj:`float`, `optional`, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_dropout (:obj:`float`, `optional`, defaults to 0.0): The dropout ratio for the attention probabilities. activation_dropout (:obj:`float`, `optional`, defaults to 0.0): The dropout ratio for activations inside the fully connected layer. 
classifier_dropout (:obj:`float`, `optional`, defaults to 0.0): The dropout ratio for classifier. max_encoder_position_embeddings (:obj:`int`, `optional`, defaults to 16384): The maximum sequence length that the encoder might ever be used with. max_decoder_position_embeddings (:obj:`int`, `optional`, defaults to 16384): The maximum sequence length that the decoder might ever be used with. init_std (:obj:`float`, `optional`, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. encoder_layerdrop: (:obj:`float`, `optional`, defaults to 0.0): The LayerDrop probability for the encoder. See the `LayerDrop paper <see https://arxiv.org/abs/1909.11556>`__ for more details. decoder_layerdrop: (:obj:`float`, `optional`, defaults to 0.0): The LayerDrop probability for the decoder. See the `LayerDrop paper <see https://arxiv.org/abs/1909.11556>`__ for more details. use_cache (:obj:`bool`, `optional`, defaults to :obj:`True`): Whether or not the model should return the last key/values attentions (not used by all models) gradient_checkpointing (:obj:`bool`, `optional`, defaults to :obj:`False`): If True, use gradient checkpointing to save memory at the expense of slower backward pass. Example:: >>> from transformers import LEDModel, LEDConfig >>> # Initializing a LED allenai/led-base-16384 style configuration >>> configuration = LEDConfig() >>> # Initializing a model from the allenai/led-base-16384 style configuration >>> model = LEDModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config """ model_type = "led" def __init__( self, vocab_size=50265, max_encoder_position_embeddings=16384, max_decoder_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16, encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True, activation_function="gelu", d_model=1024, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=2, classifier_dropout=0.0, pad_token_id=1, bos_token_id=0, eos_token_id=2, gradient_checkpointing=False, attention_window: Union[List[int], int] = 512, **kwargs ): super().__init__( pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, **kwargs, ) self.vocab_size = vocab_size self.max_encoder_position_embeddings = max_encoder_position_embeddings self.max_decoder_position_embeddings = max_decoder_position_embeddings self.d_model = d_model self.encoder_ffn_dim = encoder_ffn_dim self.encoder_layers = encoder_layers self.encoder_attention_heads = encoder_attention_heads self.decoder_ffn_dim = decoder_ffn_dim self.decoder_layers = decoder_layers self.decoder_attention_heads = decoder_attention_heads self.dropout = dropout self.attention_dropout = attention_dropout self.activation_dropout = activation_dropout self.activation_function = activation_function self.init_std = init_std self.encoder_layerdrop = encoder_layerdrop self.decoder_layerdrop = decoder_layerdrop self.classifier_dropout = classifier_dropout self.use_cache = use_cache self.num_hidden_layers = encoder_layers self.attention_window = attention_window self.gradient_checkpointing = gradient_checkpointing @property def num_attention_heads(self) -> int: return self.encoder_attention_heads @property def hidden_size(self) -> int: return self.d_model @property def 
attention_probs_dropout_prob(self) -> float: return self.attention_dropout @property def initializer_range(self) -> float: return self.init_std
AdaMix/src/transformers/models/led/configuration_led.py/0
{ "file_path": "AdaMix/src/transformers/models/led/configuration_led.py", "repo_id": "AdaMix", "token_count": 3257 }
60
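A sketch of the configuration above with a per-layer `attention_window` (the `Union[List[int], int]` annotation allows one window size per encoder layer) and the BERT-style property aliases; the layer counts and window sizes are illustrative.

from transformers import LEDConfig

config = LEDConfig(
    encoder_layers=6,
    decoder_layers=6,
    attention_window=[256, 256, 512, 512, 1024, 1024],  # one local-attention window per encoder layer
)

print(config.hidden_size)                   # 1024 -> d_model
print(config.num_attention_heads)           # 16   -> encoder_attention_heads
print(config.attention_probs_dropout_prob)  # 0.0  -> attention_dropout
print(config.initializer_range)             # 0.02 -> init_std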
# coding=utf-8 # Copyright 2020 The Allen Institute for AI team and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ...utils import logging from ..roberta.tokenization_roberta_fast import RobertaTokenizerFast from .tokenization_longformer import LongformerTokenizer logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"} PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": { "allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json", "allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json", "allenai/longformer-large-4096-finetuned-triviaqa": "https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json", "allenai/longformer-base-4096-extra.pos.embd.only": "https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json", "allenai/longformer-large-4096-extra.pos.embd.only": "https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json", }, "merges_file": { "allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt", "allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt", "allenai/longformer-large-4096-finetuned-triviaqa": "https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt", "allenai/longformer-base-4096-extra.pos.embd.only": "https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt", "allenai/longformer-large-4096-extra.pos.embd.only": "https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt", }, "tokenizer_file": { "allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/tokenizer.json", "allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/tokenizer.json", "allenai/longformer-large-4096-finetuned-triviaqa": "https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/tokenizer.json", "allenai/longformer-base-4096-extra.pos.embd.only": "https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/tokenizer.json", "allenai/longformer-large-4096-extra.pos.embd.only": "https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/tokenizer.json", }, } PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { "allenai/longformer-base-4096": 4096, "allenai/longformer-large-4096": 4096, "allenai/longformer-large-4096-finetuned-triviaqa": 4096, "allenai/longformer-base-4096-extra.pos.embd.only": 4096, "allenai/longformer-large-4096-extra.pos.embd.only": 4096, } class LongformerTokenizerFast(RobertaTokenizerFast): r""" Construct a "fast" Longformer tokenizer (backed by HuggingFace's `tokenizers` library). 
:class:`~transformers.LongformerTokenizerFast` is identical to :class:`~transformers.RobertaTokenizerFast`. Refer to the superclass for usage examples and documentation concerning parameters. """ # merges and vocab same as Roberta vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES slow_tokenizer_class = LongformerTokenizer
AdaMix/src/transformers/models/longformer/tokenization_longformer_fast.py/0
{ "file_path": "AdaMix/src/transformers/models/longformer/tokenization_longformer_fast.py", "repo_id": "AdaMix", "token_count": 1651 }
61
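Since the class above only rebinds Roberta's fast tokenizer to the Longformer checkpoints, the standard fast-tokenizer API applies directly. A minimal sketch, assuming the `tokenizers` backend is installed; the sample sentence is arbitrary.

from transformers import LongformerTokenizerFast

tokenizer = LongformerTokenizerFast.from_pretrained("allenai/longformer-base-4096")

enc = tokenizer("Long documents of up to 4096 tokens are the target use case.", return_offsets_mapping=True)
print(enc["input_ids"][:5])
print(enc["offset_mapping"][:5])   # character offsets, available because this is a fast tokenizer
print(tokenizer.model_max_length)  # 4096, from PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES above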
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import re import warnings from contextlib import contextmanager from pathlib import Path from shutil import copyfile from typing import Dict, List, Optional, Tuple, Union import sentencepiece from ...tokenization_utils import PreTrainedTokenizer VOCAB_FILES_NAMES = { "source_spm": "source.spm", "target_spm": "target.spm", "vocab": "vocab.json", "tokenizer_config_file": "tokenizer_config.json", } PRETRAINED_VOCAB_FILES_MAP = { "source_spm": { "Helsinki-NLP/opus-mt-en-de": "https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/source.spm" }, "target_spm": { "Helsinki-NLP/opus-mt-en-de": "https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/target.spm" }, "vocab": { "Helsinki-NLP/opus-mt-en-de": "https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/vocab.json" }, "tokenizer_config_file": { "Helsinki-NLP/opus-mt-en-de": "https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/tokenizer_config.json" }, } PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"Helsinki-NLP/opus-mt-en-de": 512} PRETRAINED_INIT_CONFIGURATION = {} # Example URL https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/vocab.json class MarianTokenizer(PreTrainedTokenizer): r""" Construct a Marian tokenizer. Based on `SentencePiece <https://github.com/google/sentencepiece>`__. This tokenizer inherits from :class:`~transformers.PreTrainedTokenizer` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: source_spm (:obj:`str`): `SentencePiece <https://github.com/google/sentencepiece>`__ file (generally has a .spm extension) that contains the vocabulary for the source language. target_spm (:obj:`str`): `SentencePiece <https://github.com/google/sentencepiece>`__ file (generally has a .spm extension) that contains the vocabulary for the target language. source_lang (:obj:`str`, `optional`): A string representing the source language. target_lang (:obj:`str`, `optional`): A string representing the target language. unk_token (:obj:`str`, `optional`, defaults to :obj:`"<unk>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. eos_token (:obj:`str`, `optional`, defaults to :obj:`"</s>"`): The end of sequence token. pad_token (:obj:`str`, `optional`, defaults to :obj:`"<pad>"`): The token used for padding, for example when batching sequences of different lengths. model_max_length (:obj:`int`, `optional`, defaults to 512): The maximum sentence length the model accepts. additional_special_tokens (:obj:`List[str]`, `optional`, defaults to :obj:`["<eop>", "<eod>"]`): Additional special tokens used by the tokenizer. 
Examples:: >>> from transformers import MarianTokenizer >>> tokenizer = MarianTokenizer.from_pretrained('Helsinki-NLP/opus-mt-en-de') >>> src_texts = [ "I am a small frog.", "Tom asked his teacher for advice."] >>> tgt_texts = ["Ich bin ein kleiner Frosch.", "Tom bat seinen Lehrer um Rat."] # optional >>> inputs = tokenizer(src_texts, return_tensors="pt", padding=True) >>> with tokenizer.as_target_tokenizer(): ... labels = tokenizer(tgt_texts, return_tensors="pt", padding=True) >>> inputs["labels"] = labels["input_ids"] # keys [input_ids, attention_mask, labels]. >>> outputs = model(**inputs) should work """ vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES model_input_names = ["input_ids", "attention_mask"] language_code_re = re.compile(">>.+<<") # type: re.Pattern def __init__( self, vocab, source_spm, target_spm, source_lang=None, target_lang=None, unk_token="<unk>", eos_token="</s>", pad_token="<pad>", model_max_length=512, **kwargs ): super().__init__( # bos_token=bos_token, unused. Start decoding with config.decoder_start_token_id source_lang=source_lang, target_lang=target_lang, unk_token=unk_token, eos_token=eos_token, pad_token=pad_token, model_max_length=model_max_length, **kwargs, ) assert Path(source_spm).exists(), f"cannot find spm source {source_spm}" self.encoder = load_json(vocab) if self.unk_token not in self.encoder: raise KeyError("<unk> token must be in vocab") assert self.pad_token in self.encoder self.decoder = {v: k for k, v in self.encoder.items()} self.source_lang = source_lang self.target_lang = target_lang self.supported_language_codes: list = [k for k in self.encoder if k.startswith(">>") and k.endswith("<<")] self.spm_files = [source_spm, target_spm] # load SentencePiece model for pre-processing self.spm_source = load_spm(source_spm) self.spm_target = load_spm(target_spm) self.current_spm = self.spm_source # Multilingual target side: default to using first supported language code. self._setup_normalizer() def _setup_normalizer(self): try: from sacremoses import MosesPunctNormalizer self.punc_normalizer = MosesPunctNormalizer(self.source_lang).normalize except (ImportError, FileNotFoundError): warnings.warn("Recommended: pip install sacremoses.") self.punc_normalizer = lambda x: x def normalize(self, x: str) -> str: """Cover moses empty string edge case. They return empty list for '' input!""" return self.punc_normalizer(x) if x else "" def _convert_token_to_id(self, token): return self.encoder.get(token, self.encoder[self.unk_token]) def remove_language_code(self, text: str): """Remove language codes like >>fr<< before sentencepiece""" match = self.language_code_re.match(text) code: list = [match.group(0)] if match else [] return code, self.language_code_re.sub("", text) def _tokenize(self, text: str) -> List[str]: code, text = self.remove_language_code(text) pieces = self.current_spm.EncodeAsPieces(text) return code + pieces def _convert_id_to_token(self, index: int) -> str: """Converts an index (integer) in a token (str) using the decoder.""" return self.decoder.get(index, self.unk_token) def batch_decode(self, sequences, **kwargs): """ Convert a list of lists of token ids into a list of strings by calling decode. Args: sequences (:obj:`Union[List[int], List[List[int]], np.ndarray, torch.Tensor, tf.Tensor]`): List of tokenized input ids. Can be obtained using the ``__call__`` method. 
skip_special_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not to remove special tokens in the decoding. clean_up_tokenization_spaces (:obj:`bool`, `optional`, defaults to :obj:`True`): Whether or not to clean up the tokenization spaces. use_source_tokenizer (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not to use the source tokenizer to decode sequences (only applicable in sequence-to-sequence problems). kwargs (additional keyword arguments, `optional`): Will be passed to the underlying model specific decode method. Returns: :obj:`List[str]`: The list of decoded sentences. """ return super().batch_decode(sequences, **kwargs) def decode(self, token_ids, **kwargs): """ Converts a sequence of ids in a string, using the tokenizer and vocabulary with options to remove special tokens and clean up tokenization spaces. Similar to doing ``self.convert_tokens_to_string(self.convert_ids_to_tokens(token_ids))``. Args: token_ids (:obj:`Union[int, List[int], np.ndarray, torch.Tensor, tf.Tensor]`): List of tokenized input ids. Can be obtained using the ``__call__`` method. skip_special_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not to remove special tokens in the decoding. clean_up_tokenization_spaces (:obj:`bool`, `optional`, defaults to :obj:`True`): Whether or not to clean up the tokenization spaces. use_source_tokenizer (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not to use the source tokenizer to decode sequences (only applicable in sequence-to-sequence problems). kwargs (additional keyword arguments, `optional`): Will be passed to the underlying model specific decode method. Returns: :obj:`str`: The decoded sentence. """ return super().decode(token_ids, **kwargs) def convert_tokens_to_string(self, tokens: List[str]) -> str: """Uses source spm if _decode_use_source_tokenizer is True, and target spm otherwise """ if self._decode_use_source_tokenizer: return self.spm_source.DecodePieces(tokens) else: return self.spm_target.DecodePieces(tokens) def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]: """Build model inputs from a sequence by appending eos_token_id.""" if token_ids_1 is None: return token_ids_0 + [self.eos_token_id] # We don't expect to process pairs, but leave the pair logic for API consistency return token_ids_0 + token_ids_1 + [self.eos_token_id] @contextmanager def as_target_tokenizer(self): """ Temporarily sets the tokenizer for encoding the targets. Useful for tokenizer associated to sequence-to-sequence models that need a slightly different processing for the labels. 
""" self.current_spm = self.spm_target yield self.current_spm = self.spm_source @property def vocab_size(self) -> int: return len(self.encoder) def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: save_dir = Path(save_directory) assert save_dir.is_dir(), f"{save_directory} should be a directory" save_json( self.encoder, save_dir / ((filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab"]), ) for orig, f in zip(["source.spm", "target.spm"], self.spm_files): dest_path = save_dir / ((filename_prefix + "-" if filename_prefix else "") + Path(f).name) if not dest_path.exists(): copyfile(f, save_dir / orig) return tuple( save_dir / ((filename_prefix + "-" if filename_prefix else "") + f) for f in self.vocab_files_names ) def get_vocab(self) -> Dict: vocab = self.encoder.copy() vocab.update(self.added_tokens_encoder) return vocab def __getstate__(self) -> Dict: state = self.__dict__.copy() state.update({k: None for k in ["spm_source", "spm_target", "current_spm", "punc_normalizer"]}) return state def __setstate__(self, d: Dict) -> None: self.__dict__ = d self.spm_source, self.spm_target = (load_spm(f) for f in self.spm_files) self.current_spm = self.spm_source self._setup_normalizer() def num_special_tokens_to_add(self, **unused): """Just EOS""" return 1 def _special_token_mask(self, seq): all_special_ids = set(self.all_special_ids) # call it once instead of inside list comp all_special_ids.remove(self.unk_token_id) # <unk> is only sometimes special return [1 if x in all_special_ids else 0 for x in seq] def get_special_tokens_mask( self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False ) -> List[int]: """Get list where entries are [1] if a token is [eos] or [pad] else 0.""" if already_has_special_tokens: return self._special_token_mask(token_ids_0) elif token_ids_1 is None: return self._special_token_mask(token_ids_0) + [1] else: return self._special_token_mask(token_ids_0 + token_ids_1) + [1] def load_spm(path: str) -> sentencepiece.SentencePieceProcessor: spm = sentencepiece.SentencePieceProcessor() spm.Load(path) return spm def save_json(data, path: str) -> None: with open(path, "w") as f: json.dump(data, f, indent=2) def load_json(path: str) -> Union[Dict, List]: with open(path, "r") as f: return json.load(f)
AdaMix/src/transformers/models/marian/tokenization_marian.py/0
{ "file_path": "AdaMix/src/transformers/models/marian/tokenization_marian.py", "repo_id": "AdaMix", "token_count": 5811 }
62
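A sketch mirroring the docstring example above for preparing translation batches with the source and target SentencePiece models; it needs `sentencepiece` (and ideally `sacremoses`), and the model forward call is omitted.

from transformers import MarianTokenizer

tokenizer = MarianTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de")

src_texts = ["I am a small frog.", "Tom asked his teacher for advice."]
tgt_texts = ["Ich bin ein kleiner Frosch.", "Tom bat seinen Lehrer um Rat."]

batch = tokenizer(src_texts, padding=True, return_tensors="pt")      # encoded with the source spm
with tokenizer.as_target_tokenizer():                                # temporarily switch to the target spm
    batch["labels"] = tokenizer(tgt_texts, padding=True, return_tensors="pt")["input_ids"]

print(sorted(batch.keys()))                 # ['attention_mask', 'input_ids', 'labels']
print(tokenizer.supported_language_codes)   # '>>xx<<' codes for multilingual checkpoints; typically empty for en-de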
# coding=utf-8 # # Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tokenization classes for MobileBERT.""" from ...utils import logging from ..bert.tokenization_bert_fast import BertTokenizerFast from .tokenization_mobilebert import MobileBertTokenizer logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"} PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": {"mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt"}, "tokenizer_file": { "mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json" }, } PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mobilebert-uncased": 512} PRETRAINED_INIT_CONFIGURATION = {} class MobileBertTokenizerFast(BertTokenizerFast): r""" Construct a "fast" MobileBERT tokenizer (backed by HuggingFace's `tokenizers` library). :class:`~transformers.MobileBertTokenizerFast` is identical to :class:`~transformers.BertTokenizerFast` and runs end-to-end tokenization: punctuation splitting and wordpiece. Refer to superclass :class:`~transformers.BertTokenizerFast` for usage examples and documentation concerning parameters. """ vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION slow_tokenizer_class = MobileBertTokenizer
AdaMix/src/transformers/models/mobilebert/tokenization_mobilebert_fast.py/0
{ "file_path": "AdaMix/src/transformers/models/mobilebert/tokenization_mobilebert_fast.py", "repo_id": "AdaMix", "token_count": 703 }
63
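Because the class above is just `BertTokenizerFast` bound to the MobileBERT vocabulary, usage is identical to BERT's; a minimal sketch with an arbitrary sentence.

from transformers import MobileBertTokenizerFast

tokenizer = MobileBertTokenizerFast.from_pretrained("google/mobilebert-uncased")

enc = tokenizer("MobileBERT shares BERT's WordPiece tokenization.")
print(enc["input_ids"])
print(tokenizer.convert_ids_to_tokens(enc["input_ids"]))  # [CLS] ... [SEP] with WordPiece pieces
print(tokenizer.model_max_length)                         # 512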
# coding=utf-8 # Copyright 2020, The T5 Authors and HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ mT5 model configuration """ from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) class MT5Config(PretrainedConfig): r""" This is the configuration class to store the configuration of a :class:`~transformers.MT5Model` or a :class:`~transformers.TFMT5Model`. It is used to instantiate a mT5 model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the mT5 `google/mt5-small <https://huggingface.co/google/mt5-small>`__ architecture. Configuration objects inherit from :class:`~transformers.PretrainedConfig` and can be used to control the model outputs. Read the documentation from :class:`~transformers.PretrainedConfig` for more information. Arguments: vocab_size (:obj:`int`, `optional`, defaults to 32128): Vocabulary size of the T5 model. Defines the number of different tokens that can be represented by the :obj:`inputs_ids` passed when calling :class:`~transformers.T5Model` or :class:`~transformers.TFT5Model`. d_model (:obj:`int`, `optional`, defaults to 512): Size of the encoder layers and the pooler layer. d_kv (:obj:`int`, `optional`, defaults to 64): Size of the key, query, value projections per attention head. :obj:`d_kv` has to be equal to :obj:`d_model // num_heads`. d_ff (:obj:`int`, `optional`, defaults to 1024): Size of the intermediate feed forward layer in each :obj:`T5Block`. num_layers (:obj:`int`, `optional`, defaults to 8): Number of hidden layers in the Transformer encoder. num_decoder_layers (:obj:`int`, `optional`): Number of hidden layers in the Transformer decoder. Will use the same value as :obj:`num_layers` if not set. num_heads (:obj:`int`, `optional`, defaults to 6): Number of attention heads for each attention layer in the Transformer encoder. relative_attention_num_buckets (:obj:`int`, `optional`, defaults to 32): The number of buckets to use for each attention layer. dropout_rate (:obj:`float`, `optional`, defaults to 0.1): The ratio for all dropout layers. layer_norm_eps (:obj:`float`, `optional`, defaults to 1e-6): The epsilon used by the layer normalization layers. initializer_factor (:obj:`float`, `optional`, defaults to 1): A factor for initializing all weight matrices (should be kept to 1, used internally for initialization testing). feed_forward_proj (:obj:`string`, `optional`, defaults to :obj:`"gated-gelu"`): Type of feed forward layer to be used. Should be one of :obj:`"relu"` or :obj:`"gated-gelu"`. use_cache (:obj:`bool`, `optional`, defaults to :obj:`True`): Whether or not the model should return the last key/values attentions (not used by all models). 
""" model_type = "mt5" keys_to_ignore_at_inference = ["past_key_values"] def __init__( self, vocab_size=250112, d_model=512, d_kv=64, d_ff=1024, num_layers=8, num_decoder_layers=None, num_heads=6, relative_attention_num_buckets=32, dropout_rate=0.1, layer_norm_epsilon=1e-6, initializer_factor=1.0, feed_forward_proj="gated-gelu", is_encoder_decoder=True, use_cache=True, tokenizer_class="T5Tokenizer", tie_word_embeddings=False, pad_token_id=0, eos_token_id=1, decoder_start_token_id=0, **kwargs ): super().__init__( is_encoder_decoder=is_encoder_decoder, tokenizer_class=tokenizer_class, tie_word_embeddings=tie_word_embeddings, pad_token_id=pad_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, **kwargs, ) self.vocab_size = vocab_size self.d_model = d_model self.d_kv = d_kv self.d_ff = d_ff self.num_layers = num_layers self.num_decoder_layers = ( num_decoder_layers if num_decoder_layers is not None else self.num_layers ) # default = symmetry self.num_heads = num_heads self.relative_attention_num_buckets = relative_attention_num_buckets self.dropout_rate = dropout_rate self.layer_norm_epsilon = layer_norm_epsilon self.initializer_factor = initializer_factor self.feed_forward_proj = feed_forward_proj self.use_cache = use_cache @property def hidden_size(self): return self.d_model @property def num_attention_heads(self): return self.num_heads @property def num_hidden_layers(self): return self.num_layers
AdaMix/src/transformers/models/mt5/configuration_mt5.py/0
{ "file_path": "AdaMix/src/transformers/models/mt5/configuration_mt5.py", "repo_id": "AdaMix", "token_count": 2276 }
64
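A sketch of the configuration above with reduced dimensions (kept consistent with the documented constraint that `d_kv` equals `d_model // num_heads`) and the property aliases it exposes; the sizes are illustrative.

from transformers import MT5Config

config = MT5Config(d_model=256, d_kv=32, num_heads=8, num_layers=4, d_ff=512)

print(config.hidden_size)          # 256 -> d_model
print(config.num_attention_heads)  # 8   -> num_heads
print(config.num_hidden_layers)    # 4   -> num_layers
print(config.num_decoder_layers)   # 4   -> defaults to num_layers when unset
print(config.tie_word_embeddings)  # False by default for mT5, unlike T5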
# coding=utf-8 # Copyright 2020, The RAG Authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """TFRAG model implementation.""" from dataclasses import dataclass from typing import Dict, List, Optional, Tuple import numpy as np import tensorflow as tf from ...configuration_utils import PretrainedConfig from ...file_utils import ModelOutput, add_start_docstrings_to_model_forward, replace_return_docstrings from ...modeling_tf_outputs import TFBaseModelOutput from ...modeling_tf_utils import TFCausalLanguageModelingLoss, TFPreTrainedModel, input_processing, shape_list from ...utils import logging from .configuration_rag import RagConfig from .retrieval_rag import RagRetriever logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "RagConfig" @dataclass class TFRetrievAugLMMarginOutput(ModelOutput): """ Base class for retriever augmented marginalized models outputs. Args: loss (:obj:`tf.Tensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided): Language modeling loss. logits (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head. The score is possibly marginalized over all documents for each vocabulary token. past_key_values (:obj:`List[tf.Tensor]`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``): List of :obj:`tf.Tensor` of length :obj:`config.n_layers`, with each tensor of shape :obj:`(2, batch_size, num_heads, sequence_length, embed_size_per_head)`). Contains precomputed hidden-states (key and values in the attention blocks) of the decoder that can be used (see :obj:`past_key_values` input) to speed up sequential decoding. doc_scores (:obj:`tf.Tensor` of shape :obj:`(batch_size, config.n_docs)`): Score between each retrieved document embeddings (see :obj:`retrieved_doc_embeds`) and :obj:`question_encoder_last_hidden_state`. retrieved_doc_embeds (:obj:`tf.Tensor` of shape :obj:`(batch_size, config.n_docs, hidden_size)`, `optional`, returned when `output_retrieved=True`): Embedded documents retrieved by the retriever. Is used with ``question_encoder_last_hidden_state`` to compute the ``doc_scores``. retrieved_doc_ids (:obj:`tf.Tensor` (int32) of shape :obj:`(batch_size, config.n_docs)`, `optional`, returned when `output_retrieved=True`): The indexes of the embedded documents retrieved by the retriever. context_input_ids (:obj:`tf.Tensor`(int32) of shape :obj:`(batch_size * config.n_docs, config.max_combined_length)`, `optional`, returned when `output_retrieved=True`): Input ids post-processed from the retrieved documents and the question encoder input_ids by the retriever. context_attention_mask (:obj:`tf.Tensor` (int32) of shape :obj:`(batch_size * config.n_docs, config.max_combined_length)`, `optional`, returned when `output_retrieved=True`): Attention mask post-processed from the retrieved documents and the question encoder :obj:`input_ids` by the retriever. 
question_encoder_last_hidden_state (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`): Sequence of hidden states at the output of the last layer of the question encoder pooled output of the model. question_enc_hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple of :obj:`tf.Tensor` (one for the output of the embeddings and one for the output of each layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. Hidden states of the question encoder at the output of each layer plus the initial embedding outputs. question_enc_attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): Tuple of :obj:`tf.Tensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the question encoder, after the attention softmax, used to compute the weighted average in the self-attention heads. generator_enc_last_hidden_state (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`): Sequence of hidden-states at the output of the last layer of the generator encoder of the model. generator_enc_hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple of :obj:`tf.Tensor` (one for the output of the embeddings and one for the output of each layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. Hidden states of the generator encoder at the output of each layer plus the initial embedding outputs. generator_enc_attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): Tuple of :obj:`tf.Tensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the generator encoder, after the attention softmax, used to compute the weighted average in the self-attention heads. generator_dec_hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple of :obj:`tf.Tensor` (one for the output of the embeddings and one for the output of each layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. Hidden states of the generator decoder at the output of each layer plus the initial embedding outputs. generator_dec_attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): Tuple of :obj:`tf.Tensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the generator decoder, after the attention softmax, used to compute the weighted average in the self-attention heads. 
""" loss: Optional[tf.Tensor] = None logits: tf.Tensor = None past_key_values: Optional[List[tf.Tensor]] = None doc_scores: Optional[tf.Tensor] = None retrieved_doc_embeds: Optional[tf.Tensor] = None retrieved_doc_ids: Optional[tf.Tensor] = None context_input_ids: Optional[tf.Tensor] = None context_attention_mask: Optional[tf.Tensor] = None question_encoder_last_hidden_state: Optional[tf.Tensor] = None question_enc_hidden_states: Optional[Tuple[tf.Tensor]] = None question_enc_attentions: Optional[Tuple[tf.Tensor]] = None generator_enc_last_hidden_state: Optional[tf.Tensor] = None generator_enc_hidden_states: Optional[Tuple[tf.Tensor]] = None generator_enc_attentions: Optional[Tuple[tf.Tensor]] = None generator_dec_hidden_states: Optional[Tuple[tf.Tensor]] = None generator_dec_attentions: Optional[Tuple[tf.Tensor]] = None @dataclass class TFRetrievAugLMOutput(ModelOutput): """ Args: logits (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head. The score is possibly marginalized over all documents for each vocabulary token. past_key_values (:obj:`List[tf.Tensor]`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``): List of :obj:`tf.Tensor` of length :obj:`config.n_layers`, with each tensor of shape :obj:`(2, batch_size, num_heads, sequence_length, embed_size_per_head)`). Contains precomputed hidden-states (key and values in the attention blocks) of the decoder that can be used (see :obj:`past_key_values` input) to speed up sequential decoding. doc_scores (:obj:`tf.Tensor` of shape :obj:`(batch_size, config.n_docs)`): Score between each retrieved document embeddings (see :obj:`retrieved_doc_embeds`) and :obj:`question_encoder_last_hidden_state`. retrieved_doc_embeds (:obj:`tf.Tensor` of shape :obj:`(batch_size, config.n_docs, hidden_size)`, `optional`, returned when `output_retrieved=True`): Embedded documents retrieved by the retriever. Is used with ``question_encoder_last_hidden_state`` to compute the ``doc_scores``. retrieved_doc_ids (:obj:`tf.Tensor` of shape :obj:`(batch_size, config.n_docs)`, `optional`, returned when `output_retrieved=True`): The indexes of the embedded documents retrieved by the retriever. context_input_ids (:obj:`tf.Tensor` of shape :obj:`(batch_size * config.n_docs, config.max_combined_length)`, `optional`, returned when `output_retrieved=True`): Input ids post-processed from the retrieved documents and the question encoder input_ids by the retriever. context_attention_mask (:obj:`tf.Tensor` of shape :obj:`(batch_size * config.n_docs, config.max_combined_length)`, `optional`, returned when `output_retrieved=True`): Attention mask post-processed from the retrieved documents and the question encoder :obj:`input_ids` by the retriever. question_encoder_last_hidden_state (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`): Sequence of hidden states at the output of the last layer of the question encoder pooled output of the model. question_enc_hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple of :obj:`tf.Tensor` (one for the output of the embeddings and one for the output of each layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. Hidden states of the question encoder at the output of each layer plus the initial embedding outputs. 
question_enc_attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): Tuple of :obj:`tf.Tensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the question encoder, after the attention softmax, used to compute the weighted average in the self-attention heads. generator_enc_last_hidden_state (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`): Sequence of hidden-states at the output of the last layer of the generator encoder of the model. generator_enc_hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple of :obj:`tf.Tensor` (one for the output of the embeddings and one for the output of each layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. Hidden states of the generator encoder at the output of each layer plus the initial embedding outputs. generator_enc_attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): Tuple of :obj:`tf.Tensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the generator encoder, after the attention softmax, used to compute the weighted average in the self-attention heads. generator_dec_hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple of :obj:`tf.Tensor` (one for the output of the embeddings and one for the output of each layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. Hidden states of the generator decoder at the output of each layer plus the initial embedding outputs. generator_dec_attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): Tuple of :obj:`tf.Tensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the generator decoder, after the attention softmax, used to compute the weighted average in the self-attention heads. """ logits: tf.Tensor = None past_key_values: Optional[List[tf.Tensor]] = None doc_scores: Optional[tf.Tensor] = None retrieved_doc_embeds: Optional[tf.Tensor] = None retrieved_doc_ids: Optional[tf.Tensor] = None context_input_ids: Optional[tf.Tensor] = None context_attention_mask: Optional[tf.Tensor] = None question_encoder_last_hidden_state: Optional[tf.Tensor] = None question_enc_hidden_states: Optional[Tuple[tf.Tensor]] = None question_enc_attentions: Optional[Tuple[tf.Tensor]] = None generator_enc_last_hidden_state: Optional[tf.Tensor] = None generator_enc_hidden_states: Optional[Tuple[tf.Tensor]] = None generator_enc_attentions: Optional[Tuple[tf.Tensor]] = None generator_dec_hidden_states: Optional[Tuple[tf.Tensor]] = None generator_dec_attentions: Optional[Tuple[tf.Tensor]] = None class TFRagPreTrainedModel(TFPreTrainedModel): r""" RAG models were released with the paper `Retrieval-Augmented Generation for Knowledge-Intensive NLP Tasks <https://arxiv.org/abs/2005.11401>`__ by Patrick Lewis, Ethan Perez, Aleksandra Piktus et al. 
RAG is a retriever-augmented model and encapsulates three components: a question encoder, a dataset retriever and a generator; the encoder and generator are trainable while the retriever is just an indexed dataset. """ config_class = RagConfig base_model_prefix = "rag" _keys_to_ignore_on_load_missing = [r"position_ids"] @classmethod def from_pretrained_question_encoder_generator( cls, question_encoder_pretrained_model_name_or_path: str = None, generator_pretrained_model_name_or_path: str = None, retriever: RagRetriever = None, *model_args, **kwargs ) -> TFPreTrainedModel: r""" Instantiates a question encoder and a generator from one or two base classes of the library from pretrained model checkpoints. Params: question_encoder_pretrained_model_name_or_path (:obj: `str`, `optional`): Information necessary to initialize the question encoder. Can be either: - A string with the `shortcut name` of a pretrained model to load from cache or download, e.g., ``bert-base-uncased``. - A string with the `identifier name` of a pretrained model that was user-uploaded to our S3, e.g., ``dbmdz/bert-base-german-cased``. - A path to a `directory` containing model weights saved using :func:`~transformers.TFPreTrainedModel.save_pretrained`, e.g., ``./my_model_directory/``. - A path or url to a `pytorch index checkpoint file` (e.g, ``./pt_model/``). In this case, ``question_encoder_from_pt`` should be set to :obj:`True`. generator_pretrained_model_name_or_path (:obj: `str`, `optional`, defaults to `None`): Information necessary to initialize the generator. Can be either: - A string with the `shortcut name` of a pretrained model to load from cache or download, e.g., ``t5-small``. - A string with the `identifier name` of a pretrained model that was user-uploaded to our S3, e.g., ``facebook/bart-base``. - A path to a `directory` containing model weights saved using :func:`~transformers.TFPreTrainedModel.save_pretrained`, e.g., ``./my_model_directory/``. - A path or url to a `pytorch checkpoint file` (e.g, ``./pt_model/``). In this case, ``generator_from_pt`` should be set to :obj:`True`. model_args (remaining positional arguments, `optional`): All remaining positional arguments will be passed to the underlying model's ``__init__`` method. retriever (:class:`~transformers.RagRetriever`, `optional`): The retriever to use. kwargs (remaining dictionary of keyword arguments, `optional`): Can be used to update the configuration object (after it has been loaded) and initialize the model (e.g., ``output_attentions=True``). - To update the question_encoder configuration, use the prefix `question_encoder_` for each configuration parameter. - To update the generator configuration, use the prefix `generator_` for each configuration parameter. - To update the parent model configuration, do not use a prefix for each configuration parameter. Behaves differently depending on whether a :obj:`config` is provided or automatically loaded. Example:: >>> from transformers import RagRetriever, TFRagModel >>> # initialize a RAG from two pretrained models.
>>> model = TFRagModel.from_pretrained_question_encoder_generator('facebook/dpr-question_encoder-single-nq-base', 't5-small') >>> # alternatively, initialize from pytorch pretrained models can also be done >>> model = TFRagModel.from_pretrained_question_encoder_generator('facebook/dpr-question_encoder-single-nq-base', "facebook/bart-base", generator_from_pt=True, question_encoder_from_pt=True) >>> # saving model after fine-tuning >>> model.save_pretrained("./rag") >>> # load retriever >>> retriever = RagRetriever.from_pretrained(PATH, index_name="exact", use_dummy_dataset=True) >>> # load fine-tuned model with retriver >>> model = TFRagModel.from_pretrained("./rag", retriever=retriever) """ kwargs_question_encoder = { argument[len("question_encoder_") :]: value for argument, value in kwargs.items() if argument.startswith("question_encoder_") } kwargs_generator = { argument[len("generator_") :]: value for argument, value in kwargs.items() if argument.startswith("generator_") } # remove question_encoder, generator kwargs from kwargs for key in kwargs_question_encoder.keys(): del kwargs["question_encoder_" + key] for key in kwargs_generator.keys(): del kwargs["generator_" + key] # Load and initialize the question_encoder and generator # The distinction between question_encoder and generator at the model level is made # by the value of the flag `is_generator` that we need to set correctly. question_encoder = kwargs_question_encoder.pop("model", None) if question_encoder is None: assert ( question_encoder_pretrained_model_name_or_path is not None ), "If `model` is not defined as an argument, a `question_encoder_pretrained_model_name_or_path` has to be defined" from ..auto.modeling_tf_auto import TFAutoModel if "config" not in kwargs_question_encoder: from ..auto.configuration_auto import AutoConfig question_encoder_config = AutoConfig.from_pretrained(question_encoder_pretrained_model_name_or_path) kwargs_question_encoder["config"] = question_encoder_config question_encoder = TFAutoModel.from_pretrained( question_encoder_pretrained_model_name_or_path, name="question_encoder", load_weight_prefix=cls.load_weight_prefix, *model_args, **kwargs_question_encoder, ) generator = kwargs_generator.pop("generator", None) if generator is None: assert ( generator_pretrained_model_name_or_path is not None ), "If `generator_model` is not defined as an argument, a `generator_pretrained_model_name_or_path` has to be defined" from ..auto.modeling_tf_auto import TFAutoModelForSeq2SeqLM if "config" not in kwargs_generator: from ..auto.configuration_auto import AutoConfig generator_config = AutoConfig.from_pretrained(generator_pretrained_model_name_or_path) kwargs_generator["config"] = generator_config generator = TFAutoModelForSeq2SeqLM.from_pretrained( generator_pretrained_model_name_or_path, name="generator", load_weight_prefix=cls.load_weight_prefix, **kwargs_generator, ) # instantiate config with corresponding kwargs config = kwargs.get("config", None) if config is None: config = RagConfig.from_question_encoder_generator_configs( question_encoder.config, generator.config, **kwargs ) return cls(question_encoder=question_encoder, generator=generator, config=config, retriever=retriever) RAG_START_DOCSTRING = r""" RAG is a sequence-to-sequence model which encapsulates two core components: a question encoder and a generator. During a forward pass, we encode the input with the question encoder and pass it to the retriever to extract relevant context documents. The documents are then prepended to the input. 
Such contextualized inputs are passed to the generator. The question encoder can be any `autoencoding` model, preferably :class:`~transformers.TFDPRQuestionEncoder`, and the generator can be any `seq2seq` model, preferably :class:`~transformers.TFBartForConditionalGeneration`. The model can be initialized with a :class:`~transformers.RagRetriever` for end-to-end generation or used in combination with the outputs of a retriever in multiple steps---see examples for more details. The model is compatible with any `autoencoding` model as the ``question_encoder`` and any `seq2seq` model with a language modeling head as the ``generator``. It has been tested with :class:`~transformers.TFDPRQuestionEncoder` as the ``question_encoder`` and :class:`~transformers.TFBartForConditionalGeneration` as the ``generator``. This model inherits from :class:`~transformers.TFPreTrainedModel`. Check the superclass documentation for the generic methods the library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a TensorFlow `tf.keras.Model <https://www.tensorflow.org/api_docs/python/tf/keras/Model>`__ subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matters related to general usage and behavior. The model is still in a developing state: it is currently fully supported in eager mode only, and may not be exportable in SavedModel format. Args: config (:class:`~transformers.RagConfig`): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the :meth:`~transformers.TFPreTrainedModel.from_pretrained` method to load the model weights. question_encoder (:class:`transformers.TFPreTrainedModel`): An encoder model compatible with the faiss index encapsulated by the ``retriever``. generator (:class:`transformers.TFPreTrainedModel`): A seq2seq model used as the generator in the RAG architecture. retriever (:class:`~transformers.RagRetriever`): A retriever class encapsulating a faiss index queried to obtain context documents for current inputs.
`None` by default, construct as per instructions for the generator model you're using with your RAG instance. decoder_attention_mask (:obj:`torch.BoolTensor` of shape :obj:`(batch_size, target_sequence_length)`, `optional`): Default behavior: generate a tensor that ignores pad tokens in :obj:`decoder_input_ids`. Causal mask will also be used by default. past_key_values (:obj:`tuple(tuple(tf.Tensor))`): Tuple consists of two elements: :obj:`encoder_outputs` of the RAG model (see :obj:`encoder_outputs`) and :obj:`past_key_values` of the underlying generator. Can be used to speed up decoding. :obj:`past_key_values` are used in the (:class:`~transformers.RagTokenForGeneration`) model during decoding. doc_scores (:obj:`tf.Tensor` of shape :obj:`(batch_size, config.n_docs)`): Score between each retrieved document embeddings (see :obj:`retrieved_doc_embeds`) and :obj:`question_encoder_last_hidden_state`. If the model is not initialized with a ``retriever``, :obj:`doc_scores` has to be provided to the forward pass. :obj:`doc_scores` can be computed via :obj:`question_encoder_last_hidden_state` and :obj:`retrieved_doc_embeds`, see examples for more information. context_input_ids (:obj:`tf.Tensor` of shape :obj:`(batch_size * config.n_docs, config.max_combined_length)`, `optional`, returned when `output_retrieved=True`): Input IDs post-processed from the retrieved documents and the question encoder :obj:`input_ids` by the retriever. If the model is not initialized with a ``retriever``, :obj:`context_input_ids` has to be provided to the forward pass. :obj:`context_input_ids` are returned by :meth:`~transformers.RagRetriever.__call__`. context_attention_mask (:obj:`tf.Tensor` of shape :obj:`(batch_size * config.n_docs, config.max_combined_length)`, `optional`, returned when `output_retrieved=True`): Attention mask post-processed from the retrieved documents and the question encoder :obj:`input_ids` by the retriever. If the model is not initialized with a ``retriever``, :obj:`context_attention_mask` has to be provided to the forward pass. :obj:`context_attention_mask` are returned by :meth:`~transformers.RagRetriever.__call__`. use_cache (:obj:`bool`, `optional`, defaults to :obj:`True`): If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up decoding (see :obj:`past_key_values`). output_attentions (:obj:`bool`, `optional`): Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned tensors for more detail. output_hidden_states (:obj:`bool`, `optional`): Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for more detail. output_retrieved (:obj:`bool`, `optional`): Whether or not to return the :obj:`retrieved_doc_embeds`, :obj:`retrieved_doc_ids`, :obj:`context_input_ids` and :obj:`context_attention_mask`. See returned tensors for more detail. return_dict (:obj:`bool`, `optional`): Whether or not to return a :class:`~TFRetrievAugLMOutput` instead of a plain tuple. n_docs (:obj:`int`, `optional`, defaults to :obj:`config.n_docs`): Number of documents to retrieve and/or number of documents for which to generate an answer.
""" @add_start_docstrings_to_model_forward(RAG_START_DOCSTRING) class TFRagModel(TFRagPreTrainedModel): load_weight_prefix = "tf_rag_model_1" def __init__( self, config: Optional[PretrainedConfig] = None, question_encoder: Optional[TFPreTrainedModel] = None, generator: Optional[TFPreTrainedModel] = None, retriever: Optional = None, load_weight_prefix: Optional[str] = None, **kwargs, ): assert config is not None or ( question_encoder is not None and generator is not None ), "Either a configuration or an question_encoder and a generator has to be provided." if config is None: config = RagConfig.from_question_encoder_generator_configs( question_encoder.config, generator.config, **kwargs ) else: assert isinstance(config, self.config_class), "config: {} has to be of type {}".format( config, self.config_class ) super().__init__(config, **kwargs) if question_encoder is None: from ..auto.modeling_tf_auto import TFAutoModel question_encoder = TFAutoModel.from_config(config.question_encoder, name="question_encoder") if generator is None: from ..auto.modeling_tf_auto import TFAutoModelForSeq2SeqLM load_weight_prefix = load_weight_prefix if load_weight_prefix is not None else self.load_weight_prefix generator = TFAutoModelForSeq2SeqLM.from_config( config.generator, name="generator", load_weight_prefix=load_weight_prefix + "/generator" ) self.retriever = retriever if self.retriever is not None: assert isinstance( retriever, RagRetriever ), f"`self.retriever` is of type {type(self.retriever)}, but should be of type `RagRetriever`" self.retriever = retriever self.question_encoder = question_encoder self.generator = generator def set_retriever(self, retriever: RagRetriever): self.retriever = retriever @add_start_docstrings_to_model_forward(RAG_FORWARD_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=TFRetrievAugLMOutput, config_class=_CONFIG_FOR_DOC) def call( self, input_ids=None, attention_mask=None, encoder_outputs=None, decoder_input_ids=None, decoder_attention_mask=None, past_key_values=None, doc_scores=None, context_input_ids=None, context_attention_mask=None, use_cache=None, output_attentions=None, output_hidden_states=None, output_retrieved=None, n_docs=None, return_dict=None, training=False, **kwargs ): r""" Returns: Example:: >>> from transformers import RagTokenizer, RagRetriever, RagModel >>> import torch >>> tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-base") >>> retriever = RagRetriever.from_pretrained("facebook/rag-token-base", index_name="exact", use_dummy_dataset=True) >>> # initialize with RagRetriever to do everything in one forward call >>> model = TFRagModel.from_pretrained("facebook/rag-token-base", retriever=retriever, from_pt=True) >>> input_dict = tokenizer.prepare_seq2seq_batch("How many people live in Paris?", "In Paris, there are 10 million people.", return_tensors="tf") >>> input_ids = input_dict["input_ids"] >>> outputs = model(input_ids) """ assert ( "decoder_cached_states" not in kwargs ), "Please use past_key_values to cache intermediate outputs" # from modeling_tf_bart.py inputs = input_processing( func=self.call, config=self.config, input_ids=input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, encoder_outputs=encoder_outputs, past_key_values=past_key_values, doc_scores=doc_scores, context_input_ids=context_input_ids, context_attention_mask=context_attention_mask, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, 
output_retrieved=output_retrieved, return_dict=return_dict, n_docs=n_docs, training=training, kwargs_call=kwargs, ) # aliasing to minimize code changing input_ids = inputs["input_ids"] attention_mask = inputs["attention_mask"] decoder_input_ids = inputs["decoder_input_ids"] decoder_attention_mask = inputs["decoder_attention_mask"] encoder_outputs = inputs["encoder_outputs"] past_key_values = inputs["past_key_values"] doc_scores = inputs["doc_scores"] context_input_ids = inputs["context_input_ids"] context_attention_mask = inputs["context_attention_mask"] use_cache = inputs["use_cache"] output_attentions = inputs["output_attentions"] output_hidden_states = inputs["output_hidden_states"] return_dict = inputs["return_dict"] n_docs = inputs["n_docs"] if inputs["n_docs"] is not None else self.config.n_docs output_retrieved = inputs["output_retrieved"] training = inputs["training"] # whether retriever has to be used has_to_retrieve = ( self.retriever is not None and (context_input_ids is None or context_attention_mask is None or doc_scores is None) and encoder_outputs is None ) # encoder_outputs are pre-computed during RAG-token generation if encoder_outputs is None: if has_to_retrieve: question_enc_outputs = self.question_encoder( input_ids, attention_mask=attention_mask, return_dict=True, training=training ) # see https://github.com/huggingface/transformers/blob/master/src/transformers/models/dpr/modeling_tf_dpr.py#L91 question_encoder_last_hidden_state = question_enc_outputs[ 0 ] # hidden states of question encoder => pooler_output retriever_outputs = self.retriever( input_ids, question_encoder_last_hidden_state.numpy(), prefix=self.generator.config.prefix, n_docs=n_docs, return_tensors="tf", ) context_input_ids, context_attention_mask, retrieved_doc_embeds, retrieved_doc_ids = ( retriever_outputs["context_input_ids"], retriever_outputs["context_attention_mask"], retriever_outputs["retrieved_doc_embeds"], retriever_outputs["doc_ids"], ) context_input_ids = tf.cast(context_input_ids, tf.int32) context_attention_mask = tf.cast(context_attention_mask, tf.int32) retrieved_doc_embeds = tf.cast(retrieved_doc_embeds, tf.float32) retrieved_doc_ids = tf.cast(retrieved_doc_ids, tf.int32) # compute doc_scores doc_scores = tf.squeeze( tf.matmul( tf.expand_dims(question_encoder_last_hidden_state, axis=1), retrieved_doc_embeds, transpose_b=True, ), axis=1, ) else: assert ( context_input_ids is not None ), "Make sure that `context_input_ids` are passed, if no `retriever` is set. Alternatively, you can set a retriever using the `set_retriever(...)` function." assert ( context_attention_mask is not None ), "Make sure that `context_attention_mask` are passed, if no `retriever` is set. Alternatively, you can set a retriever using the `set_retriever(...)` function." assert ( doc_scores is not None ), "Make sure that `doc_scores` are passed, if no `retriever` is set. Alternatively, you can set a retriever using the `set_retriever(...)` function." assert ( doc_scores is not None ), "Make sure that `doc_scores` are passed when passing `encoder_outputs` to the forward function." assert ( doc_scores.shape[1] % n_docs ) == 0, f" The first dimension of `context_input_ids` should be a multiple of `n_docs`={n_docs}, but is {context_input_ids.shape[0]}." 
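        # Added illustrative comment (not part of the original file): a shape sketch of the
        # doc_scores computation above, assuming toy sizes batch_size=2, n_docs=5, hidden_size=768:
        #
        #   q = tf.random.normal((2, 768))      # question_encoder_last_hidden_state
        #   d = tf.random.normal((2, 5, 768))   # retrieved_doc_embeds
        #   scores = tf.squeeze(tf.matmul(tf.expand_dims(q, axis=1), d, transpose_b=True), axis=1)
        #   # -> scores.shape == (2, 5), i.e. (batch_size, n_docs)
        #
        # Each question is thus paired with n_docs context documents, which is why the decoder
        # inputs below are repeated n_docs times along the batch dimension.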
# Decoder input without context documents if decoder_input_ids is not None: decoder_input_ids = tf.repeat(decoder_input_ids, n_docs, axis=0) if decoder_attention_mask is not None: decoder_attention_mask = tf.repeat(decoder_attention_mask, n_docs, axis=0) gen_outputs = self.generator( context_input_ids, attention_mask=context_attention_mask, encoder_outputs=encoder_outputs, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, past_key_values=past_key_values, use_cache=use_cache, return_dict=True, training=training, ) if not has_to_retrieve: question_encoder_last_hidden_state = None question_enc_hidden_states = None question_enc_attentions = None retrieved_doc_embeds = None retrieved_doc_ids = None else: question_enc_hidden_states = question_enc_outputs.hidden_states question_enc_attentions = question_enc_outputs.attentions if not has_to_retrieve or not output_retrieved: # don't output retrieved docs context_input_ids = (None,) context_attention_mask = None retrieved_doc_embeds = None retrieved_doc_ids = None return TFRetrievAugLMOutput( logits=gen_outputs.logits, doc_scores=doc_scores, past_key_values=gen_outputs.past_key_values, context_input_ids=context_input_ids, context_attention_mask=context_attention_mask, retrieved_doc_embeds=retrieved_doc_embeds, retrieved_doc_ids=retrieved_doc_ids, question_encoder_last_hidden_state=question_encoder_last_hidden_state, question_enc_hidden_states=question_enc_hidden_states, question_enc_attentions=question_enc_attentions, generator_enc_last_hidden_state=gen_outputs.encoder_last_hidden_state, generator_enc_hidden_states=gen_outputs.encoder_hidden_states, generator_enc_attentions=gen_outputs.encoder_attentions, generator_dec_hidden_states=gen_outputs.decoder_hidden_states, generator_dec_attentions=gen_outputs.decoder_attentions, ) @add_start_docstrings_to_model_forward( """ A TF RAG-token model implementation. It performs RAG-token specific marginalization in the forward pass. """, RAG_START_DOCSTRING, ) class TFRagTokenForGeneration(TFRagPreTrainedModel, TFCausalLanguageModelingLoss): load_weight_prefix = "tf_rag_token_for_generation_1/rag" def __init__( self, config: Optional[PretrainedConfig] = None, question_encoder: Optional[TFPreTrainedModel] = None, generator: Optional[TFPreTrainedModel] = None, retriever: Optional = None, **kwargs, ): assert config is not None or ( question_encoder is not None and generator is not None ), "Either a configuration or an encoder and a generator has to be provided." 
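        # Descriptive comment (added): if no explicit `config` is passed, a RagConfig is derived
        # below from the question encoder and generator configs; the class then wraps a TFRagModel
        # under `self.rag`, with the RAG-token marginalization, loss and generation logic layered
        # on top of that shared model.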
if config is None: config = RagConfig.from_question_encoder_generator_configs( question_encoder.config, generator.config, **kwargs ) super().__init__(config) # instantiate model self.rag = TFRagModel( config=config, question_encoder=question_encoder, generator=generator, retriever=retriever, load_weight_prefix=self.load_weight_prefix, name="rag", ) def set_retriever(self, retriever: RagRetriever): self.rag.retriever = retriever # Adapted from https://github.com/huggingface/transformers/blob/master/src/transformers/modeling_tf_bart.py def prepare_inputs_for_generation( self, decoder_input_ids, past, attention_mask, use_cache, doc_scores, n_docs=None, **kwargs ) -> Dict: assert past is not None and len(past) in {1, 2}, f"past has to be an iterable of length 1,2 got {past}" if len(past) == 1: assert isinstance(past[0], tf.Tensor) encoder_outputs = TFBaseModelOutput(last_hidden_state=past[0]) decoder_cached_states = None else: assert len(past) == 2 # Note: encoder_outputs is never changed by Bart as a generator encoder_outputs, decoder_cached_states = past if isinstance(encoder_outputs, tuple): assert isinstance(encoder_outputs[0], tf.Tensor) encoder_outputs = TFBaseModelOutput(last_hidden_state=encoder_outputs[0]) elif isinstance(encoder_outputs, tf.Tensor): encoder_outputs = TFBaseModelOutput(last_hidden_state=encoder_outputs) assert ( decoder_cached_states ), f"decoder cached states must be truthy. got {decoder_cached_states} from the 2nd element of past" # if past is defined cut decoder_input_ids to last token decoder_input_ids = decoder_input_ids[:, -1:] assert isinstance( encoder_outputs, TFBaseModelOutput ), f"encoder_outputs should be a TFBaseModelOutput, Instead got {type(encoder_outputs)}." return { "input_ids": None, # encoder_outputs is defined. input_ids not needed "encoder_outputs": encoder_outputs, "doc_scores": doc_scores, "context_attention_mask": attention_mask, "decoder_input_ids": decoder_input_ids, "past_key_values": decoder_cached_states, "use_cache": use_cache, # change this to avoid caching (presumably for debugging) "do_marginalize": True, "n_docs": n_docs, } @property def retriever(self): return self.rag.retriever @property def generator(self): return self.rag.generator @property def question_encoder(self): return self.rag.question_encoder @staticmethod def _reorder_cache(past, beam_idx): """Reorders cache for generation. 
BART-inspired but we need to take care of the extra dimension for docs""" def tf_index_select(input_, dim, indices): """ Input: input_(tensor): input tensor dim(int): dimension indices(list): selected indices list Output: mimic of torch_tensor.index_select(dim, indices) credit: https://stackoverflow.com/questions/58464790/is-there-an-equivalent-function-of-pytorch-named-index-select-in-tensorflow """ shape = shape_list(input_) if dim == -1: dim = len(shape) - 1 shape[dim] = 1 tmp = [] for idx in indices: begin = [0] * len(shape) begin[dim] = idx tmp.append(tf.slice(input_, begin, shape)) res = tf.concat(tmp, axis=dim) return res def _reorder_stacked(hidden_states, new_order=beam_idx): n_docs = hidden_states.shape[0] // new_order.shape[0] hidden_states = tf.reshape(hidden_states, (-1, n_docs, *hidden_states.shape[1:])) hidden_states = tf_index_select(hidden_states, 0, new_order) return tf.reshape(hidden_states, (-1, *hidden_states.shape[2:])) if len(past) == 1: return past past_key_values = past[1] reordered_past = () for layer_past in past_key_values: reordered_past += (tuple(_reorder_stacked(past_state, beam_idx) for past_state in layer_past),) return (past[0], reordered_past) def marginalize(self, seq_logits, doc_scores, n_docs=None): n_docs = n_docs if n_docs is not None else self.config.n_docs # RAG-token marginalization seq_logprobs = tf.nn.log_softmax(seq_logits, axis=-1) seq_logprobs = tf.reshape(seq_logprobs, [seq_logits.shape[0] // n_docs, n_docs, -1, seq_logits.shape[-1]]) doc_logprobs = tf.nn.log_softmax(doc_scores, axis=1) doc_logprobs = tf.expand_dims(doc_logprobs, axis=-1) doc_logprobs = tf.expand_dims(doc_logprobs, axis=-1) # twice log_prob_sum = seq_logprobs + doc_logprobs return tf.reduce_logsumexp(log_prob_sum, axis=1) @add_start_docstrings_to_model_forward(RAG_FORWARD_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=TFRetrievAugLMMarginOutput, config_class=_CONFIG_FOR_DOC) def call( self, input_ids=None, attention_mask=None, decoder_input_ids=None, decoder_attention_mask=None, encoder_outputs=None, past_key_values=None, doc_scores=None, context_input_ids=None, context_attention_mask=None, use_cache=None, output_attentions=None, output_hidden_states=None, output_retrieved=None, n_docs=None, do_marginalize=None, labels=None, reduce_loss=None, return_dict=None, training=False, **kwargs # needs kwargs for generation ): r""" do_marginalize (:obj:`bool`, `optional`): If :obj:`True`, the logits are marginalized over all documents by making use of ``torch.nn.functional.log_softmax``. labels (:obj:`tf.Tensor` or :obj:`np.ndarray` of shape :obj:`(batch_size, sequence_length)`, `optional`): Labels for computing the cross entropy classification loss according to Rag-Token model formulation See https://arxiv.org/pdf/2005.11401.pdf Section 2.1 for details about Rag-Token formulation. Indices should be in ``[0, ..., config.vocab_size - 1]``. reduce_loss (:obj:`bool`, `optional`): Only relevant if ``labels`` is passed. If :obj:`True`, the NLL loss is reduced using the ``tf.Tensor.sum`` operation. kwargs (:obj:`Dict[str, any]`, optional, defaults to `{}`): Legacy dictionary, which is required so that model can use `generate()` function. 
Returns: Example:: >>> from transformers import RagTokenizer, RagRetriever, TFRagTokenForGeneration >>> tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq") >>> retriever = RagRetriever.from_pretrained("facebook/rag-token-nq", index_name="exact", use_dummy_dataset=True) >>> # initialize with RagRetriever to do everything in one forward call >>> model = TFRagTokenForGeneration.from_pretrained("facebook/rag-token-nq", retriever=retriever, from_pt=True) >>> input_dict = tokenizer.prepare_seq2seq_batch("How many people live in Paris?", "In Paris, there are 10 million people.", return_tensors="tf") >>> outputs = model(input_dict, output_retrieved=True) >>> # or use retriever separately >>> # 1. Encode >>> input_ids = input_dict["input_ids"] >>> question_hidden_states = model.question_encoder(input_ids)[0] >>> # 2. Retrieve >>> docs_dict = retriever(input_ids.numpy(), question_hidden_states.numpy(), return_tensors="tf") >>> doc_scores = tf.squeeze(tf.matmul(tf.expand_dims(question_hidden_states, axis=1), docs_dict["retrieved_doc_embeds"], transpose_b=True), axis=1) >>> # 3. Forward to generator >>> outputs = model(inputs=None, context_input_ids=docs_dict["context_input_ids"], context_attention_mask=docs_dict["context_attention_mask"], doc_scores=doc_scores, decoder_input_ids=input_dict["labels"]) >>> # or directly generate >>> generated = model.generate(context_input_ids=docs_dict["context_input_ids"], context_attention_mask=docs_dict["context_attention_mask"], doc_scores=doc_scores) >>> generated_string = tokenizer.batch_decode(generated, skip_special_tokens=True) """ assert ( "decoder_cached_states" not in kwargs ), "Please use past_key_values to cache intermediate outputs" # from modeling_tf_bart.py inputs = input_processing( func=self.call, config=self.config, input_ids=input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, encoder_outputs=encoder_outputs, past_key_values=past_key_values, doc_scores=doc_scores, context_input_ids=context_input_ids, context_attention_mask=context_attention_mask, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, output_retrieved=output_retrieved, n_docs=n_docs, do_marginalize=do_marginalize, labels=labels, reduce_loss=reduce_loss, return_dict=return_dict, training=training, kwargs_call=kwargs, ) inputs["do_marginalize"] = inputs["do_marginalize"] if inputs["do_marginalize"] else self.config.do_marginalize inputs["reduce_loss"] = inputs["reduce_loss"] if inputs["reduce_loss"] else self.config.reduce_loss if inputs["labels"] is not None: if inputs["decoder_input_ids"] is None: inputs["decoder_input_ids"] = inputs["labels"] inputs["use_cache"] = False outputs = self.rag( inputs["input_ids"], attention_mask=inputs["attention_mask"], encoder_outputs=inputs["encoder_outputs"], decoder_input_ids=inputs["decoder_input_ids"], decoder_attention_mask=inputs["decoder_attention_mask"], context_input_ids=inputs["context_input_ids"], context_attention_mask=inputs["context_attention_mask"], doc_scores=inputs["doc_scores"], past_key_values=inputs["past_key_values"], use_cache=inputs["use_cache"], output_attentions=inputs["output_attentions"], output_hidden_states=inputs["output_hidden_states"], output_retrieved=inputs["output_retrieved"], n_docs=inputs["n_docs"], training=inputs["training"], ) loss = None logits = outputs.logits if inputs["labels"] is not None: assert inputs["decoder_input_ids"] is not None loss = self.get_nll( outputs.logits, 
outputs.doc_scores, inputs["labels"], reduce_loss=inputs["reduce_loss"], epsilon=self.config.label_smoothing, n_docs=inputs["n_docs"], ) if inputs["do_marginalize"]: logits = self.marginalize(logits, outputs.doc_scores, inputs["n_docs"]) return TFRetrievAugLMMarginOutput( loss=loss, logits=logits, past_key_values=outputs.past_key_values, doc_scores=outputs.doc_scores, context_input_ids=outputs.context_input_ids, context_attention_mask=outputs.context_attention_mask, retrieved_doc_embeds=outputs.retrieved_doc_embeds, retrieved_doc_ids=outputs.retrieved_doc_ids, question_encoder_last_hidden_state=outputs.question_encoder_last_hidden_state, question_enc_hidden_states=outputs.question_enc_hidden_states, question_enc_attentions=outputs.question_enc_attentions, generator_enc_last_hidden_state=outputs.generator_enc_last_hidden_state, generator_enc_hidden_states=outputs.generator_enc_hidden_states, generator_enc_attentions=outputs.generator_enc_attentions, generator_dec_hidden_states=outputs.generator_dec_hidden_states, generator_dec_attentions=outputs.generator_dec_attentions, ) def generate( self, input_ids: Optional[tf.Tensor] = None, attention_mask: Optional[tf.Tensor] = None, context_input_ids=None, context_attention_mask=None, doc_scores=None, max_length=None, min_length=None, early_stopping=None, use_cache=None, num_beams=None, bos_token_id=None, pad_token_id=None, eos_token_id=None, length_penalty=None, no_repeat_ngram_size=None, bad_words_ids=None, num_return_sequences=None, decoder_start_token_id=None, n_docs=None, **kwargs ): """ Implements TFRAG token decoding. Args: input_ids (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): The sequence used as a prompt for the generation. If :obj:`input_ids` is not passed, then :obj:`context_input_ids` has to be provided. attention_mask (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. `What are attention masks? <../glossary.html#attention-mask>`__ context_input_ids (:obj:`tf.Tensor` of shape :obj:`(batch_size * config.n_docs, config.max_combined_length)`, `optional`, returned when `output_retrieved=True`): Input IDs post-processed from the retrieved documents and the question encoder :obj:`input_ids` by the retriever. If the model has is not initialized with a ``retriever``, :obj:`context_input_ids` has to be provided to the forward pass. :obj:`context_input_ids` are returned by :meth:`~transformers.RagRetriever.__call__`. context_attention_mask (:obj:`tf.Tensor` of shape :obj:`(batch_size * config.n_docs, config.max_combined_length)`, `optional`, returned when `output_retrieved=True`): Attention mask post-processed from the retrieved documents and the question encoder :obj:`input_ids` by the retriever. If the model has is not initialized with a ``retriever``, :obj:`context_input_ids` has to be provided to the forward pass. :obj:`context_input_ids` are returned by :meth:`~transformers.RagRetriever.__call__`. doc_scores (:obj:`tf.Tensor` of shape :obj:`(batch_size, config.n_docs)`): Score between each retrieved document embeddings (see :obj:`retrieved_doc_embeds`) and :obj:`question_encoder_last_hidden_state`. If the model has is not initialized with a ``retriever``, :obj:`context_input_ids` has to be provided to the forward pass. :obj:`context_input_ids` are returned by :meth:`~transformers.RagRetriever.__call__`. 
max_length (:obj:`int`, `optional`, defaults to 20): The maximum length of the sequence to be generated. min_length (:obj:`int`, `optional`, defaults to 10): The minimum length of the sequence to be generated. early_stopping (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not to stop the beam search when at least ``num_beams`` sentences are finished per batch or not. use_cache: (:obj:`bool`, `optional`, defaults to :obj:`True`): Whether or not the model should use the past last key/values attentions (if applicable to the model) to speed up decoding. pad_token_id (:obj:`int`, `optional`): The id of the `padding` token. bos_token_id (:obj:`int`, `optional`): The id of the `beginning-of-sequence` token. eos_token_id (:obj:`int`, `optional`): The id of the `end-of-sequence` token. length_penalty (:obj:`float`, `optional`, defaults to 1.0): Exponential penalty to the length. 1.0 means no penalty. Set to values < 1.0 in order to encourage the model to generate shorter sequences, to a value > 1.0 in order to encourage the model to produce longer sequences. no_repeat_ngram_size (:obj:`int`, `optional`, defaults to 0): If set to int > 0, all ngrams of that size can only occur once. bad_words_ids(:obj:`List[int]`, `optional`): List of token ids that are not allowed to be generated. In order to get the tokens of the words that should not appear in the generated text, use :obj:`tokenizer.encode(bad_word, add_prefix_space=True)`. num_beams (:obj:`int`, `optional`, defaults to 1): Number of beams for beam search. 1 means no beam search. num_return_sequences(:obj:`int`, `optional`, defaults to 1): The number of independently computed returned sequences for each element in the batch. Note that this is not the value we pass to the ``generator``'s `:func:`~transformers.PreTrainedModel.generate` function, where we set ``num_return_sequences`` to :obj:`num_beams`. decoder_start_token_id (:obj:`int`, `optional`): If an encoder-decoder model starts decoding with a different token than `bos`, the id of that token. n_docs (:obj:`int`, `optional`, defaults to :obj:`config.n_docs`) Number of documents to retrieve and/or number of documents for which to generate an answer. Return: :obj:`tf.Tensor` of shape :obj:`(batch_size * num_return_sequences, sequence_length)`: The generated sequences. The second dimension (sequence_length) is either equal to :obj:`max_length` or shorter if all batches finished early due to the :obj:`eos_token_id`. 
""" # set default parameters n_docs = n_docs if n_docs is not None else self.config.n_docs max_length = max_length if max_length is not None else self.config.max_length min_length = min_length if min_length is not None else self.config.min_length early_stopping = early_stopping if early_stopping is not None else self.config.early_stopping use_cache = use_cache if use_cache is not None else self.config.use_cache num_beams = num_beams if num_beams is not None else self.config.num_beams bos_token_id = bos_token_id if bos_token_id is not None else self.config.generator.bos_token_id pad_token_id = pad_token_id if pad_token_id is not None else self.config.generator.pad_token_id eos_token_id = eos_token_id if eos_token_id is not None else self.config.generator.eos_token_id length_penalty = length_penalty if length_penalty is not None else self.config.length_penalty no_repeat_ngram_size = ( no_repeat_ngram_size if no_repeat_ngram_size is not None else self.config.no_repeat_ngram_size ) bad_words_ids = bad_words_ids if bad_words_ids is not None else self.config.bad_words_ids num_return_sequences = ( num_return_sequences if num_return_sequences is not None else self.config.num_return_sequences ) decoder_start_token_id = ( decoder_start_token_id if decoder_start_token_id is not None else self.config.generator.decoder_start_token_id ) # retrieve docs if self.retriever is not None and context_input_ids is None: question_hidden_states = self.question_encoder(input_ids, attention_mask=attention_mask)[0] out = self.retriever( input_ids, question_hidden_states.numpy().astype(np.float32), prefix=self.generator.config.prefix, n_docs=n_docs, return_tensors="tf", ) context_input_ids, context_attention_mask, retrieved_doc_embeds = ( out["context_input_ids"], out["context_attention_mask"], out["retrieved_doc_embeds"], ) context_input_ids = tf.cast(context_input_ids, tf.int32) context_attention_mask = tf.cast(context_attention_mask, tf.int32) retrieved_doc_embeds = tf.cast(retrieved_doc_embeds, tf.float32) # compute doc_scores doc_scores = tf.matmul( tf.expand_dims(question_hidden_states, axis=1), retrieved_doc_embeds, transpose_b=True ) doc_scores = tf.squeeze(doc_scores, axis=1) assert ( context_input_ids.shape[0] % n_docs ) == 0, f" The first dimension of `context_input_ids` should be a multiple of `n_docs`={n_docs}, but is {context_input_ids.shape[0]}." 
batch_size = context_input_ids.shape[0] // n_docs encoder = self.rag.generator.get_encoder() encoder_outputs = encoder(input_ids=context_input_ids, attention_mask=context_attention_mask, return_dict=True) decoder_input_ids = tf.fill( (batch_size * num_beams, 1), tf.cast(decoder_start_token_id, tf.int32), ) last_hidden_state = encoder_outputs["last_hidden_state"] def extend_enc_output(tensor, num_beams=None): """ Broadcast tensor with `num_beams` replica, with correct order Input: tensor of shape (batch_size*n_docs , d) Output: tensor of shape (batch_size*num_beams*n_docs , d) """ # expand batch_size & num_beam dimensions d_shape_list = tensor.shape[1:] # split n_docs dimensions new_shape = (batch_size, 1, n_docs) + d_shape_list tensor = tf.reshape(tensor, new_shape) # repeat same last hidden states over `num_beams` dimension new_shape = (batch_size, num_beams, n_docs) + d_shape_list tensor = tf.broadcast_to(tensor, new_shape) # merge `batch_size`, `num_beams`, `num_docs` dims again new_shape = (batch_size * num_beams * n_docs,) + d_shape_list return tf.reshape(tensor, new_shape) # correctly extend last_hidden_state and attention mask context_attention_mask = extend_enc_output(context_attention_mask, num_beams=num_beams) encoder_outputs["last_hidden_state"] = extend_enc_output(last_hidden_state, num_beams=num_beams) doc_scores = tf.repeat(doc_scores, num_beams, axis=0) # define start_len & additional parameters cur_len = 1 vocab_size = self.config.generator.vocab_size kwargs["doc_scores"] = doc_scores kwargs["encoder_outputs"] = encoder_outputs kwargs["n_docs"] = n_docs # not needed. TODO(PVP): change after generate refactor do_sample = False temperature = self.config.temperature top_k = self.config.top_k top_p = self.config.top_p repetition_penalty = self.config.repetition_penalty if num_beams > 1: return self._generate_beam_search( decoder_input_ids, cur_len=cur_len, max_length=max_length, min_length=min_length, do_sample=do_sample, early_stopping=early_stopping, temperature=temperature, top_k=top_k, top_p=top_p, repetition_penalty=repetition_penalty, no_repeat_ngram_size=no_repeat_ngram_size, bad_words_ids=bad_words_ids, pad_token_id=pad_token_id, eos_token_id=eos_token_id, batch_size=batch_size, num_return_sequences=num_return_sequences, length_penalty=length_penalty, num_beams=num_beams, vocab_size=vocab_size, attention_mask=context_attention_mask, use_cache=use_cache, forced_bos_token_id=None, forced_eos_token_id=None, **kwargs, # encoder_outputs is here as in Pytorch's version ) else: return self._generate_no_beam_search( decoder_input_ids, cur_len=cur_len, max_length=max_length, min_length=min_length, do_sample=do_sample, temperature=temperature, top_k=top_k, top_p=top_p, repetition_penalty=repetition_penalty, no_repeat_ngram_size=no_repeat_ngram_size, bad_words_ids=bad_words_ids, pad_token_id=pad_token_id, eos_token_id=eos_token_id, batch_size=batch_size, vocab_size=vocab_size, attention_mask=context_attention_mask, use_cache=use_cache, forced_bos_token_id=None, forced_eos_token_id=None, **kwargs, # encoder_outputs is here as in Pytorch's version ) def get_input_embeddings(self): return self.rag.generator.get_input_embeddings() def get_output_embeddings(self): return self.rag.generator.get_output_embeddings() # Adapted from tf_t5's & tf_bart's _shift_right def shift_tokens_right(self, input_ids, start_token_id=None): """Shift input ids one token to the right, and pad with start_token_id""" if start_token_id is None: start_token_id = self.generator.config.decoder_start_token_id 
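        # Added illustrative comment: a toy trace of the shift performed below, assuming
        # start_token_id=0 and pad_token_id=1:
        #
        #   input_ids             = [[5, 6, 7, 1]]
        #   roll + prepend start  = [[0, 5, 6, 7]]
        #
        # Any -100 values that remain after the shift (e.g. ignored label positions) are then
        # replaced by pad_token_id.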
assert ( start_token_id is not None ), "self.generator.config.decoder_start_token_id has to be defined. In Rag we commonly use Bart as generator, see Bart docs for more information" pad_token_id = self.generator.config.pad_token_id assert pad_token_id is not None, "self.model.config.pad_token_id has to be defined." shifted_input_ids = tf.cast(input_ids, tf.int32) shifted_input_ids = tf.roll(shifted_input_ids, 1, axis=-1) start_tokens = tf.fill((shape_list(shifted_input_ids)[0], 1), start_token_id) shifted_input_ids = tf.concat([start_tokens, shifted_input_ids[:, 1:]], -1) # replace possible -100 values in labels by `pad_token_id` shifted_input_ids = tf.where( shifted_input_ids == -100, tf.fill(shape_list(shifted_input_ids), pad_token_id), shifted_input_ids ) # "Verify that `labels` has only positive values and -100" assert_gte0 = tf.debugging.assert_greater_equal(shifted_input_ids, tf.cast(0, tf.int32)) # Make sure the assertion op is called by wrapping the result in an identity no-op with tf.control_dependencies([assert_gte0]): shifted_input_ids = tf.identity(shifted_input_ids) return shifted_input_ids # nll stands for 'negative log likelihood' def get_nll(self, seq_logits, doc_scores, target, reduce_loss=False, epsilon=0.0, n_docs=None): n_docs = n_docs if n_docs is not None else self.config.n_docs # shift tokens left (from original Pytorch's version) target = tf.concat([target[:, 1:], tf.fill([target.shape[0], 1], self.config.generator.pad_token_id)], axis=1) rag_logprobs = self.marginalize(seq_logits, doc_scores, n_docs) loss = self.compute_loss(target, rag_logprobs, from_logits=True, reduce_loss=reduce_loss) return loss # Adopted modeling_tf_bart + add smooth_loss to match with pytorch version def compute_loss(self, labels, y_pred, smooth_epsilon=0.0, from_logits=True, reduce_loss=False): """CrossEntropyLoss that ignores pad tokens""" loss_fn = tf.keras.losses.SparseCategoricalCrossentropy( from_logits=True, reduction=tf.keras.losses.Reduction.SUM, ) if from_logits is False: # convert to logits eps = 1e-9 y_pred = tf.clip_by_value(y_pred, clip_value_min=eps, clip_value_max=1 - eps) y_pred = tf.math.log(y_pred) logits = y_pred melted_labels = tf.reshape(labels, (-1,)) active_loss = tf.not_equal(melted_labels, self.config.generator.pad_token_id) reduced_logits = tf.boolean_mask(tf.reshape(logits, (-1, logits.shape[2])), active_loss) labels = tf.boolean_mask(melted_labels, active_loss) nll_loss = loss_fn(labels, reduced_logits) smooth_loss = -tf.reduce_sum(reduced_logits, axis=-1) smooth_loss = tf.reduce_sum(smooth_loss) # sum and squeeze like torch eps_i = smooth_epsilon / reduced_logits.shape[-1] loss = (1.0 - smooth_epsilon) * nll_loss + eps_i * smooth_loss return loss @add_start_docstrings_to_model_forward( """ A TF RAG-sequence model implementation. It performs RAG-sequence specific marginalization in the forward pass. """, RAG_START_DOCSTRING, ) class TFRagSequenceForGeneration(TFRagPreTrainedModel, TFCausalLanguageModelingLoss): load_weight_prefix = "tf_rag_sequence_for_generation_1/rag" def __init__( self, config: Optional[PretrainedConfig] = None, question_encoder: Optional[TFPreTrainedModel] = None, generator: Optional[TFPreTrainedModel] = None, retriever: Optional = None, **kwargs, ): assert config is not None or ( question_encoder is not None and generator is not None ), "Either a configuration or an encoder and a generator has to be provided." 
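        # Descriptive comment (added): TFRagSequenceForGeneration mirrors the RAG-token class
        # above and wraps the same TFRagModel under `self.rag`; the difference is in `get_nll`,
        # which marginalizes over the retrieved documents per generated sequence (RAG-sequence)
        # rather than per decoding step.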
if config is None: config = RagConfig.from_question_encoder_generator_configs( question_encoder.config, generator.config, **kwargs ) super().__init__(config) # instantiate model self.rag = TFRagModel( config=config, question_encoder=question_encoder, generator=generator, retriever=retriever, load_weight_prefix=self.load_weight_prefix, name="rag", ) def set_retriever(self, retriever: RagRetriever): self.rag.retriever = retriever @property def retriever(self): return self.rag.retriever @property def generator(self): return self.rag.generator @property def question_encoder(self): return self.rag.question_encoder @add_start_docstrings_to_model_forward(RAG_FORWARD_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=TFRetrievAugLMMarginOutput, config_class=_CONFIG_FOR_DOC) def call( self, input_ids=None, attention_mask=None, decoder_input_ids=None, decoder_attention_mask=None, encoder_outputs=None, past_key_values=None, doc_scores=None, context_input_ids=None, context_attention_mask=None, use_cache=None, output_attentions=None, output_hidden_states=None, output_retrieved=None, n_docs=None, exclude_bos_score=None, labels=None, reduce_loss=None, return_dict=None, training=False, **kwargs # needs kwargs for generation ): r""" exclude_bos_score (:obj:`bool`, `optional`): Only relevant if ``labels`` is passed. If :obj:`True`, the score of the BOS token is disregarded when computing the loss. labels (:obj:`tf.Tensor` or :obj:`np.ndarray` of shape :obj:`(batch_size, sequence_length)`, `optional`): Labels for computing the cross entropy classification loss according to Rag-Sequence model formulation See https://arxiv.org/pdf/2005.11401.pdf Section 2.1 for details about Rag-Sequence formulation. Indices should be in ``[0, ..., config.vocab_size - 1]``. reduce_loss (:obj:`bool`, `optional`): Only relevant if ``labels`` is passed. If :obj:`True`, the NLL loss is reduced using the ``tf.Tensor.sum`` operation. kwargs (:obj:`Dict[str, any]`, optional, defaults to `{}`): Legacy dictionary, which is required so that model can use `generate()` function. Returns: Example:: >>> from transformers import RagTokenizer, RagRetriever, TFRagSequenceForGeneration >>> tokenizer = RagTokenizer.from_pretrained("facebook/rag-sequence-nq") >>> retriever = RagRetriever.from_pretrained("facebook/rag-sequence-nq", index_name="exact", use_dummy_dataset=True) >>> # initialize with RagRetriever to do everything in one forward call >>> model = TFRagRagSequenceForGeneration.from_pretrained("facebook/rag-sequence-nq", retriever=retriever, from_pt=True) >>> input_dict = tokenizer.prepare_seq2seq_batch("How many people live in Paris?", "In Paris, there are 10 million people.", return_tensors="tf") >>> outputs = model(input_dict, output_retrieved=True) >>> # or use retriever separately >>> # 1. Encode >>> input_ids = input_dict["input_ids"] >>> question_hidden_states = model.question_encoder(input_ids)[0] >>> # 2. Retrieve >>> docs_dict = retriever(input_ids.numpy(), question_hidden_states.numpy(), return_tensors="tf") >>> doc_scores = tf.squeeze(tf.matmul(tf.expand_dims(question_hidden_states, axis=1), docs_dict["retrieved_doc_embeds"], transpose_b=True), axis=1) >>> # 3. 
Forward to generator >>> outputs = model(inputs=None, context_input_ids=docs_dict["context_input_ids"], context_attention_mask=docs_dict["context_attention_mask"], doc_scores=doc_scores, decoder_input_ids=input_dict["labels"]) >>> # or directly generate >>> generated = model.generate(context_input_ids=docs_dict["context_input_ids"], context_attention_mask=docs_dict["context_attention_mask"], doc_scores=doc_scores) >>> generated_string = tokenizer.batch_decode(generated, skip_special_tokens=True) """ assert ( "decoder_cached_states" not in kwargs ), "Please use past_key_values to cache intermediate outputs" # from modeling_tf_bart.py inputs = input_processing( func=self.call, config=self.config, input_ids=input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, encoder_outputs=encoder_outputs, past_key_values=past_key_values, doc_scores=doc_scores, context_input_ids=context_input_ids, context_attention_mask=context_attention_mask, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, output_retrieved=output_retrieved, n_docs=n_docs, exclude_bos_score=exclude_bos_score, labels=labels, reduce_loss=reduce_loss, training=training, return_dict=return_dict, kwargs_call=kwargs, ) inputs["exclude_bos_score"] = ( inputs["exclude_bos_score"] if inputs["exclude_bos_score"] else self.config.exclude_bos_score ) inputs["reduce_loss"] = inputs["reduce_loss"] if inputs["reduce_loss"] else self.config.reduce_loss if inputs["labels"] is not None: if inputs["decoder_input_ids"] is None: inputs["decoder_input_ids"] = inputs["labels"] inputs["use_cache"] = False outputs = self.rag( inputs["input_ids"], attention_mask=inputs["attention_mask"], encoder_outputs=inputs["encoder_outputs"], decoder_input_ids=inputs["decoder_input_ids"], decoder_attention_mask=inputs["decoder_attention_mask"], context_input_ids=inputs["context_input_ids"], context_attention_mask=inputs["context_attention_mask"], doc_scores=inputs["doc_scores"], past_key_values=inputs["past_key_values"], use_cache=inputs["use_cache"], output_attentions=inputs["output_attentions"], output_hidden_states=inputs["output_hidden_states"], output_retrieved=inputs["output_retrieved"], n_docs=inputs["n_docs"], training=inputs["training"], ) loss = None if inputs["labels"] is not None: loss = self.get_nll( outputs.logits, outputs.doc_scores, inputs["labels"], reduce_loss=inputs["reduce_loss"], epsilon=self.config.label_smoothing, n_docs=inputs["n_docs"], ) return TFRetrievAugLMMarginOutput( loss=loss, logits=outputs.logits, doc_scores=outputs.doc_scores, past_key_values=outputs.past_key_values, context_input_ids=outputs.context_input_ids, context_attention_mask=outputs.context_attention_mask, retrieved_doc_embeds=outputs.retrieved_doc_embeds, retrieved_doc_ids=outputs.retrieved_doc_ids, question_encoder_last_hidden_state=outputs.question_encoder_last_hidden_state, question_enc_hidden_states=outputs.question_enc_hidden_states, question_enc_attentions=outputs.question_enc_attentions, generator_enc_last_hidden_state=outputs.generator_enc_last_hidden_state, generator_enc_hidden_states=outputs.generator_enc_hidden_states, generator_enc_attentions=outputs.generator_enc_attentions, generator_dec_hidden_states=outputs.generator_dec_hidden_states, generator_dec_attentions=outputs.generator_dec_attentions, ) def get_nll( self, seq_logits, doc_scores, target, reduce_loss=False, epsilon=0.0, exclude_bos_score=False, n_docs=None ): # shift tokens left target = 
tf.concat([target[:, 1:], tf.fill([target.shape[0], 1], self.config.generator.pad_token_id)], axis=1) # bos_token_id is None for T5 bos_token_id = self.config.bos_token_id or self.config.generator.bos_token_id n_docs = n_docs if n_docs is not None else self.config.n_docs equal_bos_token_id_all = tf.reduce_all(tf.equal(target[:, 0], bos_token_id)) use_bos = bos_token_id is not None and equal_bos_token_id_all def _mask_pads(ll, smooth_obj): pad_mask = tf.equal(target, self.config.generator.pad_token_id) if tf.reduce_any(pad_mask): ll = tf.where(pad_mask, 0.0, ll) smooth_obj = tf.where(pad_mask, 0.0, smooth_obj) return tf.squeeze(ll, axis=-1), tf.squeeze(smooth_obj, axis=-1) # seq_logits.shape = (batch*n_docs, tgt_len , vocabs) seq_logprobs = tf.nn.log_softmax(seq_logits, axis=-1) seq_logprobs = tf.reshape( seq_logprobs, (seq_logits.shape[0] // n_docs, n_docs, -1, seq_logits.shape[-1]) ) # (batch_size, n_docs, tgt_len, vocabs) doc_logprobs = tf.nn.log_softmax(doc_scores, axis=1) doc_logprobs = tf.expand_dims(doc_logprobs, axis=-1) doc_logprobs = tf.expand_dims(doc_logprobs, axis=-1) # done twice to get 4-D # RAG-sequence marginalization first_token_scores = seq_logprobs[:, :, :1, :] second_token_scores = seq_logprobs[:, :, 1:2, :] remainder = seq_logprobs[:, :, 2:, :] rag_logprobs = tf.concat([first_token_scores, second_token_scores + doc_logprobs, remainder], axis=2) # calculate loss target = tf.expand_dims(target, axis=1) # n_docs dimension target = tf.expand_dims(target, axis=-1) # logits dimension target = tf.repeat(target, n_docs, axis=1) assert len(target.shape) == len(rag_logprobs.shape) # last-axis gathering only - use 2D-reshape-trick for Torch's style nD gathering def torch_gather(param, id_tensor): # 2d-gather torch equivalent: https://stackoverflow.com/questions/52129909/tensorflow-equivalent-of-torch-gather def gather2d(target, id_tensor): idx = tf.stack([tf.range(tf.shape(id_tensor)[0]), id_tensor[:, 0]], axis=-1) result = tf.gather_nd(target, idx) return tf.expand_dims(result, axis=-1) target = tf.reshape(param, (-1, param.shape[-1])) # reshape 2D target_shape = id_tensor.shape id_tensor = tf.reshape(id_tensor, (-1, 1)) # also 2D-index result = gather2d(target, id_tensor) return tf.reshape(result, target_shape) ll = torch_gather(rag_logprobs, id_tensor=target) smooth_obj = tf.reduce_sum(rag_logprobs, axis=-1, keepdims=True) # total sum of all (normalised) logits ll, smooth_obj = _mask_pads(ll, smooth_obj) # sum over tokens, exclude bos while scoring if exclude_bos_score and use_bos: ll = tf.reduce_sum(ll[:, :, 1:], axis=2) else: ll = tf.reduce_sum(ll, axis=2) smooth_obj = tf.reduce_sum(smooth_obj, axis=2) ll = tf.math.reduce_logsumexp(ll, axis=1) # logsumexp over docs smooth_obj = tf.math.reduce_logsumexp(smooth_obj, axis=1) nll_loss = -ll smooth_loss = -smooth_obj if reduce_loss: nll_loss = tf.reduce_sum(nll_loss) smooth_loss = tf.reduce_sum(smooth_loss) eps_i = epsilon / rag_logprobs.shape[-1] loss = (1.0 - epsilon) * nll_loss + eps_i * smooth_loss return loss def generate( self, input_ids: Optional[tf.Tensor] = None, attention_mask: Optional[tf.Tensor] = None, context_input_ids=None, context_attention_mask=None, doc_scores=None, do_deduplication=None, # defaults to True num_return_sequences=None, # defaults to 1 num_beams=None, # defaults to 1 n_docs=None, **model_kwargs ): """ Implements RAG sequence "thorough" decoding. 
Read the :meth:`~transformers.PreTrainedModel.generate`` documentation for more information on how to set other generate input parameters Args: input_ids (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): The sequence used as a prompt for the generation. If :obj:`input_ids` is not passed, then :obj:`context_input_ids` has to be provided. attention_mask (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. `What are attention masks? <../glossary.html#attention-mask>`__ context_input_ids (:obj:`tf.Tensor` of shape :obj:`(batch_size * config.n_docs, config.max_combined_length)`, `optional`, returned when `output_retrieved=True`): Input IDs post-processed from the retrieved documents and the question encoder input_ids by the retriever. context_attention_mask (:obj:`tf.Tensor` of shape :obj:`(batch_size * config.n_docs, config.max_combined_length)`, `optional`, returned when `output_retrieved=True`): Attention mask post-processed from the retrieved documents and the question encoder :obj:`input_ids` by the retriever. If the model has is not initialized with a ``retriever`` or ``input_ids`` is not given, :obj:`context_input_ids` and :obj:`context_attention_mask` have to be provided to the forward pass. They are returned by :meth:`~transformers.RagRetriever.__call__`. doc_scores (:obj:`tf.Tensor` of shape :obj:`(batch_size, config.n_docs)`): Score between each retrieved document embeddings (see :obj:`retrieved_doc_embeds`) and :obj:`question_encoder_last_hidden_state`. If the model has is not initialized with a ``retriever`` or ``input_ids`` is not given, :obj:`doc_scores` has to be provided to the forward pass. :obj:`doc_scores` are returned by :meth:`~transformers.RagRetriever.__call__`. do_deduplication (:obj:`bool`, `optional`): Whether or not to deduplicate the generations from different context documents for a given input. Has to be set to :obj:`False` if used while training with distributed backend. num_return_sequences(:obj:`int`, `optional`, defaults to 1): The number of independently computed returned sequences for each element in the batch. Note that this is not the value we pass to the ``generator``'s `:func:`~transformers.PreTrainedModel.generate`` function, where we set ``num_return_sequences`` to :obj:`num_beams`. num_beams (:obj:`int`, `optional`, defaults to 1): Number of beams for beam search. 1 means no beam search. n_docs (:obj:`int`, `optional`, defaults to :obj:`config.n_docs`) Number of documents to retrieve and/or number of documents for which to generate an answer. kwargs: Additional kwargs will be passed to :meth:`~transformers.PreTrainedModel.generate` Return: :obj:`tf.Tensor` of shape :obj:`(batch_size * num_return_sequences, sequence_length)`: The generated sequences. The second dimension (sequence length) is either equal to :obj:`max_length` or shorter if all batches finished early due to the :obj:`eos_token_id`. 
""" n_docs = n_docs if n_docs is not None else self.config.n_docs do_deduplication = do_deduplication if do_deduplication is not None else self.config.do_deduplication num_doc_return_sequences = ( num_return_sequences if num_return_sequences is not None else self.config.num_return_sequences ) num_beams = num_beams if num_beams is not None else self.config.num_beams assert ( input_ids is not None or context_input_ids is not None ), " At least one of input_ids or context_input_ids must be given" if self.retriever is not None and context_input_ids is None: question_hidden_states = self.question_encoder(input_ids, attention_mask=attention_mask)[0] context_input_ids = self.retriever( input_ids, question_hidden_states.numpy(), prefix=self.generator.config.prefix, n_docs=n_docs, return_tensors="tf", )["context_input_ids"] hypos = [] model_kwargs["num_beams"] = num_beams model_kwargs["num_return_sequences"] = num_beams # put here so that not confused with num_doc_return_sequences model_kwargs["attention_mask"] = None batch_size = input_ids.shape[0] if input_ids is not None else context_input_ids.shape[0] // n_docs for index in range(batch_size): # first, generate beams from documents: generator_input_ids = context_input_ids[index * n_docs : (index + 1) * n_docs] # (n_docs, max_len) output_sequences = self.generator.generate( generator_input_ids, **model_kwargs, ) # n_docs * n_beam, tgt_len if do_deduplication: # do_deduplication -- for TF, work on Eager mode only! output_sequences = tf.stack(list({str(k.numpy().tolist()): k for k in output_sequences}.values())) num_candidates = output_sequences.shape[ 0 ] # after deduplication, this number can be less than n_docs*n_beam # then, run model forwards to get nll scores: if input_ids is not None: new_input_ids = tf.tile(input_ids[index : index + 1], (num_candidates, 1)) outputs = self(new_input_ids, labels=output_sequences, exclude_bos_score=True) else: # input_ids is None, need context_input_ids/mask and doc_scores assert ( context_attention_mask is not None ), "Make sure that `context_attention_mask` are passed, if no `input_ids` is set. Alternatively, you can set a retriever using the `set_retriever(...)` function." assert ( doc_scores is not None ), "Make sure that `doc_scores` are passed, if no `input_ids` is set. Alternatively, you can set a retriever using the `set_retriever(...)` function." 
individual_input_ids = tf.tile( generator_input_ids, (num_candidates, 1) ) # (num_candidates*n_docs, max_len) individual_attention_mask = context_attention_mask[index * n_docs : (index + 1) * n_docs] individual_attention_mask = tf.tile(individual_attention_mask, (num_candidates, 1)) individual_doc_scores = doc_scores[index : (index + 1), :] # doc_scores.shape = [batch, n_docs] individual_doc_scores = tf.tile(individual_doc_scores, (num_candidates, 1)) # [num_candidates, n_docs] outputs = self( input_ids=None, context_input_ids=individual_input_ids, context_attention_mask=individual_attention_mask, doc_scores=individual_doc_scores, labels=output_sequences, exclude_bos_score=True, ) top_cand_inds = tf.math.top_k((-outputs["loss"]), k=num_doc_return_sequences)[1] # add hypothesis hypos.append(tf.gather(output_sequences, top_cand_inds)) return self._cat_and_pad(hypos, pad_token_id=self.config.generator.pad_token_id) @staticmethod def _cat_and_pad(tensors, pad_token_id): # used by generate(): tensors is a (batched) list of (candidates, len); len is varied across batch # Initialize padded tensor with shape ( all_candidates , max_candidate_length ), # where all_candidates counted from all inputs new_shape = sum([t.shape[0] for t in tensors]), max([t.shape[1] for t in tensors]) output = tf.fill(new_shape, pad_token_id) # Normal tensor doesn't support slice assignment, so we need tf.Variable output = tf.Variable(output) # Assign, and then convert back to tensor ind = 0 for t in tensors: output[ind : ind + t.shape[0], : t.shape[1]].assign(t) ind += t.shape[0] output = tf.convert_to_tensor(output) return tf.cast(output, tensors[0][0][0].dtype)
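The `torch_gather` helper defined inside `get_nll` above emulates `torch.gather` along the last axis with a reshape-to-2D trick. The following minimal sketch is not part of the file; the function name `gather_last_axis` and the toy shapes are invented purely to illustrate the same trick under the assumption that the trailing index axis has size 1.

import tensorflow as tf

def gather_last_axis(params, indices):
    # Equivalent of torch.gather(params, dim=-1, index=indices) for indices with a trailing size-1 axis.
    flat_params = tf.reshape(params, (-1, params.shape[-1]))                  # (N, vocab)
    flat_indices = tf.reshape(indices, (-1, 1))                               # (N, 1)
    idx = tf.stack([tf.range(tf.shape(flat_indices)[0]), flat_indices[:, 0]], axis=-1)
    gathered = tf.gather_nd(flat_params, idx)                                 # (N,)
    return tf.reshape(gathered, indices.shape)                                # restore the index shape

# Toy shapes mirroring get_nll: (batch=1, n_docs=2, tgt_len=3, vocab=5) log-probs,
# and one target id per position with a trailing axis of size 1.
logprobs = tf.nn.log_softmax(tf.random.normal((1, 2, 3, 5)), axis=-1)
targets = tf.constant([[[[1], [0], [4]], [[2], [3], [0]]]], dtype=tf.int32)   # (1, 2, 3, 1)
token_logprobs = gather_last_axis(logprobs, targets)                          # (1, 2, 3, 1)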
AdaMix/src/transformers/models/rag/modeling_tf_rag.py/0
{ "file_path": "AdaMix/src/transformers/models/rag/modeling_tf_rag.py", "repo_id": "AdaMix", "token_count": 40947 }
65
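The core of `get_nll` in the RAG file above is the RAG-sequence marginalization: the document log-probabilities are added to the token log-probabilities once per sequence, and the per-document sequence scores are then combined with a logsumexp over the document axis. The standalone sketch below uses random tensors and a stand-in for the target-token gather; it only demonstrates the shapes and the marginalization step, not the full loss.

import tensorflow as tf

batch_size, n_docs, tgt_len, vocab = 2, 3, 4, 7
seq_logits = tf.random.normal((batch_size * n_docs, tgt_len, vocab))
doc_scores = tf.random.normal((batch_size, n_docs))

seq_logprobs = tf.nn.log_softmax(seq_logits, axis=-1)
seq_logprobs = tf.reshape(seq_logprobs, (batch_size, n_docs, tgt_len, vocab))
doc_logprobs = tf.nn.log_softmax(doc_scores, axis=1)[:, :, None, None]        # (batch, n_docs, 1, 1)

# add the document log-prob once per sequence (to the second token position, as in get_nll)
rag_logprobs = tf.concat(
    [seq_logprobs[:, :, :1, :], seq_logprobs[:, :, 1:2, :] + doc_logprobs, seq_logprobs[:, :, 2:, :]],
    axis=2,
)                                                                              # (batch, n_docs, tgt_len, vocab)

# stand-in for gathering the target-token log-probs, then sum over the sequence length
per_doc_scores = tf.reduce_sum(tf.reduce_max(rag_logprobs, axis=-1), axis=2)   # (batch, n_docs)
# marginalize over documents: one log-likelihood per example
marginal = tf.math.reduce_logsumexp(per_doc_scores, axis=1)                    # (batch,)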
# flake8: noqa
# There's no way to ignore "F401 '...' imported but unused" warnings in this
# module, but to preserve other warnings. So, don't check this module at all.

# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...file_utils import _BaseLazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_squeezebert": ["SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "SqueezeBertConfig"],
    "tokenization_squeezebert": ["SqueezeBertTokenizer"],
}

if is_tokenizers_available():
    _import_structure["tokenization_squeezebert_fast"] = ["SqueezeBertTokenizerFast"]

if is_torch_available():
    _import_structure["modeling_squeezebert"] = [
        "SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SqueezeBertForMaskedLM",
        "SqueezeBertForMultipleChoice",
        "SqueezeBertForQuestionAnswering",
        "SqueezeBertForSequenceClassification",
        "SqueezeBertForTokenClassification",
        "SqueezeBertModel",
        "SqueezeBertModule",
        "SqueezeBertPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_squeezebert import SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, SqueezeBertConfig
    from .tokenization_squeezebert import SqueezeBertTokenizer

    if is_tokenizers_available():
        from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast

    if is_torch_available():
        from .modeling_squeezebert import (
            SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            SqueezeBertForMaskedLM,
            SqueezeBertForMultipleChoice,
            SqueezeBertForQuestionAnswering,
            SqueezeBertForSequenceClassification,
            SqueezeBertForTokenClassification,
            SqueezeBertModel,
            SqueezeBertModule,
            SqueezeBertPreTrainedModel,
        )
else:
    import importlib
    import os
    import sys

    class _LazyModule(_BaseLazyModule):
        """
        Module class that surfaces all objects but only performs associated imports when the objects are requested.
        """

        __file__ = globals()["__file__"]
        __path__ = [os.path.dirname(__file__)]

        def _get_module(self, module_name: str):
            return importlib.import_module("." + module_name, self.__name__)

    sys.modules[__name__] = _LazyModule(__name__, _import_structure)
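The `_LazyModule` pattern above defers the heavy imports (tokenizers, PyTorch models) until one of their names is actually accessed. The stripped-down illustration below is not the real `_BaseLazyModule` API: `_DemoLazyModule` and its attribute-to-submodule map are invented for the sketch, and the relative import in `__getattr__` only succeeds once such an object replaces a real package module in `sys.modules`.

import importlib
import types


class _DemoLazyModule(types.ModuleType):
    """Minimal stand-in: resolves an attribute by importing its submodule on first access."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure
        # invert {submodule: [exported names]} into {exported name: submodule}
        self._name_to_module = {
            exported: module for module, exports in import_structure.items() for exported in exports
        }

    def __getattr__(self, attr):
        module_name = self._name_to_module.get(attr)
        if module_name is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        # the relative import only works when this object stands in for a real package
        submodule = importlib.import_module("." + module_name, self.__name__)
        return getattr(submodule, attr)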
AdaMix/src/transformers/models/squeezebert/__init__.py/0
{ "file_path": "AdaMix/src/transformers/models/squeezebert/__init__.py", "repo_id": "AdaMix", "token_count": 1130 }
66
# coding=utf-8 # Copyright 2019 The Open AI Team Authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tokenization classes for XLM.""" import json import os import re import sys import unicodedata from typing import List, Optional, Tuple import sacremoses as sm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = { "vocab_file": "vocab.json", "merges_file": "merges.txt", } PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": { "xlm-mlm-en-2048": "https://huggingface.co/xlm-mlm-en-2048/resolve/main/vocab.json", "xlm-mlm-ende-1024": "https://huggingface.co/xlm-mlm-ende-1024/resolve/main/vocab.json", "xlm-mlm-enfr-1024": "https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/vocab.json", "xlm-mlm-enro-1024": "https://huggingface.co/xlm-mlm-enro-1024/resolve/main/vocab.json", "xlm-mlm-tlm-xnli15-1024": "https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/vocab.json", "xlm-mlm-xnli15-1024": "https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/vocab.json", "xlm-clm-enfr-1024": "https://huggingface.co/xlm-clm-enfr-1024/resolve/main/vocab.json", "xlm-clm-ende-1024": "https://huggingface.co/xlm-clm-ende-1024/resolve/main/vocab.json", "xlm-mlm-17-1280": "https://huggingface.co/xlm-mlm-17-1280/resolve/main/vocab.json", "xlm-mlm-100-1280": "https://huggingface.co/xlm-mlm-100-1280/resolve/main/vocab.json", }, "merges_file": { "xlm-mlm-en-2048": "https://huggingface.co/xlm-mlm-en-2048/resolve/main/merges.txt", "xlm-mlm-ende-1024": "https://huggingface.co/xlm-mlm-ende-1024/resolve/main/merges.txt", "xlm-mlm-enfr-1024": "https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/merges.txt", "xlm-mlm-enro-1024": "https://huggingface.co/xlm-mlm-enro-1024/resolve/main/merges.txt", "xlm-mlm-tlm-xnli15-1024": "https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/merges.txt", "xlm-mlm-xnli15-1024": "https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/merges.txt", "xlm-clm-enfr-1024": "https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/merges.txt", "xlm-clm-ende-1024": "https://huggingface.co/xlm-mlm-ende-1024/resolve/main/merges.txt", "xlm-mlm-17-1280": "https://huggingface.co/xlm-mlm-17-1280/resolve/main/merges.txt", "xlm-mlm-100-1280": "https://huggingface.co/xlm-mlm-100-1280/resolve/main/merges.txt", }, } PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { "xlm-mlm-en-2048": 512, "xlm-mlm-ende-1024": 512, "xlm-mlm-enfr-1024": 512, "xlm-mlm-enro-1024": 512, "xlm-mlm-tlm-xnli15-1024": 512, "xlm-mlm-xnli15-1024": 512, "xlm-clm-enfr-1024": 512, "xlm-clm-ende-1024": 512, "xlm-mlm-17-1280": 512, "xlm-mlm-100-1280": 512, } PRETRAINED_INIT_CONFIGURATION = { "xlm-mlm-en-2048": {"do_lowercase_and_remove_accent": True}, "xlm-mlm-ende-1024": { "do_lowercase_and_remove_accent": True, "id2lang": {0: "de", 1: "en"}, "lang2id": {"de": 0, "en": 1}, }, "xlm-mlm-enfr-1024": { "do_lowercase_and_remove_accent": True, "id2lang": {0: "en", 1: "fr"}, "lang2id": {"en": 0, "fr": 1}, }, "xlm-mlm-enro-1024": { 
"do_lowercase_and_remove_accent": True, "id2lang": {0: "en", 1: "ro"}, "lang2id": {"en": 0, "ro": 1}, }, "xlm-mlm-tlm-xnli15-1024": { "do_lowercase_and_remove_accent": True, "id2lang": { 0: "ar", 1: "bg", 2: "de", 3: "el", 4: "en", 5: "es", 6: "fr", 7: "hi", 8: "ru", 9: "sw", 10: "th", 11: "tr", 12: "ur", 13: "vi", 14: "zh", }, "lang2id": { "ar": 0, "bg": 1, "de": 2, "el": 3, "en": 4, "es": 5, "fr": 6, "hi": 7, "ru": 8, "sw": 9, "th": 10, "tr": 11, "ur": 12, "vi": 13, "zh": 14, }, }, "xlm-mlm-xnli15-1024": { "do_lowercase_and_remove_accent": True, "id2lang": { 0: "ar", 1: "bg", 2: "de", 3: "el", 4: "en", 5: "es", 6: "fr", 7: "hi", 8: "ru", 9: "sw", 10: "th", 11: "tr", 12: "ur", 13: "vi", 14: "zh", }, "lang2id": { "ar": 0, "bg": 1, "de": 2, "el": 3, "en": 4, "es": 5, "fr": 6, "hi": 7, "ru": 8, "sw": 9, "th": 10, "tr": 11, "ur": 12, "vi": 13, "zh": 14, }, }, "xlm-clm-enfr-1024": { "do_lowercase_and_remove_accent": True, "id2lang": {0: "en", 1: "fr"}, "lang2id": {"en": 0, "fr": 1}, }, "xlm-clm-ende-1024": { "do_lowercase_and_remove_accent": True, "id2lang": {0: "de", 1: "en"}, "lang2id": {"de": 0, "en": 1}, }, "xlm-mlm-17-1280": { "do_lowercase_and_remove_accent": False, "id2lang": { 0: "ar", 1: "de", 2: "en", 3: "es", 4: "fr", 5: "hi", 6: "it", 7: "ja", 8: "ko", 9: "nl", 10: "pl", 11: "pt", 12: "ru", 13: "sv", 14: "tr", 15: "vi", 16: "zh", }, "lang2id": { "ar": 0, "de": 1, "en": 2, "es": 3, "fr": 4, "hi": 5, "it": 6, "ja": 7, "ko": 8, "nl": 9, "pl": 10, "pt": 11, "ru": 12, "sv": 13, "tr": 14, "vi": 15, "zh": 16, }, }, "xlm-mlm-100-1280": { "do_lowercase_and_remove_accent": False, "id2lang": { 0: "af", 1: "als", 2: "am", 3: "an", 4: "ang", 5: "ar", 6: "arz", 7: "ast", 8: "az", 9: "bar", 10: "be", 11: "bg", 12: "bn", 13: "br", 14: "bs", 15: "ca", 16: "ceb", 17: "ckb", 18: "cs", 19: "cy", 20: "da", 21: "de", 22: "el", 23: "en", 24: "eo", 25: "es", 26: "et", 27: "eu", 28: "fa", 29: "fi", 30: "fr", 31: "fy", 32: "ga", 33: "gan", 34: "gl", 35: "gu", 36: "he", 37: "hi", 38: "hr", 39: "hu", 40: "hy", 41: "ia", 42: "id", 43: "is", 44: "it", 45: "ja", 46: "jv", 47: "ka", 48: "kk", 49: "kn", 50: "ko", 51: "ku", 52: "la", 53: "lb", 54: "lt", 55: "lv", 56: "mk", 57: "ml", 58: "mn", 59: "mr", 60: "ms", 61: "my", 62: "nds", 63: "ne", 64: "nl", 65: "nn", 66: "no", 67: "oc", 68: "pl", 69: "pt", 70: "ro", 71: "ru", 72: "scn", 73: "sco", 74: "sh", 75: "si", 76: "simple", 77: "sk", 78: "sl", 79: "sq", 80: "sr", 81: "sv", 82: "sw", 83: "ta", 84: "te", 85: "th", 86: "tl", 87: "tr", 88: "tt", 89: "uk", 90: "ur", 91: "uz", 92: "vi", 93: "war", 94: "wuu", 95: "yi", 96: "zh", 97: "zh_classical", 98: "zh_min_nan", 99: "zh_yue", }, "lang2id": { "af": 0, "als": 1, "am": 2, "an": 3, "ang": 4, "ar": 5, "arz": 6, "ast": 7, "az": 8, "bar": 9, "be": 10, "bg": 11, "bn": 12, "br": 13, "bs": 14, "ca": 15, "ceb": 16, "ckb": 17, "cs": 18, "cy": 19, "da": 20, "de": 21, "el": 22, "en": 23, "eo": 24, "es": 25, "et": 26, "eu": 27, "fa": 28, "fi": 29, "fr": 30, "fy": 31, "ga": 32, "gan": 33, "gl": 34, "gu": 35, "he": 36, "hi": 37, "hr": 38, "hu": 39, "hy": 40, "ia": 41, "id": 42, "is": 43, "it": 44, "ja": 45, "jv": 46, "ka": 47, "kk": 48, "kn": 49, "ko": 50, "ku": 51, "la": 52, "lb": 53, "lt": 54, "lv": 55, "mk": 56, "ml": 57, "mn": 58, "mr": 59, "ms": 60, "my": 61, "nds": 62, "ne": 63, "nl": 64, "nn": 65, "no": 66, "oc": 67, "pl": 68, "pt": 69, "ro": 70, "ru": 71, "scn": 72, "sco": 73, "sh": 74, "si": 75, "simple": 76, "sk": 77, "sl": 78, "sq": 79, "sr": 80, "sv": 81, "sw": 82, "ta": 83, "te": 84, "th": 85, "tl": 86, "tr": 87, "tt": 
88, "uk": 89, "ur": 90, "uz": 91, "vi": 92, "war": 93, "wuu": 94, "yi": 95, "zh": 96, "zh_classical": 97, "zh_min_nan": 98, "zh_yue": 99, }, }, } def get_pairs(word): """ Return set of symbol pairs in a word. word is represented as tuple of symbols (symbols being variable-length strings) """ pairs = set() prev_char = word[0] for char in word[1:]: pairs.add((prev_char, char)) prev_char = char return pairs def lowercase_and_remove_accent(text): """ Lowercase and strips accents from a piece of text based on https://github.com/facebookresearch/XLM/blob/master/tools/lowercase_and_remove_accent.py """ text = " ".join(text) text = text.lower() text = unicodedata.normalize("NFD", text) output = [] for char in text: cat = unicodedata.category(char) if cat == "Mn": continue output.append(char) return "".join(output).lower().split(" ") def replace_unicode_punct(text): """ Port of https://github.com/moses-smt/mosesdecoder/blob/master/scripts/tokenizer/replace-unicode-punctuation.perl """ text = text.replace(",", ",") text = re.sub(r"。\s*", ". ", text) text = text.replace("、", ",") text = text.replace("”", '"') text = text.replace("“", '"') text = text.replace("∶", ":") text = text.replace(":", ":") text = text.replace("?", "?") text = text.replace("《", '"') text = text.replace("》", '"') text = text.replace(")", ")") text = text.replace("!", "!") text = text.replace("(", "(") text = text.replace(";", ";") text = text.replace("1", "1") text = text.replace("」", '"') text = text.replace("「", '"') text = text.replace("0", "0") text = text.replace("3", "3") text = text.replace("2", "2") text = text.replace("5", "5") text = text.replace("6", "6") text = text.replace("9", "9") text = text.replace("7", "7") text = text.replace("8", "8") text = text.replace("4", "4") text = re.sub(r".\s*", ". ", text) text = text.replace("~", "~") text = text.replace("’", "'") text = text.replace("…", "...") text = text.replace("━", "-") text = text.replace("〈", "<") text = text.replace("〉", ">") text = text.replace("【", "[") text = text.replace("】", "]") text = text.replace("%", "%") return text def remove_non_printing_char(text): """ Port of https://github.com/moses-smt/mosesdecoder/blob/master/scripts/tokenizer/remove-non-printing-char.perl """ output = [] for char in text: cat = unicodedata.category(char) if cat.startswith("C"): continue output.append(char) return "".join(output) def romanian_preprocessing(text): """Sennrich's WMT16 scripts for Romanian preprocessing, used by model `xlm-mlm-enro-1024`""" # https://github.com/rsennrich/wmt16-scripts/blob/master/preprocess/normalise-romanian.py text = text.replace("\u015e", "\u0218").replace("\u015f", "\u0219") text = text.replace("\u0162", "\u021a").replace("\u0163", "\u021b") # https://github.com/rsennrich/wmt16-scripts/blob/master/preprocess/remove-diacritics.py text = text.replace("\u0218", "S").replace("\u0219", "s") # s-comma text = text.replace("\u021a", "T").replace("\u021b", "t") # t-comma text = text.replace("\u0102", "A").replace("\u0103", "a") text = text.replace("\u00C2", "A").replace("\u00E2", "a") text = text.replace("\u00CE", "I").replace("\u00EE", "i") return text class XLMTokenizer(PreTrainedTokenizer): """ Construct an XLM tokenizer. Based on Byte-Pair Encoding. The tokenization process is the following: - Moses preprocessing and tokenization for most supported languages. - Language specific tokenization for Chinese (Jieba), Japanese (KyTea) and Thai (PyThaiNLP). - Optionally lowercases and normalizes all inputs text. 
- The arguments ``special_tokens`` and the function ``set_special_tokens``, can be used to add additional symbols (like "__classify__") to a vocabulary. - The :obj:`lang2id` attribute maps the languages supported by the model with their IDs if provided (automatically set for pretrained vocabularies). - The :obj:`id2lang` attributes does reverse mapping if provided (automatically set for pretrained vocabularies). This tokenizer inherits from :class:`~transformers.PreTrainedTokenizer` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (:obj:`str`): Vocabulary file. merges_file (:obj:`str`): Merges file. unk_token (:obj:`str`, `optional`, defaults to :obj:`"<unk>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. bos_token (:obj:`str`, `optional`, defaults to :obj:`"<s>"`): The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token. .. note:: When building a sequence using special tokens, this is not the token that is used for the beginning of sequence. The token used is the :obj:`cls_token`. sep_token (:obj:`str`, `optional`, defaults to :obj:`"</s>"`): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. pad_token (:obj:`str`, `optional`, defaults to :obj:`"<pad>"`): The token used for padding, for example when batching sequences of different lengths. cls_token (:obj:`str`, `optional`, defaults to :obj:`"</s>"`): The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens. mask_token (:obj:`str`, `optional`, defaults to :obj:`"<special1>"`): The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict. additional_special_tokens (:obj:`List[str]`, `optional`, defaults to :obj:`["<special0>","<special1>","<special2>","<special3>","<special4>","<special5>","<special6>","<special7>","<special8>","<special9>"]`): List of additional special tokens. lang2id (:obj:`Dict[str, int]`, `optional`): Dictionary mapping languages string identifiers to their IDs. id2lang (:obj:`Dict[int, str]`, `optional`): Dictionary mapping language IDs to their string identifiers. do_lowercase_and_remove_accent (:obj:`bool`, `optional`, defaults to :obj:`True`): Whether to lowercase and remove accents when tokenizing. 
""" vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self, vocab_file, merges_file, unk_token="<unk>", bos_token="<s>", sep_token="</s>", pad_token="<pad>", cls_token="</s>", mask_token="<special1>", additional_special_tokens=[ "<special0>", "<special1>", "<special2>", "<special3>", "<special4>", "<special5>", "<special6>", "<special7>", "<special8>", "<special9>", ], lang2id=None, id2lang=None, do_lowercase_and_remove_accent=True, **kwargs ): super().__init__( unk_token=unk_token, bos_token=bos_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, lang2id=lang2id, id2lang=id2lang, do_lowercase_and_remove_accent=do_lowercase_and_remove_accent, **kwargs, ) # cache of sm.MosesPunctNormalizer instance self.cache_moses_punct_normalizer = dict() # cache of sm.MosesTokenizer instance self.cache_moses_tokenizer = dict() self.lang_with_custom_tokenizer = set(["zh", "th", "ja"]) # True for current supported model (v1.2.0), False for XLM-17 & 100 self.do_lowercase_and_remove_accent = do_lowercase_and_remove_accent self.lang2id = lang2id self.id2lang = id2lang if lang2id is not None and id2lang is not None: assert len(lang2id) == len(id2lang) self.ja_word_tokenizer = None self.zh_word_tokenizer = None with open(vocab_file, encoding="utf-8") as vocab_handle: self.encoder = json.load(vocab_handle) self.decoder = {v: k for k, v in self.encoder.items()} with open(merges_file, encoding="utf-8") as merges_handle: merges = merges_handle.read().split("\n")[:-1] merges = [tuple(merge.split()[:2]) for merge in merges] self.bpe_ranks = dict(zip(merges, range(len(merges)))) self.cache = {} @property def do_lower_case(self): return self.do_lowercase_and_remove_accent def moses_punct_norm(self, text, lang): if lang not in self.cache_moses_punct_normalizer: punct_normalizer = sm.MosesPunctNormalizer(lang=lang) self.cache_moses_punct_normalizer[lang] = punct_normalizer else: punct_normalizer = self.cache_moses_punct_normalizer[lang] return punct_normalizer.normalize(text) def moses_tokenize(self, text, lang): if lang not in self.cache_moses_tokenizer: moses_tokenizer = sm.MosesTokenizer(lang=lang) self.cache_moses_tokenizer[lang] = moses_tokenizer else: moses_tokenizer = self.cache_moses_tokenizer[lang] return moses_tokenizer.tokenize(text, return_str=False, escape=False) def moses_pipeline(self, text, lang): text = replace_unicode_punct(text) text = self.moses_punct_norm(text, lang) text = remove_non_printing_char(text) return text def ja_tokenize(self, text): if self.ja_word_tokenizer is None: try: import Mykytea self.ja_word_tokenizer = Mykytea.Mykytea( "-model %s/local/share/kytea/model.bin" % os.path.expanduser("~") ) except (AttributeError, ImportError): logger.error( "Make sure you install KyTea (https://github.com/neubig/kytea) and it's python wrapper (https://github.com/chezou/Mykytea-python) with the following steps" ) logger.error("1. git clone git@github.com:neubig/kytea.git && cd kytea") logger.error("2. autoreconf -i") logger.error("3. ./configure --prefix=$HOME/local") logger.error("4. make && make install") logger.error("5. 
pip install kytea") raise return list(self.ja_word_tokenizer.getWS(text)) @property def vocab_size(self): return len(self.encoder) def get_vocab(self): return dict(self.encoder, **self.added_tokens_encoder) def bpe(self, token): word = tuple(token[:-1]) + (token[-1] + "</w>",) if token in self.cache: return self.cache[token] pairs = get_pairs(word) if not pairs: return token + "</w>" while True: bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf"))) if bigram not in self.bpe_ranks: break first, second = bigram new_word = [] i = 0 while i < len(word): try: j = word.index(first, i) except ValueError: new_word.extend(word[i:]) break else: new_word.extend(word[i:j]) i = j if word[i] == first and i < len(word) - 1 and word[i + 1] == second: new_word.append(first + second) i += 2 else: new_word.append(word[i]) i += 1 new_word = tuple(new_word) word = new_word if len(word) == 1: break else: pairs = get_pairs(word) word = " ".join(word) if word == "\n </w>": word = "\n</w>" self.cache[token] = word return word def _tokenize(self, text, lang="en", bypass_tokenizer=False): """ Tokenize a string given language code. For Chinese, Japanese and Thai, we use a language specific tokenizerself. Otherwise, we use Moses. Details of tokenization: - [sacremoses](https://github.com/alvations/sacremoses): port of Moses - Install with `pip install sacremoses` - [pythainlp](https://github.com/PyThaiNLP/pythainlp): Thai tokenizer - Install with `pip install pythainlp` - [kytea](https://github.com/chezou/Mykytea-python): Japanese tokenizer, wrapper of [KyTea](https://github.com/neubig/kytea) - Install with the following steps: :: git clone git@github.com:neubig/kytea.git && cd kytea autoreconf -i ./configure --prefix=$HOME/local make && make install pip install kytea - [jieba](https://github.com/fxsjy/jieba): Chinese tokenizer (*) - Install with `pip install jieba` (*) The original XLM used [Stanford Segmenter](https://nlp.stanford.edu/software/stanford-segmenter-2018-10-16.zip). However, the wrapper (`nltk.tokenize.stanford_segmenter`) is slow due to JVM overhead, and it will be deprecated. Jieba is a lot faster and pip-installable. Note there is some mismatch with the Stanford Segmenter. It should be fine if you fine-tune the model with Chinese supervisionself. If you want the same exact behaviour, use the original XLM [preprocessing script](https://github.com/facebookresearch/XLM/tree/master/tools) to tokenize the sentence externally, and set `bypass_tokenizer=True` to bypass the tokenizer. Args: - lang: ISO language code (default = 'en') (string). Languages should belong of the model supported languages. However, we don't enforce it. - bypass_tokenizer: Allow users to preprocess and tokenize the sentences externally (default = False) (bool). If True, we only apply BPE. Returns: List of tokens. """ if lang and self.lang2id and lang not in self.lang2id: logger.error( "Supplied language code not found in lang2id mapping. Please check that your language is supported by the loaded pretrained model." 
) if bypass_tokenizer: text = text.split() elif lang not in self.lang_with_custom_tokenizer: text = self.moses_pipeline(text, lang=lang) # TODO: make sure we are using `xlm-mlm-enro-1024`, since XLM-100 doesn't have this step if lang == "ro": text = romanian_preprocessing(text) text = self.moses_tokenize(text, lang=lang) elif lang == "th": text = self.moses_pipeline(text, lang=lang) try: if "pythainlp" not in sys.modules: from pythainlp.tokenize import word_tokenize as th_word_tokenize else: th_word_tokenize = sys.modules["pythainlp"].word_tokenize except (AttributeError, ImportError): logger.error( "Make sure you install PyThaiNLP (https://github.com/PyThaiNLP/pythainlp) with the following steps" ) logger.error("1. pip install pythainlp") raise text = th_word_tokenize(text) elif lang == "zh": try: if "jieba" not in sys.modules: import jieba else: jieba = sys.modules["jieba"] except (AttributeError, ImportError): logger.error("Make sure you install Jieba (https://github.com/fxsjy/jieba) with the following steps") logger.error("1. pip install jieba") raise text = " ".join(jieba.cut(text)) text = self.moses_pipeline(text, lang=lang) text = text.split() elif lang == "ja": text = self.moses_pipeline(text, lang=lang) text = self.ja_tokenize(text) else: raise ValueError("It should not reach here") if self.do_lowercase_and_remove_accent and not bypass_tokenizer: text = lowercase_and_remove_accent(text) split_tokens = [] for token in text: if token: split_tokens.extend([t for t in self.bpe(token).split(" ")]) return split_tokens def _convert_token_to_id(self, token): """ Converts a token (str) in an id using the vocab. """ return self.encoder.get(token, self.encoder.get(self.unk_token)) def _convert_id_to_token(self, index): """Converts an index (integer) in a token (str) using the vocab.""" return self.decoder.get(index, self.unk_token) def convert_tokens_to_string(self, tokens): """ Converts a sequence of tokens (string) in a single string. """ out_string = "".join(tokens).replace("</w>", " ").strip() return out_string def build_inputs_with_special_tokens( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. An XLM sequence has the following format: - single sequence: ``<s> X </s>`` - pair of sequences: ``<s> A </s> B </s>`` Args: token_ids_0 (:obj:`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (:obj:`List[int]`, `optional`): Optional second list of IDs for sequence pairs. Returns: :obj:`List[int]`: List of `input IDs <../glossary.html#input-ids>`__ with the appropriate special tokens. """ bos = [self.bos_token_id] sep = [self.sep_token_id] if token_ids_1 is None: return bos + token_ids_0 + sep return bos + token_ids_0 + sep + token_ids_1 + sep def get_special_tokens_mask( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False ) -> List[int]: """ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer ``prepare_for_model`` method. Args: token_ids_0 (:obj:`List[int]`): List of IDs. token_ids_1 (:obj:`List[int]`, `optional`): Optional second list of IDs for sequence pairs. already_has_special_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not the token list is already formatted with special tokens for the model. 
Returns: :obj:`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. """ if already_has_special_tokens: if token_ids_1 is not None: raise ValueError( "You should not supply a second sequence if the provided sequence of " "ids is already formatted with special tokens for the model." ) return list( map( lambda x: 1 if x in [self.sep_token_id, self.cls_token_id] else 0, token_ids_0, ) ) if token_ids_1 is not None: return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1] return [1] + ([0] * len(token_ids_0)) + [1] def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Create a mask from the two sequences passed to be used in a sequence-pair classification task. An XLM sequence pair mask has the following format: :: 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 | first sequence | second sequence | If :obj:`token_ids_1` is :obj:`None`, this method only returns the first portion of the mask (0s). Args: token_ids_0 (:obj:`List[int]`): List of IDs. token_ids_1 (:obj:`List[int]`, `optional`): Optional second list of IDs for sequence pairs. Returns: :obj:`List[int]`: List of `token type IDs <../glossary.html#token-type-ids>`_ according to the given sequence(s). """ sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: if not os.path.isdir(save_directory): logger.error("Vocabulary path ({}) should be a directory".format(save_directory)) return vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) merge_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] ) with open(vocab_file, "w", encoding="utf-8") as f: f.write(json.dumps(self.encoder, ensure_ascii=False)) index = 0 with open(merge_file, "w", encoding="utf-8") as writer: for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]): if index != token_index: logger.warning( "Saving vocabulary to {}: BPE merge indices are not consecutive." " Please check that the tokenizer is not corrupted!".format(merge_file) ) index = token_index writer.write(" ".join(bpe_tokens) + "\n") index += 1 return vocab_file, merge_file
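To make the last few helpers of the tokenizer above concrete, here is the layout they produce for a hypothetical sequence pair. All IDs below (11, 12, ... and the bos/sep values 0 and 1) are invented for illustration and do not come from a real XLM vocabulary.

# <s> = 0 and </s> = 1 are assumed IDs for this illustration only.
bos_id, sep_id = 0, 1
seq_a = [11, 12, 13]   # hypothetical IDs for the first sentence
seq_b = [21, 22]       # hypothetical IDs for the second sentence

# build_inputs_with_special_tokens: <s> A </s> B </s>
input_ids = [bos_id] + seq_a + [sep_id] + seq_b + [sep_id]
# -> [0, 11, 12, 13, 1, 21, 22, 1]

# get_special_tokens_mask (when the pair has no special tokens yet): 1 marks the added specials
special_tokens_mask = [1] + [0] * len(seq_a) + [1] + [0] * len(seq_b) + [1]
# -> [1, 0, 0, 0, 1, 0, 0, 1]

# create_token_type_ids_from_sequences: 0 for the first segment (with its specials), 1 for the second
token_type_ids = [0] * (len(seq_a) + 2) + [1] * (len(seq_b) + 1)
# -> [0, 0, 0, 0, 0, 1, 1, 1]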
AdaMix/src/transformers/models/xlm/tokenization_xlm.py/0
{ "file_path": "AdaMix/src/transformers/models/xlm/tokenization_xlm.py", "repo_id": "AdaMix", "token_count": 18277 }
67
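Before moving on to the optimization utilities, a toy re-implementation of the greedy BPE merge loop used by the XLM tokenizer's `bpe()` method in the previous record may help. The two-entry merge table and the example tokens are made up; the real tokenizer loads its ranks from `merges.txt` and caches results per token.

def get_pairs(word):
    # set of adjacent symbol pairs in a word represented as a tuple of symbols
    return {(a, b) for a, b in zip(word, word[1:])}

def toy_bpe(token, bpe_ranks):
    # mark the end of the word, as the XLM tokenizer does
    word = tuple(token[:-1]) + (token[-1] + "</w>",)
    pairs = get_pairs(word)
    while pairs:
        # always merge the lowest-ranked (most frequent) pair first
        bigram = min(pairs, key=lambda p: bpe_ranks.get(p, float("inf")))
        if bigram not in bpe_ranks:
            break
        first, second = bigram
        merged, i = [], 0
        while i < len(word):
            if i < len(word) - 1 and word[i] == first and word[i + 1] == second:
                merged.append(first + second)
                i += 2
            else:
                merged.append(word[i])
                i += 1
        word = tuple(merged)
        pairs = get_pairs(word) if len(word) > 1 else set()
    return " ".join(word)

ranks = {("l", "o"): 0, ("lo", "w</w>"): 1}
print(toy_bpe("low", ranks))   # -> "low</w>"  (both merges apply, fully merged)
print(toy_bpe("lot", ranks))   # -> "lo t</w>" (only the first merge applies)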
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch optimization for BERT model.""" import math from typing import Callable, Iterable, Optional, Tuple, Union import torch from torch.optim import Optimizer from torch.optim.lr_scheduler import LambdaLR from .trainer_utils import SchedulerType from .utils import logging logger = logging.get_logger(__name__) def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1): """ Create a schedule with a constant learning rate, using the learning rate set in optimizer. Args: optimizer (:class:`~torch.optim.Optimizer`): The optimizer for which to schedule the learning rate. last_epoch (:obj:`int`, `optional`, defaults to -1): The index of the last epoch when resuming training. Return: :obj:`torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule. """ return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch) def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1): """ Create a schedule with a constant learning rate preceded by a warmup period during which the learning rate increases linearly between 0 and the initial lr set in the optimizer. Args: optimizer (:class:`~torch.optim.Optimizer`): The optimizer for which to schedule the learning rate. num_warmup_steps (:obj:`int`): The number of steps for the warmup phase. last_epoch (:obj:`int`, `optional`, defaults to -1): The index of the last epoch when resuming training. Return: :obj:`torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule. """ def lr_lambda(current_step: int): if current_step < num_warmup_steps: return float(current_step) / float(max(1.0, num_warmup_steps)) return 1.0 return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch) def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1): """ Create a schedule with a learning rate that decreases linearly from the initial lr set in the optimizer to 0, after a warmup period during which it increases linearly from 0 to the initial lr set in the optimizer. Args: optimizer (:class:`~torch.optim.Optimizer`): The optimizer for which to schedule the learning rate. num_warmup_steps (:obj:`int`): The number of steps for the warmup phase. num_training_steps (:obj:`int`): The total number of training steps. last_epoch (:obj:`int`, `optional`, defaults to -1): The index of the last epoch when resuming training. Return: :obj:`torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule. 
""" def lr_lambda(current_step: int): if current_step < num_warmup_steps: return float(current_step) / float(max(1, num_warmup_steps)) return max( 0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps)) ) return LambdaLR(optimizer, lr_lambda, last_epoch) def get_cosine_schedule_with_warmup( optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1 ): """ Create a schedule with a learning rate that decreases following the values of the cosine function between the initial lr set in the optimizer to 0, after a warmup period during which it increases linearly between 0 and the initial lr set in the optimizer. Args: optimizer (:class:`~torch.optim.Optimizer`): The optimizer for which to schedule the learning rate. num_warmup_steps (:obj:`int`): The number of steps for the warmup phase. num_training_steps (:obj:`int`): The total number of training steps. num_cycles (:obj:`float`, `optional`, defaults to 0.5): The number of waves in the cosine schedule (the defaults is to just decrease from the max value to 0 following a half-cosine). last_epoch (:obj:`int`, `optional`, defaults to -1): The index of the last epoch when resuming training. Return: :obj:`torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule. """ def lr_lambda(current_step): if current_step < num_warmup_steps: return float(current_step) / float(max(1, num_warmup_steps)) progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps)) return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress))) return LambdaLR(optimizer, lr_lambda, last_epoch) def get_cosine_with_hard_restarts_schedule_with_warmup( optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1 ): """ Create a schedule with a learning rate that decreases following the values of the cosine function between the initial lr set in the optimizer to 0, with several hard restarts, after a warmup period during which it increases linearly between 0 and the initial lr set in the optimizer. Args: optimizer (:class:`~torch.optim.Optimizer`): The optimizer for which to schedule the learning rate. num_warmup_steps (:obj:`int`): The number of steps for the warmup phase. num_training_steps (:obj:`int`): The total number of training steps. num_cycles (:obj:`int`, `optional`, defaults to 1): The number of hard restarts to use. last_epoch (:obj:`int`, `optional`, defaults to -1): The index of the last epoch when resuming training. Return: :obj:`torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule. """ def lr_lambda(current_step): if current_step < num_warmup_steps: return float(current_step) / float(max(1, num_warmup_steps)) progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps)) if progress >= 1.0: return 0.0 return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0)))) return LambdaLR(optimizer, lr_lambda, last_epoch) def get_polynomial_decay_schedule_with_warmup( optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1 ): """ Create a schedule with a learning rate that decreases as a polynomial decay from the initial lr set in the optimizer to end lr defined by `lr_end`, after a warmup period during which it increases linearly from 0 to the initial lr set in the optimizer. 
Args: optimizer (:class:`~torch.optim.Optimizer`): The optimizer for which to schedule the learning rate. num_warmup_steps (:obj:`int`): The number of steps for the warmup phase. num_training_steps (:obj:`int`): The total number of training steps. lr_end (:obj:`float`, `optional`, defaults to 1e-7): The end LR. power (:obj:`float`, `optional`, defaults to 1.0): Power factor. last_epoch (:obj:`int`, `optional`, defaults to -1): The index of the last epoch when resuming training. Note: `power` defaults to 1.0 as in the fairseq implementation, which in turn is based on the original BERT implementation at https://github.com/google-research/bert/blob/f39e881b169b9d53bea03d2d341b31707a6c052b/optimization.py#L37 Return: :obj:`torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule. """ lr_init = optimizer.defaults["lr"] assert lr_init > lr_end, f"lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})" def lr_lambda(current_step: int): if current_step < num_warmup_steps: return float(current_step) / float(max(1, num_warmup_steps)) elif current_step > num_training_steps: return lr_end / lr_init # as LambdaLR multiplies by lr_init else: lr_range = lr_init - lr_end decay_steps = num_training_steps - num_warmup_steps pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps decay = lr_range * pct_remaining ** power + lr_end return decay / lr_init # as LambdaLR multiplies by lr_init return LambdaLR(optimizer, lr_lambda, last_epoch) TYPE_TO_SCHEDULER_FUNCTION = { SchedulerType.LINEAR: get_linear_schedule_with_warmup, SchedulerType.COSINE: get_cosine_schedule_with_warmup, SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup, SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup, SchedulerType.CONSTANT: get_constant_schedule, SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup, } def get_scheduler( name: Union[str, SchedulerType], optimizer: Optimizer, num_warmup_steps: Optional[int] = None, num_training_steps: Optional[int] = None, ): """ Unified API to get any scheduler from its name. Args: name (:obj:`str` or `:obj:`SchedulerType`): The name of the scheduler to use. optimizer (:obj:`torch.optim.Optimizer`): The optimizer that will be used during training. num_warmup_steps (:obj:`int`, `optional`): The number of warmup steps to do. This is not required by all schedulers (hence the argument being optional), the function will raise an error if it's unset and the scheduler type requires it. num_training_steps (:obj:`int`, `optional`): The number of training steps to do. This is not required by all schedulers (hence the argument being optional), the function will raise an error if it's unset and the scheduler type requires it. 
""" name = SchedulerType(name) schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name] if name == SchedulerType.CONSTANT: return schedule_func(optimizer) # All other schedulers require `num_warmup_steps` if num_warmup_steps is None: raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.") if name == SchedulerType.CONSTANT_WITH_WARMUP: return schedule_func(optimizer, num_warmup_steps=num_warmup_steps) # All other schedulers require `num_training_steps` if num_training_steps is None: raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.") return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps) class AdamW(Optimizer): """ Implements Adam algorithm with weight decay fix as introduced in `Decoupled Weight Decay Regularization <https://arxiv.org/abs/1711.05101>`__. Parameters: params (:obj:`Iterable[torch.nn.parameter.Parameter]`): Iterable of parameters to optimize or dictionaries defining parameter groups. lr (:obj:`float`, `optional`, defaults to 1e-3): The learning rate to use. betas (:obj:`Tuple[float,float]`, `optional`, defaults to (0.9, 0.999)): Adam's betas parameters (b1, b2). eps (:obj:`float`, `optional`, defaults to 1e-6): Adam's epsilon for numerical stability. weight_decay (:obj:`float`, `optional`, defaults to 0): Decoupled weight decay to apply. correct_bias (:obj:`bool`, `optional`, defaults to `True`): Whether ot not to correct bias in Adam (for instance, in Bert TF repository they use :obj:`False`). """ def __init__( self, params: Iterable[torch.nn.parameter.Parameter], lr: float = 1e-3, betas: Tuple[float, float] = (0.9, 0.999), eps: float = 1e-6, weight_decay: float = 0.0, correct_bias: bool = True, ): if lr < 0.0: raise ValueError("Invalid learning rate: {} - should be >= 0.0".format(lr)) if not 0.0 <= betas[0] < 1.0: raise ValueError("Invalid beta parameter: {} - should be in [0.0, 1.0[".format(betas[0])) if not 0.0 <= betas[1] < 1.0: raise ValueError("Invalid beta parameter: {} - should be in [0.0, 1.0[".format(betas[1])) if not 0.0 <= eps: raise ValueError("Invalid epsilon value: {} - should be >= 0.0".format(eps)) defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, correct_bias=correct_bias) super().__init__(params, defaults) def step(self, closure: Callable = None): """ Performs a single optimization step. Arguments: closure (:obj:`Callable`, `optional`): A closure that reevaluates the model and returns the loss. 
""" loss = None if closure is not None: loss = closure() for group in self.param_groups: for p in group["params"]: if p.grad is None: continue grad = p.grad.data if grad.is_sparse: raise RuntimeError("Adam does not support sparse gradients, please consider SparseAdam instead") state = self.state[p] # State initialization if len(state) == 0: state["step"] = 0 # Exponential moving average of gradient values state["exp_avg"] = torch.zeros_like(p.data) # Exponential moving average of squared gradient values state["exp_avg_sq"] = torch.zeros_like(p.data) exp_avg, exp_avg_sq = state["exp_avg"], state["exp_avg_sq"] beta1, beta2 = group["betas"] state["step"] += 1 # Decay the first and second moment running average coefficient # In-place operations to update the averages at the same time exp_avg.mul_(beta1).add_(grad, alpha=1.0 - beta1) exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1.0 - beta2) denom = exp_avg_sq.sqrt().add_(group["eps"]) step_size = group["lr"] if group["correct_bias"]: # No bias correction for Bert bias_correction1 = 1.0 - beta1 ** state["step"] bias_correction2 = 1.0 - beta2 ** state["step"] step_size = step_size * math.sqrt(bias_correction2) / bias_correction1 p.data.addcdiv_(exp_avg, denom, value=-step_size) # Just adding the square of the weights to the loss function is *not* # the correct way of using L2 regularization/weight decay with Adam, # since that will interact with the m and v parameters in strange ways. # # Instead we want to decay the weights in a manner that doesn't interact # with the m/v parameters. This is equivalent to adding the square # of the weights to the loss with plain (non-momentum) SGD. # Add weight decay at the end (fixed version) if group["weight_decay"] > 0.0: p.data.add_(p.data, alpha=-group["lr"] * group["weight_decay"]) return loss class Adafactor(Optimizer): """ AdaFactor pytorch implementation can be used as a drop in replacement for Adam original fairseq code: https://github.com/pytorch/fairseq/blob/master/fairseq/optim/adafactor.py Paper: `Adafactor: Adaptive Learning Rates with Sublinear Memory Cost` https://arxiv.org/abs/1804.04235 Note that this optimizer internally adjusts the learning rate depending on the *scale_parameter*, *relative_step* and *warmup_init* options. To use a manual (external) learning rate schedule you should set `scale_parameter=False` and `relative_step=False`. Arguments: params (:obj:`Iterable[torch.nn.parameter.Parameter]`): Iterable of parameters to optimize or dictionaries defining parameter groups. lr (:obj:`float`, `optional`): The external learning rate. 
eps (:obj:`Tuple[float, float]`, `optional`, defaults to (1e-30, 1e-3)): Regularization constants for square gradient and parameter scale respectively clip_threshold (:obj:`float`, `optional`, defaults 1.0): Threshold of root mean square of final gradient update decay_rate (:obj:`float`, `optional`, defaults to -0.8): Coefficient used to compute running averages of square beta1 (:obj:`float`, `optional`): Coefficient used for computing running averages of gradient weight_decay (:obj:`float`, `optional`, defaults to 0): Weight decay (L2 penalty) scale_parameter (:obj:`bool`, `optional`, defaults to :obj:`True`): If True, learning rate is scaled by root mean square relative_step (:obj:`bool`, `optional`, defaults to :obj:`True`): If True, time-dependent learning rate is computed instead of external learning rate warmup_init (:obj:`bool`, `optional`, defaults to :obj:`False`): Time-dependent learning rate computation depends on whether warm-up initialization is being used This implementation handles low-precision (FP16, bfloat) values, but we have not thoroughly tested. Recommended T5 finetuning settings: - Scheduled LR warm-up to fixed LR - disable relative updates - use clip threshold: https://arxiv.org/abs/2004.14546 Example:: Adafactor(model.parameters(), lr=1e-3, relative_step=False, warmup_init=True) - Alternatively, relative_step with warmup_init can be used. - Training without LR warmup or clip threshold is not recommended. Additional optimizer operations like gradient clipping should not be used alongside Adafactor. Usage:: # replace AdamW with Adafactor optimizer = Adafactor( model.parameters(), lr=1e-3, eps=(1e-30, 1e-3), clip_threshold=1.0, decay_rate=-0.8, beta1=None, weight_decay=0.0, relative_step=False, scale_parameter=False, warmup_init=False ) """ def __init__( self, params, lr=None, eps=(1e-30, 1e-3), clip_threshold=1.0, decay_rate=-0.8, beta1=None, weight_decay=0.0, scale_parameter=True, relative_step=True, warmup_init=False, ): if lr is not None and relative_step: raise ValueError("Cannot combine manual lr and relative_step options") if warmup_init and not relative_step: raise ValueError("warmup_init requires relative_step=True") defaults = dict( lr=lr, eps=eps, clip_threshold=clip_threshold, decay_rate=decay_rate, beta1=beta1, weight_decay=weight_decay, scale_parameter=scale_parameter, relative_step=relative_step, warmup_init=warmup_init, ) super().__init__(params, defaults) @staticmethod def _get_lr(param_group, param_state): rel_step_sz = param_group["lr"] if param_group["relative_step"]: min_step = 1e-6 * param_state["step"] if param_group["warmup_init"] else 1e-2 rel_step_sz = min(min_step, 1.0 / math.sqrt(param_state["step"])) param_scale = 1.0 if param_group["scale_parameter"]: param_scale = max(param_group["eps"][1], param_state["RMS"]) return param_scale * rel_step_sz @staticmethod def _get_options(param_group, param_shape): factored = len(param_shape) >= 2 use_first_moment = param_group["beta1"] is not None return factored, use_first_moment @staticmethod def _rms(tensor): return tensor.norm(2) / (tensor.numel() ** 0.5) @staticmethod def _approx_sq_grad(exp_avg_sq_row, exp_avg_sq_col): r_factor = (exp_avg_sq_row / exp_avg_sq_row.mean(dim=-1, keepdim=True)).rsqrt_() c_factor = exp_avg_sq_col.rsqrt() return torch.mm(r_factor.unsqueeze(-1), c_factor.unsqueeze(0)) def step(self, closure=None): """ Performs a single optimization step Arguments: closure (callable, optional): A closure that reevaluates the model and returns the loss. 
""" loss = None if closure is not None: loss = closure() for group in self.param_groups: for p in group["params"]: if p.grad is None: continue grad = p.grad.data if grad.dtype in {torch.float16, torch.bfloat16}: grad = grad.float() if grad.is_sparse: raise RuntimeError("Adafactor does not support sparse gradients.") state = self.state[p] grad_shape = grad.shape factored, use_first_moment = self._get_options(group, grad_shape) # State Initialization if len(state) == 0: state["step"] = 0 if use_first_moment: # Exponential moving average of gradient values state["exp_avg"] = torch.zeros_like(grad) if factored: state["exp_avg_sq_row"] = torch.zeros(grad_shape[:-1]).to(grad) state["exp_avg_sq_col"] = torch.zeros(grad_shape[:-2] + grad_shape[-1:]).to(grad) else: state["exp_avg_sq"] = torch.zeros_like(grad) state["RMS"] = 0 else: if use_first_moment: state["exp_avg"] = state["exp_avg"].to(grad) if factored: state["exp_avg_sq_row"] = state["exp_avg_sq_row"].to(grad) state["exp_avg_sq_col"] = state["exp_avg_sq_col"].to(grad) else: state["exp_avg_sq"] = state["exp_avg_sq"].to(grad) p_data_fp32 = p.data if p.data.dtype in {torch.float16, torch.bfloat16}: p_data_fp32 = p_data_fp32.float() state["step"] += 1 state["RMS"] = self._rms(p_data_fp32) lr = self._get_lr(group, state) beta2t = 1.0 - math.pow(state["step"], group["decay_rate"]) update = (grad ** 2) + group["eps"][0] if factored: exp_avg_sq_row = state["exp_avg_sq_row"] exp_avg_sq_col = state["exp_avg_sq_col"] exp_avg_sq_row.mul_(beta2t).add_(1.0 - beta2t, update.mean(dim=-1)) exp_avg_sq_col.mul_(beta2t).add_(1.0 - beta2t, update.mean(dim=-2)) # Approximation of exponential moving average of square of gradient update = self._approx_sq_grad(exp_avg_sq_row, exp_avg_sq_col) update.mul_(grad) else: exp_avg_sq = state["exp_avg_sq"] exp_avg_sq.mul_(beta2t).add_(1.0 - beta2t, update) update = exp_avg_sq.rsqrt().mul_(grad) update.div_((self._rms(update) / group["clip_threshold"]).clamp_(min=1.0)) update.mul_(lr) if use_first_moment: exp_avg = state["exp_avg"] exp_avg.mul_(group["beta1"]).add_(1 - group["beta1"], update) update = exp_avg if group["weight_decay"] != 0: p_data_fp32.add_(-group["weight_decay"] * lr, p_data_fp32) p_data_fp32.add_(-update) if p.data.dtype in {torch.float16, torch.bfloat16}: p.data.copy_(p_data_fp32) return loss
AdaMix/src/transformers/optimization.py/0
{ "file_path": "AdaMix/src/transformers/optimization.py", "repo_id": "AdaMix", "token_count": 10987 }
68
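A minimal usage sketch for the optimizer/scheduler API in the optimization.py file above, wiring AdamW together with get_scheduler. The toy model, learning rate, warmup and step counts below are illustrative assumptions, not part of the original file.

# Hedged usage sketch (assumed import path; the tiny nn.Linear stands in for a real model).
import torch
from transformers.optimization import AdamW, get_scheduler

model = torch.nn.Linear(10, 2)  # stand-in for a transformer model (assumption)
optimizer = AdamW(model.parameters(), lr=5e-5, weight_decay=0.01, correct_bias=True)

num_training_steps = 1000  # assumed training budget
lr_scheduler = get_scheduler(
    "linear",                       # resolved to SchedulerType.LINEAR via TYPE_TO_SCHEDULER_FUNCTION
    optimizer=optimizer,
    num_warmup_steps=100,           # required by every schedule except "constant"
    num_training_steps=num_training_steps,
)

for _ in range(num_training_steps):
    # loss.backward() would run here in real training code
    optimizer.step()
    lr_scheduler.step()
    optimizer.zero_grad()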
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import importlib.util from dataclasses import dataclass, field import torch from transformers.file_utils import cached_property, is_sagemaker_distributed_available from transformers.training_args import TrainingArguments from transformers.utils import logging logger = logging.get_logger(__name__) def is_smdistributed_available(): return importlib.util.find_spec("smdistributed") is not None if is_smdistributed_available(): import smdistributed.modelparallel.torch as smp @dataclass class SageMakerTrainingArguments(TrainingArguments): mp_parameters: str = field( default="", metadata={"help": "Used by the SageMaker launcher to send mp-specific args."} ) def __post_init__(self): super().__post_init__() if is_smdistributed_available() and self.mp_parameters != "": smp.init() @cached_property def _setup_devices(self) -> "torch.device": logger.info("PyTorch: setting up devices") if self.no_cuda: device = torch.device("cpu") self._n_gpu = 0 elif is_smdistributed_available() and self.mp_parameters != "": local_rank = smp.local_rank() device = torch.device("cuda", local_rank) self._n_gpu = 1 elif is_sagemaker_distributed_available(): import smdistributed.dataparallel.torch.distributed as dist dist.init_process_group() self.local_rank = dist.get_local_rank() device = torch.device("cuda", self.local_rank) self._n_gpu = 1 elif self.local_rank == -1: # if n_gpu is > 1 we'll use nn.DataParallel. # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0` # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will # trigger an error that a device index is missing. Index 0 takes into account the # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0` # will use the first GPU in that env, i.e. GPU#1 device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at # the default value. self._n_gpu = torch.cuda.device_count() else: # Here, we'll use torch.distributed. # Initializes the distributed backend which will take care of synchronizing nodes/GPUs torch.distributed.init_process_group(backend="nccl") device = torch.device("cuda", self.local_rank) self._n_gpu = 1 if device.type == "cuda": torch.cuda.set_device(device) return device @property def world_size(self): if is_smdistributed_available() and self.mp_parameters != "": return smp.dp_size() return super().world_size @property def place_model_on_device(self): return not (is_smdistributed_available() and self.mp_parameters != "") @property def _no_sync_in_gradient_accumulation(self): return False
AdaMix/src/transformers/sagemaker/training_args_sm.py/0
{ "file_path": "AdaMix/src/transformers/sagemaker/training_args_sm.py", "repo_id": "AdaMix", "token_count": 1470 }
69
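The SageMaker training arguments above act as a drop-in replacement for TrainingArguments; the sketch below shows how an entry-point script might instantiate them. The output directory and hyperparameter values are assumptions for illustration only.

# Hedged sketch (assumed import path and values); device resolution happens lazily in _setup_devices.
from transformers.sagemaker.training_args_sm import SageMakerTrainingArguments

args = SageMakerTrainingArguments(
    output_dir="/opt/ml/model",         # conventional SageMaker model directory (assumption)
    per_device_train_batch_size=8,      # illustrative hyperparameters (assumptions)
    num_train_epochs=3,
)
# Accessing args.device selects CPU, a single GPU, smdistributed model parallelism, or
# SageMaker data parallelism depending on the environment, as implemented above.
print(args.device, args.n_gpu, args.world_size)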
# coding=utf-8 # Copyright {{cookiecutter.authors}} and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tokenization classes for {{cookiecutter.modelname}}.""" {%- if cookiecutter.tokenizer_type == "Based on BERT" %} from ...utils import logging from ..bert.tokenization_bert import BertTokenizer logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"} PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": { "{{cookiecutter.checkpoint_identifier}}": "https://huggingface.co/{{cookiecutter.checkpoint_identifier}}/resolve/main/vocab.txt", } } PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { "{{cookiecutter.checkpoint_identifier}}": 512, } PRETRAINED_INIT_CONFIGURATION = { "{{cookiecutter.checkpoint_identifier}}": {"do_lower_case": False}, } class {{cookiecutter.camelcase_modelname}}Tokenizer(BertTokenizer): r""" Construct a {{cookiecutter.modelname}} tokenizer. :class:`~transformers.{{cookiecutter.camelcase_modelname}}Tokenizer` is identical to :class:`~transformers.BertTokenizer` and runs end-to-end tokenization: punctuation splitting and wordpiece. Refer to superclass :class:`~transformers.BertTokenizer` for usage examples and documentation concerning parameters. """ vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION {%- elif cookiecutter.tokenizer_type == "Based on BART" %} from ...utils import logging from ..bart.tokenization_bart import BartTokenizer logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"} PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": { "{{cookiecutter.checkpoint_identifier}}": "https://huggingface.co/{{cookiecutter.checkpoint_identifier}}/resolve/main/vocab.json", }, "merges_file": { "{{cookiecutter.checkpoint_identifier}}": "https://huggingface.co/{{cookiecutter.checkpoint_identifier}}/resolve/main/merges.txt", }, "tokenizer_file": { "{{cookiecutter.checkpoint_identifier}}": "https://huggingface.co/{{cookiecutter.checkpoint_identifier}}/resolve/main/tokenizer.json", }, } PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { "{{cookiecutter.checkpoint_identifier}}": 1024, } class {{cookiecutter.camelcase_modelname}}Tokenizer(BartTokenizer): """ Construct a {{cookiecutter.modelname}} tokenizer. :class:`~transformers.{{cookiecutter.camelcase_modelname}}Tokenizer` is identical to :class:`~transformers.BartTokenizer` and runs end-to-end tokenization: punctuation splitting and wordpiece. Refer to superclass :class:`~transformers.BartTokenizer` for usage examples and documentation concerning parameters. 
""" vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES {%- elif cookiecutter.tokenizer_type == "Standalone" %} from typing import List, Optional from tokenizers import ByteLevelBPETokenizer from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"} PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": { "{{cookiecutter.checkpoint_identifier}}": "https://huggingface.co/{{cookiecutter.checkpoint_identifier}}/resolve/main/vocab.txt", }, } PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { "{{cookiecutter.checkpoint_identifier}}": 1024, } class {{cookiecutter.camelcase_modelname}}Tokenizer(PreTrainedTokenizer): """ Construct a {{cookiecutter.modelname}} tokenizer. Based on byte-level Byte-Pair-Encoding. Args: vocab_file (:obj:`str`): Path to the vocabulary file. """ vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES model_input_names = ["input_ids", "attention_mask"] def __init__( self, vocab_file, unk_token="<|endoftext|>", bos_token="<|endoftext|>", eos_token="<|endoftext|>", **kwargs ): bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token super().__init__(bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, **kwargs) "Initialisation" @property def vocab_size(self): "Returns vocab size" def get_vocab(self): "Returns vocab as a dict" def _tokenize(self, text): """ Returns a tokenized string. """ def _convert_token_to_id(self, token): """ Converts a token (str) in an id using the vocab. """ def _convert_id_to_token(self, index): """Converts an index (integer) in a token (str) using the vocab.""" def convert_tokens_to_string(self, tokens): """ Converts a sequence of tokens (string) in a single string. """ def save_vocabulary(self, save_directory): """ Save the vocabulary and special tokens file to a directory. Args: save_directory (:obj:`str`): The directory in which to save the vocabulary. Returns: :obj:`Tuple(str)`: Paths to the files saved. """ def build_inputs_with_special_tokens( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A {{cookiecutter.modelname}} sequence has the following format: - single sequence: ``<s> X </s>`` - pair of sequences: ``<s> A </s></s> B </s>`` Args: token_ids_0 (:obj:`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (:obj:`List[int]`, `optional`): Optional second list of IDs for sequence pairs. Returns: :obj:`List[int]`: List of `input IDs <../glossary.html#input-ids>`__ with the appropriate special tokens. 
""" if token_ids_1 is None: return [self.cls_token_id] + token_ids_0 + [self.sep_token_id] cls = [self.cls_token_id] sep = [self.sep_token_id] return cls + token_ids_0 + sep + sep + token_ids_1 + sep def get_special_tokens_mask( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False ) -> List[int]: """ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer ``prepare_for_model`` method. Args: token_ids_0 (:obj:`List[int]`): List of IDs. token_ids_1 (:obj:`List[int]`, `optional`): Optional second list of IDs for sequence pairs. already_has_special_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not the token list is already formatted with special tokens for the model. Returns: :obj:`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. """ if already_has_special_tokens: if token_ids_1 is not None: raise ValueError( "You should not supply a second sequence if the provided sequence of " "ids is already formatted with special tokens for the model." ) return list(map(lambda x: 1 if x in [self.sep_token_id, self.cls_token_id] else 0, token_ids_0)) if token_ids_1 is None: return [1] + ([0] * len(token_ids_0)) + [1] return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1] def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Create a mask from the two sequences passed to be used in a sequence-pair classification task. {{cookiecutter.modelname}} does not make use of token type ids, therefore a list of zeros is returned. Args: token_ids_0 (:obj:`List[int]`): List of IDs. token_ids_1 (:obj:`List[int]`, `optional`): Optional second list of IDs for sequence pairs. Returns: :obj:`List[int]`: List of zeros. """ sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0] def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs): add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space) if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()): text = " " + text return (text, kwargs) class {{cookiecutter.camelcase_modelname}}TokenizerFast(PreTrainedTokenizerFast): """ Construct a "fast" {{cookiecutter.modelname}} tokenizer (backed by HuggingFace's `tokenizers` library). Args: vocab_file (:obj:`str`): Path to the vocabulary file. 
""" vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES model_input_names = ["input_ids", "attention_mask"] def __init__( self, vocab_file, merges_file, unk_token="<|endoftext|>", bos_token="<|endoftext|>", eos_token="<|endoftext|>", add_prefix_space=False, trim_offsets=True, **kwargs ): super().__init__( ByteLevelBPETokenizer( vocab_file=vocab_file, merges_file=merges_file, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets, ), bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, **kwargs, ) self.add_prefix_space = add_prefix_space def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None): output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id] if token_ids_1 is None: return output return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id] def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Create a mask from the two sequences passed to be used in a sequence-pair classification task. {{cookiecutter.modelname}} does not make use of token type ids, therefore a list of zeros is returned. Args: token_ids_0 (:obj:`List[int]`): List of IDs. token_ids_1 (:obj:`List[int]`, `optional`): Optional second list of IDs for sequence pairs. Returns: :obj:`List[int]`: List of zeros. """ sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0] {% endif %}
AdaMix/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/tokenization_{{cookiecutter.lowercase_modelname}}.py/0
{ "file_path": "AdaMix/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/tokenization_{{cookiecutter.lowercase_modelname}}.py", "repo_id": "AdaMix", "token_count": 5419 }
70
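The tokenizer template above documents the special-token layout <s> X </s> for single sequences and <s> A </s></s> B </s> for pairs. The standalone sketch below mirrors that build_inputs_with_special_tokens logic; the token ids (0 for <s>, 2 for </s>) are assumptions, since a concrete tokenizer defines its own ids.

# Standalone sketch mirroring build_inputs_with_special_tokens from the template above.
# CLS_ID=0 and SEP_ID=2 are assumed ids for illustration.
from typing import List, Optional

CLS_ID, SEP_ID = 0, 2

def build_inputs_with_special_tokens(token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
    if token_ids_1 is None:
        return [CLS_ID] + token_ids_0 + [SEP_ID]                                   # <s> X </s>
    return [CLS_ID] + token_ids_0 + [SEP_ID, SEP_ID] + token_ids_1 + [SEP_ID]      # <s> A </s></s> B </s>

assert build_inputs_with_special_tokens([10, 11]) == [0, 10, 11, 2]
assert build_inputs_with_special_tokens([10, 11], [20]) == [0, 10, 11, 2, 2, 20, 2]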
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import requests # Try to import everything from transformers to ensure every object can be loaded. from transformers import * # noqa F406 from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME, filename_to_url, get_from_cache, hf_bucket_url from transformers.testing_utils import DUMMY_UNKWOWN_IDENTIFIER MODEL_ID = DUMMY_UNKWOWN_IDENTIFIER # An actual model hosted on huggingface.co REVISION_ID_DEFAULT = "main" # Default branch name REVISION_ID_ONE_SPECIFIC_COMMIT = "f2c752cfc5c0ab6f4bdec59acea69eefbee381c2" # One particular commit (not the top of `main`) REVISION_ID_INVALID = "aaaaaaa" # This commit does not exist, so we should 404. PINNED_SHA1 = "d9e9f15bc825e4b2c9249e9578f884bbcb5e3684" # Sha-1 of config.json on the top of `main`, for checking purposes PINNED_SHA256 = "4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3" # Sha-256 of pytorch_model.bin on the top of `main`, for checking purposes class GetFromCacheTests(unittest.TestCase): def test_bogus_url(self): # This lets us simulate no connection # as the error raised is the same # `ConnectionError` url = "https://bogus" with self.assertRaisesRegex(ValueError, "Connection error"): _ = get_from_cache(url) def test_file_not_found(self): # Valid revision (None) but missing file. url = hf_bucket_url(MODEL_ID, filename="missing.bin") with self.assertRaisesRegex(requests.exceptions.HTTPError, "404 Client Error"): _ = get_from_cache(url) def test_revision_not_found(self): # Valid file but missing revision url = hf_bucket_url(MODEL_ID, filename=CONFIG_NAME, revision=REVISION_ID_INVALID) with self.assertRaisesRegex(requests.exceptions.HTTPError, "404 Client Error"): _ = get_from_cache(url) def test_standard_object(self): url = hf_bucket_url(MODEL_ID, filename=CONFIG_NAME, revision=REVISION_ID_DEFAULT) filepath = get_from_cache(url, force_download=True) metadata = filename_to_url(filepath) self.assertEqual(metadata, (url, f'"{PINNED_SHA1}"')) def test_standard_object_rev(self): # Same object, but different revision url = hf_bucket_url(MODEL_ID, filename=CONFIG_NAME, revision=REVISION_ID_ONE_SPECIFIC_COMMIT) filepath = get_from_cache(url, force_download=True) metadata = filename_to_url(filepath) self.assertNotEqual(metadata[1], f'"{PINNED_SHA1}"') # Caution: check that the etag is *not* equal to the one from `test_standard_object` def test_lfs_object(self): url = hf_bucket_url(MODEL_ID, filename=WEIGHTS_NAME, revision=REVISION_ID_DEFAULT) filepath = get_from_cache(url, force_download=True) metadata = filename_to_url(filepath) self.assertEqual(metadata, (url, f'"{PINNED_SHA256}"'))
AdaMix/tests/test_file_utils.py/0
{ "file_path": "AdaMix/tests/test_file_utils.py", "repo_id": "AdaMix", "token_count": 1342 }
71
# coding=utf-8 # Copyright 2021, The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch Blenderbot model. """ import tempfile import unittest from transformers import is_torch_available from transformers.file_utils import cached_property from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from .test_configuration_common import ConfigTester from .test_generation_utils import GenerationTesterMixin from .test_modeling_common import ModelTesterMixin, ids_tensor if is_torch_available(): import torch from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration, BlenderbotModel, BlenderbotTokenizer from transformers.models.blenderbot.modeling_blenderbot import ( BlenderbotDecoder, BlenderbotEncoder, BlenderbotForCausalLM, ) def prepare_blenderbot_inputs_dict( config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, ): if attention_mask is None: attention_mask = input_ids.ne(config.pad_token_id) if decoder_attention_mask is None: decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id) if head_mask is None: head_mask = torch.ones(config.encoder_layers, config.encoder_attention_heads, device=torch_device) if decoder_head_mask is None: decoder_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, } @require_torch class BlenderbotModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.bos_token_id = bos_token_id def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size).clamp( 3, ) input_ids[:, -1] = self.eos_token_id # Eos Token decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) config 
= BlenderbotConfig( vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, ) inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids) return config, inputs_dict def prepare_config_and_inputs_for_common(self): config, inputs_dict = self.prepare_config_and_inputs() return config, inputs_dict def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict): model = BlenderbotModel(config=config).get_decoder().to(torch_device).eval() input_ids = inputs_dict["input_ids"] attention_mask = inputs_dict["attention_mask"] head_mask = inputs_dict["head_mask"] # first forward pass outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True) output, past_key_values = outputs.to_tuple() # create hypothetical multiple next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_attn_mask = ids_tensor((self.batch_size, 3), 2) # append to next input_ids and next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1) output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"] output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[ "last_hidden_state" ] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def check_encoder_decoder_model_standalone(self, config, inputs_dict): model = BlenderbotModel(config=config).to(torch_device).eval() outputs = model(**inputs_dict) encoder_last_hidden_state = outputs.encoder_last_hidden_state last_hidden_state = outputs.last_hidden_state with tempfile.TemporaryDirectory() as tmpdirname: encoder = model.get_encoder() encoder.save_pretrained(tmpdirname) encoder = BlenderbotEncoder.from_pretrained(tmpdirname).to(torch_device) encoder_last_hidden_state_2 = encoder(inputs_dict["input_ids"], attention_mask=inputs_dict["attention_mask"])[ 0 ] self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3) with tempfile.TemporaryDirectory() as tmpdirname: decoder = model.get_decoder() decoder.save_pretrained(tmpdirname) decoder = BlenderbotDecoder.from_pretrained(tmpdirname).to(torch_device) last_hidden_state_2 = decoder( input_ids=inputs_dict["decoder_input_ids"], attention_mask=inputs_dict["decoder_attention_mask"], encoder_hidden_states=encoder_last_hidden_state, encoder_attention_mask=inputs_dict["attention_mask"], )[0] self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3) @require_torch class 
BlenderbotModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase): all_model_classes = (BlenderbotModel, BlenderbotForConditionalGeneration) if is_torch_available() else () all_generative_model_classes = (BlenderbotForConditionalGeneration,) if is_torch_available() else () is_encoder_decoder = True test_pruning = False test_missing_keys = False def setUp(self): self.model_tester = BlenderbotModelTester(self) self.config_tester = ConfigTester(self, config_class=BlenderbotConfig) def test_config(self): self.config_tester.run_common_tests() def test_save_load_strict(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True) self.assertEqual(info["missing_keys"], []) def test_decoder_model_past_with_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs) def test_encoder_decoder_model_standalone(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs) def test_generate_fp16(self): config, input_dict = self.model_tester.prepare_config_and_inputs() input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) model = BlenderbotForConditionalGeneration(config).eval().to(torch_device) if torch_device == "cuda": model.half() model.generate(input_ids, attention_mask=attention_mask) model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3) def assert_tensors_close(a, b, atol=1e-12, prefix=""): """If tensors have different shapes, different values or a and b are not both tensors, raise a nice Assertion error.""" if a is None and b is None: return True try: if torch.allclose(a, b, atol=atol): return True raise except Exception: pct_different = (torch.gt((a - b).abs(), atol)).float().mean().item() if a.numel() > 100: msg = f"tensor values are {pct_different:.1%} percent different." else: msg = f"{a} != {b}" if prefix: msg = prefix + ": " + msg raise AssertionError(msg) @unittest.skipUnless(torch_device != "cpu", "3B test too slow on CPU.") @require_torch @require_sentencepiece @require_tokenizers class Blenderbot3BIntegrationTests(unittest.TestCase): ckpt = "facebook/blenderbot-3B" @cached_property def tokenizer(self): return BlenderbotTokenizer.from_pretrained(self.ckpt) @slow def test_generation_from_short_input_same_as_parlai_3B(self): FASTER_GEN_KWARGS = dict(num_beams=1, early_stopping=True, min_length=15, max_length=25) TOK_DECODE_KW = dict(skip_special_tokens=True, clean_up_tokenization_spaces=True) torch.cuda.empty_cache() model = BlenderbotForConditionalGeneration.from_pretrained(self.ckpt).half().to(torch_device) src_text = ["Sam"] model_inputs = self.tokenizer(src_text, return_tensors="pt").to(torch_device) generated_utterances = model.generate(**model_inputs, **FASTER_GEN_KWARGS) tgt_text = 'Sam is a great name. It means "sun" in Gaelic.' generated_txt = self.tokenizer.batch_decode(generated_utterances, **TOK_DECODE_KW) assert generated_txt[0].strip() == tgt_text src_text = "Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like i'm going to throw up.\nand why is that?" 
model_inputs = self.tokenizer([src_text], return_tensors="pt").to(torch_device) generated_ids = model.generate(**model_inputs, **FASTER_GEN_KWARGS)[0] reply = self.tokenizer.decode(generated_ids, **TOK_DECODE_KW) assert "I think it's because we are so worried about what people think of us." == reply.strip() del model class BlenderbotStandaloneDecoderModelTester: def __init__( self, parent, vocab_size=99, batch_size=13, d_model=16, decoder_seq_length=7, is_training=True, is_decoder=True, use_attention_mask=True, use_cache=False, use_labels=True, decoder_start_token_id=2, decoder_ffn_dim=32, decoder_layers=4, encoder_attention_heads=4, decoder_attention_heads=4, max_position_embeddings=30, is_encoder_decoder=False, encoder_no_repeat_ngram_size=0, pad_token_id=0, bos_token_id=1, eos_token_id=2, scope=None, ): self.parent = parent self.batch_size = batch_size self.decoder_seq_length = decoder_seq_length # For common tests self.seq_length = self.decoder_seq_length self.is_training = is_training self.use_attention_mask = use_attention_mask self.use_labels = use_labels self.vocab_size = vocab_size self.d_model = d_model self.hidden_size = d_model self.num_hidden_layers = decoder_layers self.decoder_layers = decoder_layers self.decoder_ffn_dim = decoder_ffn_dim self.encoder_attention_heads = encoder_attention_heads self.decoder_attention_heads = decoder_attention_heads self.num_attention_heads = decoder_attention_heads self.eos_token_id = eos_token_id self.bos_token_id = bos_token_id self.pad_token_id = pad_token_id self.decoder_start_token_id = decoder_start_token_id self.use_cache = use_cache self.max_position_embeddings = max_position_embeddings self.is_encoder_decoder = is_encoder_decoder self.encoder_no_repeat_ngram_size = encoder_no_repeat_ngram_size self.scope = None self.decoder_key_length = decoder_seq_length self.base_model_out_len = 2 self.decoder_attention_idx = 1 def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size) attention_mask = None if self.use_attention_mask: attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2) lm_labels = None if self.use_labels: lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size) config = BlenderbotConfig( vocab_size=self.vocab_size, d_model=self.d_model, decoder_layers=self.decoder_layers, decoder_ffn_dim=self.decoder_ffn_dim, encoder_attention_heads=self.encoder_attention_heads, decoder_attention_heads=self.decoder_attention_heads, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, use_cache=self.use_cache, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, max_position_embeddings=self.max_position_embeddings, is_encoder_decoder=self.is_encoder_decoder, encoder_no_repeat_ngram_size=self.encoder_no_repeat_ngram_size, ) return ( config, input_ids, attention_mask, lm_labels, ) def create_and_check_decoder_model_past( self, config, input_ids, attention_mask, lm_labels, ): config.use_cache = True model = BlenderbotDecoder(config=config).to(torch_device).eval() # first forward pass outputs = model(input_ids, use_cache=True) outputs_use_cache_conf = model(input_ids) outputs_no_past = model(input_ids, use_cache=False) self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf)) self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1) past_key_values = outputs["past_key_values"] # create hypothetical next token and extent to next_input_ids next_tokens = 
ids_tensor((self.batch_size, 1), config.vocab_size) # append to next input_ids and next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) output_from_no_past = model(next_input_ids)["last_hidden_state"] output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach() output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3) def create_and_check_decoder_model_attention_mask_past( self, config, input_ids, attention_mask, lm_labels, ): model = BlenderbotDecoder(config=config).to(torch_device).eval() # create attention mask attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device) half_seq_length = input_ids.shape[-1] // 2 attn_mask[:, half_seq_length:] = 0 # first forward pass past_key_values = model(input_ids, attention_mask=attn_mask, use_cache=True)["past_key_values"] # past_key_values = model(input_ids, use_cache=True)["past_key_values"] # create hypothetical next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) # change a random masked slice from input_ids random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1 random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1) input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens # append to next input_ids and attn_mask next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) attn_mask = torch.cat( [attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)], dim=1, ) # get two different outputs output_from_no_past = model(next_input_ids, attention_mask=attn_mask)["last_hidden_state"] output_from_past = model(next_tokens, past_key_values=past_key_values, attention_mask=attn_mask)[ "last_hidden_state" ] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach() output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, attention_mask, lm_labels, ) = config_and_inputs inputs_dict = { "input_ids": input_ids, "attention_mask": attention_mask, } return config, inputs_dict @require_torch class BlenderbotStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase): all_model_classes = (BlenderbotDecoder, BlenderbotForCausalLM) if is_torch_available() else () all_generative_model_classes = (BlenderbotForCausalLM,) if is_torch_available() else () test_pruning = False is_encoder_decoder = False def setUp( self, ): self.model_tester = BlenderbotStandaloneDecoderModelTester(self, is_training=False) self.config_tester = ConfigTester(self, config_class=BlenderbotConfig) def test_config(self): self.config_tester.run_common_tests() def test_decoder_model_past(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past(*config_and_inputs) def 
test_decoder_model_attn_mask_past(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_attention_mask_past(*config_and_inputs) def test_retain_grad_hidden_states_attentions(self): # decoder cannot keep gradients return
AdaMix/tests/test_modeling_blenderbot.py/0
{ "file_path": "AdaMix/tests/test_modeling_blenderbot.py", "repo_id": "AdaMix", "token_count": 9372 }
72
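The Blenderbot decoder tests above assert the incremental-decoding property of past_key_values. The sketch below restates that check outside the test harness with a deliberately tiny, randomly initialized config; all size values are assumptions chosen only to keep the example fast.

# Hedged sketch of the cache-equivalence property checked above: a cached prefix plus one
# new token should produce (within tolerance) the same hidden state as rerunning the full sequence.
import torch
from transformers import BlenderbotConfig
from transformers.models.blenderbot.modeling_blenderbot import BlenderbotDecoder

config = BlenderbotConfig(
    vocab_size=99, d_model=16, decoder_layers=2, decoder_attention_heads=4,
    decoder_ffn_dim=32, max_position_embeddings=32, use_cache=True,
)  # tiny values are assumptions for a fast, randomly initialized model
decoder = BlenderbotDecoder(config).eval()

prefix = torch.randint(3, config.vocab_size, (1, 5))
new_token = torch.randint(3, config.vocab_size, (1, 1))

with torch.no_grad():
    past = decoder(prefix, use_cache=True).past_key_values
    cached_step = decoder(new_token, past_key_values=past).last_hidden_state
    full_run = decoder(torch.cat([prefix, new_token], dim=-1)).last_hidden_state[:, -1:]

assert torch.allclose(cached_step, full_run, atol=1e-3)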
# coding=utf-8 # Copyright 2020 The HuggingFace Inc. team, Microsoft Corporation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import MPNetConfig, is_tf_available from transformers.testing_utils import require_tf, slow from .test_configuration_common import ConfigTester from .test_modeling_tf_common import TFModelTesterMixin, ids_tensor if is_tf_available(): import tensorflow as tf from transformers.models.mpnet.modeling_tf_mpnet import ( TFMPNetForMaskedLM, TFMPNetForMultipleChoice, TFMPNetForQuestionAnswering, TFMPNetForSequenceClassification, TFMPNetForTokenClassification, TFMPNetModel, ) class TFMPNetModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=64, num_hidden_layers=5, num_attention_heads=4, intermediate_size=64, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = MPNetConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, ) return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def create_and_check_mpnet_model( self, config, 
input_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = TFMPNetModel(config=config) inputs = {"input_ids": input_ids, "attention_mask": input_mask} result = model(inputs) inputs = [input_ids, input_mask] result = model(inputs) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_mpnet_for_masked_lm( self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = TFMPNetForMaskedLM(config=config) inputs = {"input_ids": input_ids, "attention_mask": input_mask} result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_mpnet_for_question_answering( self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = TFMPNetForQuestionAnswering(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, } result = model(inputs) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def create_and_check_mpnet_for_sequence_classification( self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = TFMPNetForSequenceClassification(config) inputs = {"input_ids": input_ids, "attention_mask": input_mask} result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_mpnet_for_multiple_choice( self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_choices = self.num_choices model = TFMPNetForMultipleChoice(config) multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1)) multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1)) inputs = { "input_ids": multiple_choice_inputs_ids, "attention_mask": multiple_choice_input_mask, } result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices)) def create_and_check_mpnet_for_token_classification( self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = TFMPNetForTokenClassification(config) inputs = {"input_ids": input_ids, "attention_mask": input_mask} result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_tf class TFMPNetModelTest(TFModelTesterMixin, unittest.TestCase): all_model_classes = ( ( TFMPNetForMaskedLM, TFMPNetForMultipleChoice, TFMPNetForQuestionAnswering, TFMPNetForSequenceClassification, TFMPNetForTokenClassification, TFMPNetModel, ) if is_tf_available() else () ) test_head_masking = False test_onnx = False def setUp(self): self.model_tester = TFMPNetModelTester(self) self.config_tester = ConfigTester(self, config_class=MPNetConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_mpnet_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_mpnet_model(*config_and_inputs) def test_for_masked_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_masked_lm(*config_and_inputs) def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_question_answering(*config_and_inputs) def test_for_sequence_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_sequence_classification(*config_and_inputs) def test_for_multiple_choice(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_multiple_choice(*config_and_inputs) def test_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_token_classification(*config_and_inputs) @slow def test_model_from_pretrained(self): for model_name in ["microsoft/mpnet-base"]: model = TFMPNetModel.from_pretrained(model_name) self.assertIsNotNone(model) @require_tf class TFMPNetModelIntegrationTest(unittest.TestCase): @slow def test_inference_masked_lm(self): model = TFMPNetModel.from_pretrained("microsoft/mpnet-base") input_ids = tf.constant([[0, 1, 2, 3, 4, 5]]) output = model(input_ids)[0] expected_shape = [1, 6, 768] self.assertEqual(output.shape, expected_shape) expected_slice = tf.constant( [ [ [-0.1067172, 0.08216473, 0.0024543], [-0.03465879, 0.8354118, -0.03252288], [-0.06569476, -0.12424111, -0.0494436], ] ] ) tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
AdaMix/tests/test_modeling_tf_mpnet.py/0
{ "file_path": "AdaMix/tests/test_modeling_tf_mpnet.py", "repo_id": "AdaMix", "token_count": 4597 }
73
# coding=utf-8 # Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow if is_torch_available(): import torch from transformers import XLMRobertaModel @require_sentencepiece @require_tokenizers @require_torch class XLMRobertaModelIntegrationTest(unittest.TestCase): @slow def test_xlm_roberta_base(self): model = XLMRobertaModel.from_pretrained("xlm-roberta-base") input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]]) # The dog is cute and lives in the garden house expected_output_shape = torch.Size((1, 12, 768)) # batch_size, sequence_length, embedding_vector_dim expected_output_values_last_dim = torch.tensor( [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] ) # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.eval() # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1] output = model(input_ids)["last_hidden_state"].detach() self.assertEqual(output.shape, expected_output_shape) # compare the actual values for a slice of last dim self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3)) @slow def test_xlm_roberta_large(self): model = XLMRobertaModel.from_pretrained("xlm-roberta-large") input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]]) # The dog is cute and lives in the garden house expected_output_shape = torch.Size((1, 12, 1024)) # batch_size, sequence_length, embedding_vector_dim expected_output_values_last_dim = torch.tensor( [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]] ) # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large') # xlmr.eval() # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1] output = model(input_ids)["last_hidden_state"].detach() self.assertEqual(output.shape, expected_output_shape) # compare the actual values for a slice of last dim self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
AdaMix/tests/test_modeling_xlm_roberta.py/0
{ "file_path": "AdaMix/tests/test_modeling_xlm_roberta.py", "repo_id": "AdaMix", "token_count": 1239 }
74
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import pipeline from transformers.testing_utils import require_torch from .test_pipelines_common import MonoInputPipelineCommonMixin class TextGenerationPipelineTests(MonoInputPipelineCommonMixin, unittest.TestCase): pipeline_task = "text-generation" pipeline_running_kwargs = {"prefix": "This is "} small_models = ["sshleifer/tiny-ctrl"] # Models tested without the @slow decorator large_models = [] # Models tested with the @slow decorator def test_simple_generation(self): nlp = pipeline(task="text-generation", model=self.small_models[0]) # text-generation is non-deterministic by nature, we can't fully test the output outputs = nlp("This is a test") self.assertEqual(len(outputs), 1) self.assertEqual(list(outputs[0].keys()), ["generated_text"]) self.assertEqual(type(outputs[0]["generated_text"]), str) outputs = nlp(["This is a test", "This is a second test"]) self.assertEqual(len(outputs[0]), 1) self.assertEqual(list(outputs[0][0].keys()), ["generated_text"]) self.assertEqual(type(outputs[0][0]["generated_text"]), str) self.assertEqual(list(outputs[1][0].keys()), ["generated_text"]) self.assertEqual(type(outputs[1][0]["generated_text"]), str) @require_torch def test_generation_output_style(self): text_generator = pipeline(task="text-generation", model=self.small_models[0]) # text-generation is non-deterministic by nature, we can't fully test the output outputs = text_generator("This is a test") self.assertIn("This is a test", outputs[0]["generated_text"]) outputs = text_generator("This is a test", return_full_text=False) self.assertNotIn("This is a test", outputs[0]["generated_text"]) text_generator = pipeline(task="text-generation", model=self.small_models[0], return_full_text=False) outputs = text_generator("This is a test") self.assertNotIn("This is a test", outputs[0]["generated_text"]) outputs = text_generator("This is a test", return_full_text=True) self.assertIn("This is a test", outputs[0]["generated_text"])
AdaMix/tests/test_pipelines_text_generation.py/0
{ "file_path": "AdaMix/tests/test_pipelines_text_generation.py", "repo_id": "AdaMix", "token_count": 985 }
75
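The pipeline behaviors verified above can be reproduced outside the test harness with a short illustrative snippet. This is a sketch, not part of the test file; it assumes the tiny `sshleifer/tiny-ctrl` checkpoint used by the tests can be downloaded, and since text generation is non-deterministic only the structure of the output is meaningful.

```python
from transformers import pipeline

generator = pipeline(task="text-generation", model="sshleifer/tiny-ctrl")

# A single prompt returns a list with one dict holding "generated_text".
outputs = generator("This is a test")
print(outputs[0]["generated_text"])  # contains the prompt plus the continuation

# return_full_text=False strips the prompt and keeps only the continuation.
outputs = generator("This is a test", return_full_text=False)
print(outputs[0]["generated_text"])
```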
#!/usr/bin/env python3 # coding=utf-8 # Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for Blenderbot Tokenizers, including common tests for BlenderbotSmallTokenizer.""" import unittest from transformers.file_utils import cached_property from transformers.models.blenderbot.tokenization_blenderbot import BlenderbotTokenizer class Blenderbot3BTokenizerTests(unittest.TestCase): @cached_property def tokenizer_3b(self): return BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B") def test_encode_decode_cycle(self): tok = self.tokenizer_3b src_text = " I am a small frog." encoded = tok([src_text], padding=False, truncation=False)["input_ids"] decoded = tok.batch_decode(encoded, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] assert src_text == decoded def test_3B_tokenization_same_as_parlai(self): assert self.tokenizer_3b.add_prefix_space assert self.tokenizer_3b([" Sam", "Sam"]).input_ids == [[5502, 2], [5502, 2]]
AdaMix/tests/test_tokenization_blenderbot.py/0
{ "file_path": "AdaMix/tests/test_tokenization_blenderbot.py", "repo_id": "AdaMix", "token_count": 539 }
76
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import tempfile import unittest from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch from .test_tokenization_common import TokenizerTesterMixin SAMPLE_VOCAB = os.path.join(os.path.dirname(os.path.abspath(__file__)), "fixtures/test_sentencepiece.model") if is_torch_available(): from transformers.models.mbart.modeling_mbart import shift_tokens_right EN_CODE = 250004 RO_CODE = 250020 @require_sentencepiece @require_tokenizers class MBartTokenizationTest(TokenizerTesterMixin, unittest.TestCase): tokenizer_class = MBartTokenizer rust_tokenizer_class = MBartTokenizerFast test_rust_tokenizer = True def setUp(self): super().setUp() # We have a SentencePiece fixture for testing tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True) tokenizer.save_pretrained(self.tmpdirname) def test_full_tokenizer(self): tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True) tokens = tokenizer.tokenize("This is a test") self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"]) self.assertListEqual( tokenizer.convert_tokens_to_ids(tokens), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]], ) tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.") self.assertListEqual( tokens, [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", ".", ], ) ids = tokenizer.convert_tokens_to_ids(tokens) self.assertListEqual( ids, [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] # ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^ ], ) back_tokens = tokenizer.convert_ids_to_tokens(ids) self.assertListEqual( back_tokens, [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", ".", ], ) @require_torch @require_sentencepiece @require_tokenizers class MBartEnroIntegrationTest(unittest.TestCase): checkpoint_name = "facebook/mbart-large-en-ro" src_text = [ " UN Chief Says There Is No Military Solution in Syria", """ Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""", ] tgt_text = [ "Şeful ONU declară că nu există o soluţie militară în Siria", 'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea 
sprijinului militar al Rusiei pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.', ] expected_src_tokens = [8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2, EN_CODE] @classmethod def setUpClass(cls): cls.tokenizer: MBartTokenizer = MBartTokenizer.from_pretrained( cls.checkpoint_name, src_lang="en_XX", tgt_lang="ro_RO" ) cls.pad_token_id = 1 return cls def check_language_codes(self): self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"], 250001) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"], 250004) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"], 250020) def test_enro_tokenizer_batch_encode_plus(self): ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0] self.assertListEqual(self.expected_src_tokens, ids) def test_enro_tokenizer_decode_ignores_language_codes(self): self.assertIn(RO_CODE, self.tokenizer.all_special_ids) generated_ids = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2] result = self.tokenizer.decode(generated_ids, skip_special_tokens=True) expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True) self.assertEqual(result, expected_romanian) self.assertNotIn(self.tokenizer.eos_token, result) def test_enro_tokenizer_truncation(self): src_text = ["this is gunna be a long sentence " * 20] assert isinstance(src_text[0], str) desired_max_length = 10 ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0] self.assertEqual(ids[-2], 2) self.assertEqual(ids[-1], EN_CODE) self.assertEqual(len(ids), desired_max_length) def test_mask_token(self): self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]), [250026, 250001]) def test_special_tokens_unaffacted_by_save_load(self): tmpdirname = tempfile.mkdtemp() original_special_tokens = self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(tmpdirname) new_tok = MBartTokenizer.from_pretrained(tmpdirname) self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens) @require_torch def test_batch_fairseq_parity(self): batch = self.tokenizer(self.src_text, padding=True) with self.tokenizer.as_target_tokenizer(): targets = self.tokenizer(self.tgt_text, padding=True, return_tensors="pt") labels = targets["input_ids"] batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id).tolist() # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 assert batch.input_ids[1][-2:] == [2, EN_CODE] assert batch.decoder_input_ids[1][0] == RO_CODE assert batch.decoder_input_ids[1][-1] == 2 assert labels[1][-2:].tolist() == [2, RO_CODE] @require_torch def test_enro_tokenizer_prepare_batch(self): batch = self.tokenizer( self.src_text, padding=True, truncation=True, max_length=len(self.expected_src_tokens), return_tensors="pt" ) with self.tokenizer.as_target_tokenizer(): targets = self.tokenizer( self.tgt_text, padding=True, truncation=True, max_length=len(self.expected_src_tokens), return_tensors="pt", ) labels = targets["input_ids"] batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id) self.assertIsInstance(batch, BatchEncoding) self.assertEqual((2, 14), batch.input_ids.shape) self.assertEqual((2, 14), batch.attention_mask.shape) result = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens, result) self.assertEqual(2, 
batch.decoder_input_ids[0, -1]) # EOS # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens, []) self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id, EN_CODE]) def test_seq2seq_max_length(self): batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt") with self.tokenizer.as_target_tokenizer(): targets = self.tokenizer(self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt") labels = targets["input_ids"] batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id) self.assertEqual(batch.input_ids.shape[1], 3) self.assertEqual(batch.decoder_input_ids.shape[1], 10)
AdaMix/tests/test_tokenization_mbart.py/0
{ "file_path": "AdaMix/tests/test_tokenization_mbart.py", "repo_id": "AdaMix", "token_count": 4634 }
77
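For readers skimming the MBart integration tests above, the following sketch condenses the en_XX → ro_RO preprocessing flow they exercise into one place. It is illustrative only: it assumes the `facebook/mbart-large-en-ro` checkpoint can be downloaded, and the sentences are sample strings taken from the test data.

```python
from transformers import MBartTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right

tokenizer = MBartTokenizer.from_pretrained(
    "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
)

src_texts = ["UN Chief Says There Is No Military Solution in Syria"]
tgt_texts = ["Şeful ONU declară că nu există o soluţie militară în Siria"]

# Source side: input_ids end with </s> followed by the en_XX language code.
batch = tokenizer(src_texts, padding=True, return_tensors="pt")

# Target side: tokenize in target-language mode, then shift right so the
# decoder inputs start with the ro_RO language code.
with tokenizer.as_target_tokenizer():
    labels = tokenizer(tgt_texts, padding=True, return_tensors="pt")["input_ids"]
batch["decoder_input_ids"] = shift_tokens_right(labels, tokenizer.pad_token_id)
```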
# coding=utf-8 # Copyright 2018 HuggingFace Inc.. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import pickle import unittest from typing import Callable, Optional import numpy as np from transformers import BatchEncoding, BertTokenizer, BertTokenizerFast, PreTrainedTokenizer, TensorType, TokenSpan from transformers.models.gpt2.tokenization_gpt2 import GPT2Tokenizer from transformers.testing_utils import CaptureStderr, require_flax, require_tf, require_tokenizers, require_torch, slow class TokenizerUtilsTest(unittest.TestCase): def check_tokenizer_from_pretrained(self, tokenizer_class): s3_models = list(tokenizer_class.max_model_input_sizes.keys()) for model_name in s3_models[:1]: tokenizer = tokenizer_class.from_pretrained(model_name) self.assertIsNotNone(tokenizer) self.assertIsInstance(tokenizer, tokenizer_class) self.assertIsInstance(tokenizer, PreTrainedTokenizer) for special_tok in tokenizer.all_special_tokens: self.assertIsInstance(special_tok, str) special_tok_id = tokenizer.convert_tokens_to_ids(special_tok) self.assertIsInstance(special_tok_id, int) def assert_dump_and_restore(self, be_original: BatchEncoding, equal_op: Optional[Callable] = None): batch_encoding_str = pickle.dumps(be_original) self.assertIsNotNone(batch_encoding_str) be_restored = pickle.loads(batch_encoding_str) # Ensure is_fast is correctly restored self.assertEqual(be_restored.is_fast, be_original.is_fast) # Ensure encodings are potentially correctly restored if be_original.is_fast: self.assertIsNotNone(be_restored.encodings) else: self.assertIsNone(be_restored.encodings) # Ensure the keys are the same for original_v, restored_v in zip(be_original.values(), be_restored.values()): if equal_op: self.assertTrue(equal_op(restored_v, original_v)) else: self.assertEqual(restored_v, original_v) @slow def test_pretrained_tokenizers(self): self.check_tokenizer_from_pretrained(GPT2Tokenizer) def test_tensor_type_from_str(self): self.assertEqual(TensorType("tf"), TensorType.TENSORFLOW) self.assertEqual(TensorType("pt"), TensorType.PYTORCH) self.assertEqual(TensorType("np"), TensorType.NUMPY) @require_tokenizers def test_batch_encoding_pickle(self): import numpy as np tokenizer_p = BertTokenizer.from_pretrained("bert-base-cased") tokenizer_r = BertTokenizerFast.from_pretrained("bert-base-cased") # Python no tensor with self.subTest("BatchEncoding (Python, return_tensors=None)"): self.assert_dump_and_restore(tokenizer_p("Small example to encode")) with self.subTest("BatchEncoding (Python, return_tensors=NUMPY)"): self.assert_dump_and_restore( tokenizer_p("Small example to encode", return_tensors=TensorType.NUMPY), np.array_equal ) with self.subTest("BatchEncoding (Rust, return_tensors=None)"): self.assert_dump_and_restore(tokenizer_r("Small example to encode")) with self.subTest("BatchEncoding (Rust, return_tensors=NUMPY)"): self.assert_dump_and_restore( tokenizer_r("Small example to encode", return_tensors=TensorType.NUMPY), np.array_equal ) @require_tf @require_tokenizers def test_batch_encoding_pickle_tf(self): import tensorflow as 
tf def tf_array_equals(t1, t2): return tf.reduce_all(tf.equal(t1, t2)) tokenizer_p = BertTokenizer.from_pretrained("bert-base-cased") tokenizer_r = BertTokenizerFast.from_pretrained("bert-base-cased") with self.subTest("BatchEncoding (Python, return_tensors=TENSORFLOW)"): self.assert_dump_and_restore( tokenizer_p("Small example to encode", return_tensors=TensorType.TENSORFLOW), tf_array_equals ) with self.subTest("BatchEncoding (Rust, return_tensors=TENSORFLOW)"): self.assert_dump_and_restore( tokenizer_r("Small example to encode", return_tensors=TensorType.TENSORFLOW), tf_array_equals ) @require_torch @require_tokenizers def test_batch_encoding_pickle_pt(self): import torch tokenizer_p = BertTokenizer.from_pretrained("bert-base-cased") tokenizer_r = BertTokenizerFast.from_pretrained("bert-base-cased") with self.subTest("BatchEncoding (Python, return_tensors=PYTORCH)"): self.assert_dump_and_restore( tokenizer_p("Small example to encode", return_tensors=TensorType.PYTORCH), torch.equal ) with self.subTest("BatchEncoding (Rust, return_tensors=PYTORCH)"): self.assert_dump_and_restore( tokenizer_r("Small example to encode", return_tensors=TensorType.PYTORCH), torch.equal ) @require_tokenizers def test_batch_encoding_is_fast(self): tokenizer_p = BertTokenizer.from_pretrained("bert-base-cased") tokenizer_r = BertTokenizerFast.from_pretrained("bert-base-cased") with self.subTest("Python Tokenizer"): self.assertFalse(tokenizer_p("Small example to_encode").is_fast) with self.subTest("Rust Tokenizer"): self.assertTrue(tokenizer_r("Small example to_encode").is_fast) @require_tokenizers def test_batch_encoding_word_to_tokens(self): tokenizer_r = BertTokenizerFast.from_pretrained("bert-base-cased") encoded = tokenizer_r(["Test", "\xad", "test"], is_split_into_words=True) self.assertEqual(encoded.word_to_tokens(0), TokenSpan(start=1, end=2)) self.assertEqual(encoded.word_to_tokens(1), None) self.assertEqual(encoded.word_to_tokens(2), TokenSpan(start=2, end=3)) def test_batch_encoding_with_labels(self): batch = BatchEncoding({"inputs": [[1, 2, 3], [4, 5, 6]], "labels": [0, 1]}) tensor_batch = batch.convert_to_tensors(tensor_type="np") self.assertEqual(tensor_batch["inputs"].shape, (2, 3)) self.assertEqual(tensor_batch["labels"].shape, (2,)) # test converting the converted with CaptureStderr() as cs: tensor_batch = batch.convert_to_tensors(tensor_type="np") self.assertFalse(len(cs.err), msg=f"should have no warning, but got {cs.err}") batch = BatchEncoding({"inputs": [1, 2, 3], "labels": 0}) tensor_batch = batch.convert_to_tensors(tensor_type="np", prepend_batch_axis=True) self.assertEqual(tensor_batch["inputs"].shape, (1, 3)) self.assertEqual(tensor_batch["labels"].shape, (1,)) @require_torch def test_batch_encoding_with_labels_pt(self): batch = BatchEncoding({"inputs": [[1, 2, 3], [4, 5, 6]], "labels": [0, 1]}) tensor_batch = batch.convert_to_tensors(tensor_type="pt") self.assertEqual(tensor_batch["inputs"].shape, (2, 3)) self.assertEqual(tensor_batch["labels"].shape, (2,)) # test converting the converted with CaptureStderr() as cs: tensor_batch = batch.convert_to_tensors(tensor_type="pt") self.assertFalse(len(cs.err), msg=f"should have no warning, but got {cs.err}") batch = BatchEncoding({"inputs": [1, 2, 3], "labels": 0}) tensor_batch = batch.convert_to_tensors(tensor_type="pt", prepend_batch_axis=True) self.assertEqual(tensor_batch["inputs"].shape, (1, 3)) self.assertEqual(tensor_batch["labels"].shape, (1,)) @require_tf def test_batch_encoding_with_labels_tf(self): batch = BatchEncoding({"inputs": 
[[1, 2, 3], [4, 5, 6]], "labels": [0, 1]}) tensor_batch = batch.convert_to_tensors(tensor_type="tf") self.assertEqual(tensor_batch["inputs"].shape, (2, 3)) self.assertEqual(tensor_batch["labels"].shape, (2,)) # test converting the converted with CaptureStderr() as cs: tensor_batch = batch.convert_to_tensors(tensor_type="tf") self.assertFalse(len(cs.err), msg=f"should have no warning, but got {cs.err}") batch = BatchEncoding({"inputs": [1, 2, 3], "labels": 0}) tensor_batch = batch.convert_to_tensors(tensor_type="tf", prepend_batch_axis=True) self.assertEqual(tensor_batch["inputs"].shape, (1, 3)) self.assertEqual(tensor_batch["labels"].shape, (1,)) @require_flax def test_batch_encoding_with_labels_jax(self): batch = BatchEncoding({"inputs": [[1, 2, 3], [4, 5, 6]], "labels": [0, 1]}) tensor_batch = batch.convert_to_tensors(tensor_type="jax") self.assertEqual(tensor_batch["inputs"].shape, (2, 3)) self.assertEqual(tensor_batch["labels"].shape, (2,)) # test converting the converted with CaptureStderr() as cs: tensor_batch = batch.convert_to_tensors(tensor_type="jax") self.assertFalse(len(cs.err), msg=f"should have no warning, but got {cs.err}") batch = BatchEncoding({"inputs": [1, 2, 3], "labels": 0}) tensor_batch = batch.convert_to_tensors(tensor_type="jax", prepend_batch_axis=True) self.assertEqual(tensor_batch["inputs"].shape, (1, 3)) self.assertEqual(tensor_batch["labels"].shape, (1,)) def test_padding_accepts_tensors(self): features = [{"input_ids": np.array([0, 1, 2])}, {"input_ids": np.array([0, 1, 2, 3])}] tokenizer = BertTokenizer.from_pretrained("bert-base-cased") batch = tokenizer.pad(features, padding=True) self.assertTrue(isinstance(batch["input_ids"], np.ndarray)) self.assertEqual(batch["input_ids"].tolist(), [[0, 1, 2, tokenizer.pad_token_id], [0, 1, 2, 3]]) batch = tokenizer.pad(features, padding=True, return_tensors="np") self.assertTrue(isinstance(batch["input_ids"], np.ndarray)) self.assertEqual(batch["input_ids"].tolist(), [[0, 1, 2, tokenizer.pad_token_id], [0, 1, 2, 3]]) @require_torch def test_padding_accepts_tensors_pt(self): import torch features = [{"input_ids": torch.tensor([0, 1, 2])}, {"input_ids": torch.tensor([0, 1, 2, 3])}] tokenizer = BertTokenizer.from_pretrained("bert-base-cased") batch = tokenizer.pad(features, padding=True) self.assertTrue(isinstance(batch["input_ids"], torch.Tensor)) self.assertEqual(batch["input_ids"].tolist(), [[0, 1, 2, tokenizer.pad_token_id], [0, 1, 2, 3]]) batch = tokenizer.pad(features, padding=True, return_tensors="pt") self.assertTrue(isinstance(batch["input_ids"], torch.Tensor)) self.assertEqual(batch["input_ids"].tolist(), [[0, 1, 2, tokenizer.pad_token_id], [0, 1, 2, 3]]) @require_tf def test_padding_accepts_tensors_tf(self): import tensorflow as tf features = [{"input_ids": tf.constant([0, 1, 2])}, {"input_ids": tf.constant([0, 1, 2, 3])}] tokenizer = BertTokenizer.from_pretrained("bert-base-cased") batch = tokenizer.pad(features, padding=True) self.assertTrue(isinstance(batch["input_ids"], tf.Tensor)) self.assertEqual(batch["input_ids"].numpy().tolist(), [[0, 1, 2, tokenizer.pad_token_id], [0, 1, 2, 3]]) batch = tokenizer.pad(features, padding=True, return_tensors="tf") self.assertTrue(isinstance(batch["input_ids"], tf.Tensor)) self.assertEqual(batch["input_ids"].numpy().tolist(), [[0, 1, 2, tokenizer.pad_token_id], [0, 1, 2, 3]])
AdaMix/tests/test_tokenization_utils.py/0
{ "file_path": "AdaMix/tests/test_tokenization_utils.py", "repo_id": "AdaMix", "token_count": 5256 }
78
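As a compact illustration of the `BatchEncoding` conversions these utility tests cover, the sketch below builds encodings from plain Python lists and converts them to NumPy arrays; the expected shapes match the assertions in the tests above. It is an illustrative snippet, not part of the test file.

```python
from transformers import BatchEncoding

# A batched encoding: converting yields arrays of shape (2, 3) and (2,).
batch = BatchEncoding({"inputs": [[1, 2, 3], [4, 5, 6]], "labels": [0, 1]})
batch.convert_to_tensors(tensor_type="np")
print(batch["inputs"].shape, batch["labels"].shape)

# A single example can receive a batch axis during conversion: (1, 3) and (1,).
single = BatchEncoding({"inputs": [1, 2, 3], "labels": 0})
single.convert_to_tensors(tensor_type="np", prepend_batch_axis=True)
print(single["inputs"].shape, single["labels"].shape)
```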
# coding=utf-8 # Copyright 2020 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import importlib import inspect import os import re from pathlib import Path # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_repo.py PATH_TO_TRANSFORMERS = "src/transformers" PATH_TO_TESTS = "tests" PATH_TO_DOC = "docs/source" # Update this list for models that are not tested with a comment explaining the reason it should not be. # Being in this list is an exception and should **not** be the rule. IGNORE_NON_TESTED = [ # models to ignore for not tested "M2M100Encoder", # Building part of bigger (tested) model. "M2M100Decoder", # Building part of bigger (tested) model. "Speech2TextEncoder", # Building part of bigger (tested) model. "Speech2TextDecoder", # Building part of bigger (tested) model. "LEDEncoder", # Building part of bigger (tested) model. "LEDDecoder", # Building part of bigger (tested) model. "BartDecoderWrapper", # Building part of bigger (tested) model. "BartEncoder", # Building part of bigger (tested) model. "BertLMHeadModel", # Needs to be setup as decoder. "BlenderbotSmallEncoder", # Building part of bigger (tested) model. "BlenderbotSmallDecoderWrapper", # Building part of bigger (tested) model. "BlenderbotEncoder", # Building part of bigger (tested) model. "BlenderbotDecoderWrapper", # Building part of bigger (tested) model. "MBartEncoder", # Building part of bigger (tested) model. "MBartDecoderWrapper", # Building part of bigger (tested) model. "PegasusEncoder", # Building part of bigger (tested) model. "PegasusDecoderWrapper", # Building part of bigger (tested) model. "DPREncoder", # Building part of bigger (tested) model. "DPRSpanPredictor", # Building part of bigger (tested) model. "ProphetNetDecoderWrapper", # Building part of bigger (tested) model. "ReformerForMaskedLM", # Needs to be setup as decoder. "T5Stack", # Building part of bigger (tested) model. "TFDPREncoder", # Building part of bigger (tested) model. "TFDPRSpanPredictor", # Building part of bigger (tested) model. "TFElectraMainLayer", # Building part of bigger (tested) model (should it be a TFPreTrainedModel ?) "TFRobertaForMultipleChoice", # TODO: fix "SeparableConv1D", # Building part of bigger (tested) model. ] # Update this list with test files that don't have a tester with a `all_model_classes` variable and which don't # trigger the common tests. TEST_FILES_WITH_NO_COMMON_TESTS = [ "test_modeling_camembert.py", "test_modeling_flax_bert.py", "test_modeling_flax_roberta.py", "test_modeling_mbart.py", "test_modeling_mt5.py", "test_modeling_pegasus.py", "test_modeling_tf_camembert.py", "test_modeling_tf_mt5.py", "test_modeling_tf_xlm_roberta.py", "test_modeling_xlm_prophetnet.py", "test_modeling_xlm_roberta.py", ] # Update this list for models that are not in any of the auto MODEL_XXX_MAPPING. Being in this list is an exception and # should **not** be the rule. 
IGNORE_NON_AUTO_CONFIGURED = [ # models to ignore for model xxx mapping "M2M100Encoder", "M2M100Decoder", "Speech2TextEncoder", "Speech2TextDecoder", "LEDEncoder", "LEDDecoder", "BartDecoder", "BartDecoderWrapper", "BartEncoder", "BlenderbotSmallEncoder", "BlenderbotSmallDecoder", "BlenderbotSmallDecoderWrapper", "BlenderbotEncoder", "BlenderbotDecoder", "BlenderbotDecoderWrapper", "DPRContextEncoder", "DPREncoder", "DPRReader", "DPRSpanPredictor", "FlaubertForQuestionAnswering", "FunnelBaseModel", "GPT2DoubleHeadsModel", "MT5EncoderModel", "MBartEncoder", "MBartDecoder", "MBartDecoderWrapper", "OpenAIGPTDoubleHeadsModel", "PegasusEncoder", "PegasusDecoder", "PegasusDecoderWrapper", "ProphetNetDecoder", "ProphetNetEncoder", "ProphetNetDecoderWrapper", "RagModel", "RagSequenceForGeneration", "RagTokenForGeneration", "T5Stack", "T5EncoderModel", "TFDPRContextEncoder", "TFDPREncoder", "TFDPRReader", "TFDPRSpanPredictor", "TFFunnelBaseModel", "TFGPT2DoubleHeadsModel", "TFMT5EncoderModel", "TFOpenAIGPTDoubleHeadsModel", "TFRagModel", "TFRagSequenceForGeneration", "TFRagTokenForGeneration", "TFT5EncoderModel", "Wav2Vec2ForCTC", "XLMForQuestionAnswering", "XLMProphetNetDecoder", "XLMProphetNetEncoder", "XLNetForQuestionAnswering", "SeparableConv1D", ] # This is to make sure the transformers module imported is the one in the repo. spec = importlib.util.spec_from_file_location( "transformers", os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), submodule_search_locations=[PATH_TO_TRANSFORMERS], ) transformers = spec.loader.load_module() # If some modeling modules should be ignored for all checks, they should be added in the nested list # _ignore_modules of this function. def get_model_modules(): """ Get the model modules inside the transformers library. """ _ignore_modules = [ "modeling_auto", "modeling_encoder_decoder", "modeling_marian", "modeling_mmbt", "modeling_outputs", "modeling_retribert", "modeling_utils", "modeling_flax_auto", "modeling_flax_utils", "modeling_transfo_xl_utilities", "modeling_tf_auto", "modeling_tf_outputs", "modeling_tf_pytorch_utils", "modeling_tf_utils", "modeling_tf_transfo_xl_utilities", ] modules = [] for model in dir(transformers.models): # There are some magic dunder attributes in the dir, we ignore them if not model.startswith("__"): model_module = getattr(transformers.models, model) for submodule in dir(model_module): if submodule.startswith("modeling") and submodule not in _ignore_modules: modeling_module = getattr(model_module, submodule) if inspect.ismodule(modeling_module): modules.append(modeling_module) return modules def get_models(module): """ Get the objects in module that are models.""" models = [] model_classes = (transformers.PreTrainedModel, transformers.TFPreTrainedModel) for attr_name in dir(module): if "Pretrained" in attr_name or "PreTrained" in attr_name: continue attr = getattr(module, attr_name) if isinstance(attr, type) and issubclass(attr, model_classes) and attr.__module__ == module.__name__: models.append((attr_name, attr)) return models # If some test_modeling files should be ignored when checking models are all tested, they should be added in the # nested list _ignore_files of this function. 
def get_model_test_files(): """ Get the model test files.""" _ignore_files = [ "test_modeling_common", "test_modeling_encoder_decoder", "test_modeling_marian", "test_modeling_tf_common", ] test_files = [] for filename in os.listdir(PATH_TO_TESTS): if ( os.path.isfile(f"{PATH_TO_TESTS}/{filename}") and filename.startswith("test_modeling") and not os.path.splitext(filename)[0] in _ignore_files ): test_files.append(filename) return test_files # This is a bit hacky but I didn't find a way to import the test_file as a module and read inside the tester class # for the all_model_classes variable. def find_tested_models(test_file): """ Parse the content of test_file to detect what's in all_model_classes""" # This is a bit hacky but I didn't find a way to import the test_file as a module and read inside the class with open(os.path.join(PATH_TO_TESTS, test_file), "r", encoding="utf-8", newline="\n") as f: content = f.read() all_models = re.findall(r"all_model_classes\s+=\s+\(\s*\(([^\)]*)\)", content) # Check with one less parenthesis as well all_models += re.findall(r"all_model_classes\s+=\s+\(([^\)]*)\)", content) if len(all_models) > 0: model_tested = [] for entry in all_models: for line in entry.split(","): name = line.strip() if len(name) > 0: model_tested.append(name) return model_tested def check_models_are_tested(module, test_file): """ Check models defined in module are tested in test_file.""" defined_models = get_models(module) tested_models = find_tested_models(test_file) if tested_models is None: if test_file in TEST_FILES_WITH_NO_COMMON_TESTS: return return [ f"{test_file} should define `all_model_classes` to apply common tests to the models it tests. " + "If this intentional, add the test filename to `TEST_FILES_WITH_NO_COMMON_TESTS` in the file " + "`utils/check_repo.py`." ] failures = [] for model_name, _ in defined_models: if model_name not in tested_models and model_name not in IGNORE_NON_TESTED: failures.append( f"{model_name} is defined in {module.__name__} but is not tested in " + f"{os.path.join(PATH_TO_TESTS, test_file)}. Add it to the all_model_classes in that file." + "If common tests should not applied to that model, add its name to `IGNORE_NON_TESTED`" + "in the file `utils/check_repo.py`." ) return failures def check_all_models_are_tested(): """ Check all models are properly tested.""" modules = get_model_modules() test_files = get_model_test_files() failures = [] for module in modules: test_file = f"test_{module.__name__.split('.')[-1]}.py" if test_file not in test_files: failures.append(f"{module.__name__} does not have its corresponding test file {test_file}.") new_failures = check_models_are_tested(module, test_file) if new_failures is not None: failures += new_failures if len(failures) > 0: raise Exception(f"There were {len(failures)} failures:\n" + "\n".join(failures)) def get_all_auto_configured_models(): """ Return the list of all models in at least one auto class.""" result = set() # To avoid duplicates we concatenate all model classes in a set. 
for attr_name in dir(transformers.models.auto.modeling_auto): if attr_name.startswith("MODEL_") and attr_name.endswith("MAPPING"): result = result | set(getattr(transformers.models.auto.modeling_auto, attr_name).values()) for attr_name in dir(transformers.models.auto.modeling_tf_auto): if attr_name.startswith("TF_MODEL_") and attr_name.endswith("MAPPING"): result = result | set(getattr(transformers.models.auto.modeling_tf_auto, attr_name).values()) return [cls.__name__ for cls in result] def check_models_are_auto_configured(module, all_auto_models): """ Check models defined in module are each in an auto class.""" defined_models = get_models(module) failures = [] for model_name, _ in defined_models: if model_name not in all_auto_models and model_name not in IGNORE_NON_AUTO_CONFIGURED: failures.append( f"{model_name} is defined in {module.__name__} but is not present in any of the auto mapping. " "If that is intended behavior, add its name to `IGNORE_NON_AUTO_CONFIGURED` in the file " "`utils/check_repo.py`." ) return failures def check_all_models_are_auto_configured(): """ Check all models are each in an auto class.""" modules = get_model_modules() all_auto_models = get_all_auto_configured_models() failures = [] for module in modules: new_failures = check_models_are_auto_configured(module, all_auto_models) if new_failures is not None: failures += new_failures if len(failures) > 0: raise Exception(f"There were {len(failures)} failures:\n" + "\n".join(failures)) _re_decorator = re.compile(r"^\s*@(\S+)\s+$") def check_decorator_order(filename): """ Check that in the test file `filename` the slow decorator is always last.""" with open(filename, "r", encoding="utf-8", newline="\n") as f: lines = f.readlines() decorator_before = None errors = [] for i, line in enumerate(lines): search = _re_decorator.search(line) if search is not None: decorator_name = search.groups()[0] if decorator_before is not None and decorator_name.startswith("parameterized"): errors.append(i) decorator_before = decorator_name elif decorator_before is not None: decorator_before = None return errors def check_all_decorator_order(): """ Check that in all test files, the slow decorator is always last.""" errors = [] for fname in os.listdir(PATH_TO_TESTS): if fname.endswith(".py"): filename = os.path.join(PATH_TO_TESTS, fname) new_errors = check_decorator_order(filename) errors += [f"- {filename}, line {i}" for i in new_errors] if len(errors) > 0: msg = "\n".join(errors) raise ValueError( f"The parameterized decorator (and its variants) should always be first, but this is not the case in the following files:\n{msg}" ) def find_all_documented_objects(): """ Parse the content of all doc files to detect which classes and functions it documents""" documented_obj = [] for doc_file in Path(PATH_TO_DOC).glob("**/*.rst"): with open(doc_file, "r", encoding="utf-8", newline="\n") as f: content = f.read() raw_doc_objs = re.findall(r"(?:autoclass|autofunction):: transformers.(\S+)\s+", content) documented_obj += [obj.split(".")[-1] for obj in raw_doc_objs] return documented_obj # One good reason for not being documented is to be deprecated. Put in this list deprecated objects. 
DEPRECATED_OBJECTS = [ "AutoModelWithLMHead", "BartPretrainedModel", "GlueDataset", "GlueDataTrainingArguments", "LineByLineTextDataset", "LineByLineWithRefDataset", "LineByLineWithSOPTextDataset", "PretrainedBartModel", "PretrainedFSMTModel", "SingleSentenceClassificationProcessor", "SquadDataTrainingArguments", "SquadDataset", "SquadExample", "SquadFeatures", "SquadV1Processor", "SquadV2Processor", "TFAutoModelWithLMHead", "TFBartPretrainedModel", "TextDataset", "TextDatasetForNextSentencePrediction", "Wav2Vec2ForMaskedLM", "Wav2Vec2Tokenizer", "glue_compute_metrics", "glue_convert_examples_to_features", "glue_output_modes", "glue_processors", "glue_tasks_num_labels", "squad_convert_examples_to_features", "xnli_compute_metrics", "xnli_output_modes", "xnli_processors", "xnli_tasks_num_labels", ] # Exceptionally, some objects should not be documented after all rules passed. # ONLY PUT SOMETHING IN THIS LIST AS A LAST RESORT! UNDOCUMENTED_OBJECTS = [ "AddedToken", # This is a tokenizers class. "BasicTokenizer", # Internal, should never have been in the main init. "DPRPretrainedReader", # Like an Encoder. "ModelCard", # Internal type. "SqueezeBertModule", # Internal building block (should have been called SqueezeBertLayer) "TFDPRPretrainedReader", # Like an Encoder. "TransfoXLCorpus", # Internal type. "WordpieceTokenizer", # Internal, should never have been in the main init. "absl", # External module "add_end_docstrings", # Internal, should never have been in the main init. "add_start_docstrings", # Internal, should never have been in the main init. "cached_path", # Internal used for downloading models. "convert_tf_weight_name_to_pt_weight_name", # Internal used to convert model weights "logger", # Internal logger "logging", # External module ] # This list should be empty. Objects in it should get their own doc page. SHOULD_HAVE_THEIR_OWN_PAGE = [ # bert-japanese "BertJapaneseTokenizer", "CharacterTokenizer", "MecabTokenizer", # Benchmarks "PyTorchBenchmark", "PyTorchBenchmarkArguments", "TensorFlowBenchmark", "TensorFlowBenchmarkArguments", ] def ignore_undocumented(name): """Rules to determine if `name` should be undocumented.""" # NOT DOCUMENTED ON PURPOSE. # Constants uppercase are not documented. if name.isupper(): return True # PreTrainedModels / Encoders / Decoders / Layers / Embeddings / Attention are not documented. if ( name.endswith("PreTrainedModel") or name.endswith("Decoder") or name.endswith("Encoder") or name.endswith("Layer") or name.endswith("Embeddings") or name.endswith("Attention") ): return True # Submodules are not documented. if os.path.isdir(os.path.join(PATH_TO_TRANSFORMERS, name)) or os.path.isfile( os.path.join(PATH_TO_TRANSFORMERS, f"{name}.py") ): return True # All load functions are not documented. if name.startswith("load_tf") or name.startswith("load_pytorch"): return True # is_xxx_available functions are not documented. if name.startswith("is_") and name.endswith("_available"): return True # Deprecated objects are not documented. if name in DEPRECATED_OBJECTS or name in UNDOCUMENTED_OBJECTS: return True # MMBT model does not really work. if name.startswith("MMBT"): return True # NOT DOCUMENTED BUT NOT ON PURPOSE, SHOULD BE FIXED! 
# All data collators should be documented if name.startswith("DataCollator") or name.endswith("data_collator"): return True if name in SHOULD_HAVE_THEIR_OWN_PAGE: return True return False def check_all_objects_are_documented(): """ Check all models are properly documented.""" documented_objs = find_all_documented_objects() modules = transformers._modules objects = [c for c in dir(transformers) if c not in modules and not c.startswith("_")] undocumented_objs = [c for c in objects if c not in documented_objs and not ignore_undocumented(c)] if len(undocumented_objs) > 0: raise Exception( "The following objects are in the public init so should be documented:\n - " + "\n - ".join(undocumented_objs) ) def check_repo_quality(): """ Check all models are properly tested and documented.""" print("Checking all models are properly tested.") check_all_decorator_order() check_all_models_are_tested() print("Checking all objects are properly documented.") check_all_objects_are_documented() print("Checking all models are in at least one auto class.") check_all_models_are_auto_configured() if __name__ == "__main__": check_repo_quality()
AdaMix/utils/check_repo.py/0
{ "file_path": "AdaMix/utils/check_repo.py", "repo_id": "AdaMix", "token_count": 7766 }
79
from argparse import ArgumentParser import airsimdroneracinglab as airsim import time import utils import threading import numpy as np import cv2 from baseline_racer import BaselineRacer class BaselineRacerImageBenchmarker(BaselineRacer): def __init__( self, img_benchmark_type="simGetImage", drone_name="drone_1", viz_traj=False, viz_traj_color_rgba=[1.0, 1.0, 0.0, 1.0], viz_image_cv2=False, ): super().__init__( drone_name=drone_name, viz_traj=viz_traj, viz_image_cv2=viz_image_cv2 ) self.image_benchmark_num_images = 0 self.image_benchmark_total_time = 0.0 self.image_callback_thread = None if img_benchmark_type == "simGetImage": self.image_callback_thread = threading.Thread( target=self.repeat_timer_img, args=(self.image_callback_benchmark_simGetImage, 0.05), ) if img_benchmark_type == "simGetImages": self.image_callback_thread = threading.Thread( target=self.repeat_timer_img, args=(self.image_callback_benchmark_simGetImages, 0.05), ) self.is_image_thread_active = False def start_img_benchmark_thread(self): if not self.is_image_thread_active: self.is_image_thread_active = True self.image_callback_thread.start() print("Started img image_callback thread") def stop_img_benchmark_thread(self): if self.is_image_thread_active: self.is_image_thread_active = False self.image_callback_thread.join() print("Stopped image callback thread.") def repeat_timer_img(self, task, period): while self.is_image_thread_active: task() time.sleep(period) def print_benchmark_results(self): avg_fps = 1.0 / ( (self.image_benchmark_total_time) / float(self.image_benchmark_num_images) ) print( self.level_name + ": {} avg_fps for {} num of images".format( avg_fps, self.image_benchmark_num_images ) ) def image_callback_benchmark_simGetImage(self): self.image_benchmark_num_images += 1 iter_start_time = time.time() response = self.airsim_client_images.simGetImage( "fpv_cam", airsim.ImageType.Scene ) img_rgb = cv2.imdecode( airsim.string_to_uint8_array(response), cv2.IMREAD_UNCHANGED ) self.image_benchmark_total_time += time.time() - iter_start_time avg_fps = 1.0 / ( (self.image_benchmark_total_time) / float(self.image_benchmark_num_images) ) print( self.level_name + ": {} avg_fps for {} num of images".format( avg_fps, self.image_benchmark_num_images ) ) # uncomment following lines to viz image # if self.viz_image_cv2: # cv2.imshow("img_rgb", img_rgb_1d_new) # cv2.waitKey(1) def image_callback_benchmark_simGetImages(self): self.image_benchmark_num_images += 1 iter_start_time = time.time() request = [airsim.ImageRequest("fpv_cam", airsim.ImageType.Scene, False, False)] response = self.airsim_client_images.simGetImages(request) img_rgb_1d = np.fromstring(response[0].image_data_uint8, dtype=np.uint8) img_rgb = img_rgb_1d.reshape(response[0].height, response[0].width, 3) self.image_benchmark_total_time += time.time() - iter_start_time avg_fps = 1.0 / ( (self.image_benchmark_total_time) / float(self.image_benchmark_num_images) ) print( self.level_name + ": {} avg_fps for {} num of images".format( avg_fps, self.image_benchmark_num_images ) ) # uncomment following lines to viz image # if self.viz_image_cv2: # cv2.imshow("img_rgb", img_rgb_1d_new) # cv2.waitKey(1) def main(args): # ensure you have generated the neurips planning settings file by running python generate_settings_file.py baseline_racer = BaselineRacerImageBenchmarker( img_benchmark_type=args.img_benchmark_type, drone_name="drone_1", viz_traj=args.viz_traj, viz_traj_color_rgba=[1.0, 1.0, 0.0, 1.0], viz_image_cv2=args.viz_image_cv2, ) 
baseline_racer.load_level(args.level_name) if args.level_name == "Qualifier_Tier_1": args.race_tier = 1 if args.level_name == "Qualifier_Tier_2": args.race_tier = 2 if args.level_name == "Qualifier_Tier_3": args.race_tier = 3 baseline_racer.start_race(args.race_tier) baseline_racer.initialize_drone() baseline_racer.takeoff_with_moveOnSpline() baseline_racer.get_ground_truth_gate_poses() baseline_racer.start_img_benchmark_thread() baseline_racer.fly_through_all_gates_at_once_with_moveOnSpline().join() baseline_racer.stop_img_benchmark_thread() baseline_racer.print_benchmark_results() if __name__ == "__main__": parser = ArgumentParser() parser.add_argument( "--level_name", type=str, choices=[ "Soccer_Field_Easy", "Soccer_Field_Medium", "ZhangJiaJie_Medium", "Building99_Hard", "Qualifier_Tier_1", "Qualifier_Tier_2", "Qualifier_Tier_3", ], default="ZhangJiaJie_Medium", ) parser.add_argument( "--enable_viz_traj", dest="viz_traj", action="store_true", default=False ) parser.add_argument( "--img_benchmark_type", type=str, choices=["simGetImage", "simGetImages"], default="simGetImages", ) parser.add_argument( "--enable_viz_image_cv2", dest="viz_image_cv2", action="store_true", default=False, ) parser.add_argument("--race_tier", type=int, choices=[1, 2, 3], default=1) args = parser.parse_args() main(args)
AirSim-Drone-Racing-Lab/baselines/baseline_racer_image_benchmarker.py/0
{ "file_path": "AirSim-Drone-Racing-Lab/baselines/baseline_racer_image_benchmarker.py", "repo_id": "AirSim-Drone-Racing-Lab", "token_count": 2927 }
80
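The command-line entry point above can also be driven programmatically; the sketch below mirrors `main()` with hard-coded arguments. It is a sketch under the assumptions that the ADRL simulator binary is already running, the AirSim `settings.json` has been generated, and the script is importable as a module from the baselines directory.

```python
from baseline_racer_image_benchmarker import BaselineRacerImageBenchmarker

racer = BaselineRacerImageBenchmarker(
    img_benchmark_type="simGetImages",  # benchmark the batched image API
    drone_name="drone_1",
    viz_traj=False,
    viz_image_cv2=False,
)
racer.load_level("ZhangJiaJie_Medium")
racer.start_race(1)
racer.initialize_drone()
racer.takeoff_with_moveOnSpline()
racer.get_ground_truth_gate_poses()
racer.start_img_benchmark_thread()           # start timing image requests
racer.fly_through_all_gates_at_once_with_moveOnSpline().join()
racer.stop_img_benchmark_thread()
racer.print_benchmark_results()              # average FPS over the run
```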
import airsimdroneracinglab import threading import time class ReproduceResetRaceCondition: def __init__(self, drone_name="drone_1"): self.airsim_client = airsimdroneracinglab.MultirotorClient() self.airsim_client_2 = airsimdroneracinglab.MultirotorClient() self.airsim_client_3 = airsimdroneracinglab.MultirotorClient() self.drone_name = drone_name self.is_thread_active = False self.thread_reset = threading.Thread( target=self.repeat_timer, args=(self.reset, 0.05) ) self.thread_reset_race = threading.Thread( target=self.repeat_timer, args=(self.reset_race, 0.03) ) self.thread_reset_and_reset_race = threading.Thread( target=self.repeat_timer, args=(self.reset_and_reset_race, 0.09) ) self.is_thread_active = False def repeat_timer(self, callback, period): while self.is_thread_active: callback() time.sleep(period) def load_level(self, level_name, sleep_sec=2.0): self.level_name = level_name self.airsim_client.simLoadLevel(self.level_name) self.airsim_client.confirmConnection() # failsafe time.sleep(sleep_sec) # let the environment load completely def reset(self): print(time.time(), "called reset") self.airsim_client.reset() def reset_race(self): print(time.time(), "called simResetRace") self.airsim_client_2.simResetRace() def reset_and_reset_race(self): print(time.time(), "called reset, followed by simResetRace") self.airsim_client_3.reset() self.airsim_client_3.simResetRace() def start_race(self, tier): print(time.time(), "called start race") self.airsim_client.simStartRace(tier) def initialize_drone(self): self.airsim_client.enableApiControl(vehicle_name=self.drone_name) self.airsim_client.arm(vehicle_name=self.drone_name) # set default values for trajectory tracker gains traj_tracker_gains = airsimdroneracinglab.TrajectoryTrackerGains( kp_cross_track=5.0, kd_cross_track=0.0, kp_vel_cross_track=3.0, kd_vel_cross_track=0.0, kp_along_track=0.4, kd_along_track=0.0, kp_vel_along_track=0.04, kd_vel_along_track=0.0, kp_z_track=2.0, kd_z_track=0.0, kp_vel_z=0.4, kd_vel_z=0.0, kp_yaw=3.0, kd_yaw=0.1, ) self.airsim_client.setTrajectoryTrackerGains( traj_tracker_gains, vehicle_name=self.drone_name ) time.sleep(0.2) def start_threads(self): if not self.is_thread_active: self.is_thread_active = True self.thread_reset.start() self.thread_reset_race.start() self.thread_reset_and_reset_race.start() print("Started threads") def stop_threads(self): if self.is_thread_active: self.is_thread_active = False self.thread_reset.join() self.thread_reset_race.join() self.thread_reset_and_reset_race.join() print("Stopped threads.") if __name__ == "__main__": reproducer = ReproduceResetRaceCondition("drone_1") reproducer.load_level("Qualifier_Tier_1") reproducer.initialize_drone() reproducer.start_race(3) time.sleep(5) reproducer.start_threads() time.sleep(3600) reproducer.stop_threads()
AirSim-Drone-Racing-Lab/tests/test_reset.py/0
{ "file_path": "AirSim-Drone-Racing-Lab/tests/test_reset.py", "repo_id": "AirSim-Drone-Racing-Lab", "token_count": 1714 }
81
import tensorflow as tf import os import sys curr_dir = os.path.dirname(os.path.abspath(__file__)) # imports import_path = os.path.join(curr_dir, '..') sys.path.insert(0, import_path) import racing_models.cmvae import racing_utils # DEFINE TRAINING META PARAMETERS data_dir = '/home/rb/all_files/airsim_datasets/soccer_1k' output_dir = '/home/rb/all_files/model_outputs/cmvae_con' batch_size = 32 epochs = 50 n_z = 10 latent_space_constraints = True img_res = 64 max_size = None # default is None learning_rate = 1e-4 # CUSTOM TF FUNCTIONS @tf.function def calc_weighted_loss_img(img_recon, images_np): flat_pred = tf.reshape(img_recon, [-1]) flat_gt = tf.reshape(images_np, [-1]) error_sq = tf.math.squared_difference(flat_gt, flat_pred) softmax_weights = tf.math.exp(error_sq) / tf.reduce_sum(tf.math.exp(error_sq)) weighted_error_sq = error_sq * softmax_weights loss = tf.reduce_sum(weighted_error_sq) return loss def reset_metrics(): train_loss_rec_img.reset_states() train_loss_rec_gate.reset_states() train_loss_kl.reset_states() test_loss_rec_img.reset_states() test_loss_rec_gate.reset_states() test_loss_kl.reset_states() @tf.function def regulate_weights(epoch): # for beta if epoch < 10.0: beta = 8.0 else: beta = 8.0 # t = 10 # beta_min = 0.0 #0.000001 # beta_max = 1.0 #0.0001 # if epoch < t: # # beta = beta_min + epoch/t*(beta_max-beta_min) # beta = beta_max * 0.95**(t-epoch) # ranges from 0.00592052922 to 0.95 # else: # beta = beta_max # for w_img if epoch < 100: w_img = 1.0 else: w_img = 1.0 # for w_gate if epoch < 100: w_gate = 1.0 else: w_gate = 1.0 return beta, w_img, w_gate @tf.function def compute_loss_unsupervised(img_gt, gate_gt, img_recon, gate_recon, means, stddev, mode): # compute reconstruction loss if mode == 0: img_loss = tf.losses.mean_squared_error(img_gt, img_recon) # img_loss = tf.losses.mean_absolute_error(img_gt, img_recon) gate_loss = tf.losses.mean_squared_error(gate_gt, gate_recon) kl_loss = -0.5 * tf.reduce_mean(tf.reduce_sum((1 + stddev - tf.math.pow(means, 2) - tf.math.exp(stddev)), axis=1)) # elif mode == 1: # # labels = tf.reshape(labels, predictions.shape) # # recon_loss = tf.losses.mean_squared_error(labels, predictions) # # recon_loss = loss_object(labels, predictions) # print('Predictions: {}'.format(predictions)) # print('Labels: {}'.format(labels)) # print('Lrec: {}'.format(recon_loss)) # copute KL loss: D_KL(Q(z|X,y) || P(z|X)) return img_loss, gate_loss, kl_loss @tf.function def train(img_gt, gate_gt, epoch, mode): # freeze the non-utilized weights # if mode == 0: # model.q_img.trainable = True # model.p_img.trainable = True # model.p_gate.trainable = True # elif mode == 1: # model.q_img.trainable = True # model.p_img.trainable = True # model.p_gate.trainable = False # elif mode == 2: # model.q_img.trainable = True # model.p_img.trainable = False # model.p_gate.trainable = True with tf.GradientTape() as tape: img_recon, gate_recon, means, stddev, z = model(img_gt, mode) img_loss, gate_loss, kl_loss = compute_loss_unsupervised(img_gt, gate_gt, img_recon, gate_recon, means, stddev, mode) img_loss = tf.reduce_mean(img_loss) gate_loss = tf.reduce_mean(gate_loss) beta, w_img, w_gate = regulate_weights(epoch) # weighted_loss_img = calc_weighted_loss_img(img_recon, img_gt) if mode == 0: total_loss = w_img*img_loss + w_gate*gate_loss + beta*kl_loss # total_loss = w_img * img_loss + beta * kl_loss # total_loss = weighted_loss_img + gate_loss + beta * kl_loss # total_loss = img_loss train_loss_rec_img.update_state(img_loss) train_loss_rec_gate.update_state(gate_loss) 
train_loss_kl.update_state(kl_loss) # TODO: later create structure for other training modes -- for now just training everything together # elif mode==1: # total_loss = img_loss + beta*kl_loss # train_kl_loss_m1(kl_loss) # elif mode==2: # total_loss = gate_loss + beta*kl_loss gradients = tape.gradient(total_loss, model.trainable_variables) optimizer.apply_gradients(zip(gradients, model.trainable_variables)) @tf.function def test(img_gt, gate_gt, mode): img_recon, gate_recon, means, stddev, z = model(img_gt, mode) img_loss, gate_loss, kl_loss = compute_loss_unsupervised(img_gt, gate_gt, img_recon, gate_recon, means, stddev, mode) img_loss = tf.reduce_mean(img_loss) gate_loss = tf.reduce_mean(gate_loss) if mode == 0: test_loss_rec_img.update_state(img_loss) test_loss_rec_gate.update_state(gate_loss) test_loss_kl.update_state(kl_loss) os.environ['TF_CPP_MIN_LOG_LEVEL'] = '0' # 0 = all messages are logged (default behavior) # 1 = INFO messages are not printed # 2 = INFO and WARNING messages are not printed # 3 = INFO, WARNING, and ERROR messages are not printed # allow growth is possible using an env var in tf2.0 os.environ['TF_FORCE_GPU_ALLOW_GROWTH'] = 'true' # load dataset print('Starting dataset') train_ds, test_ds = racing_utils.dataset_utils.create_dataset_csv(data_dir, batch_size, img_res, max_size=max_size) print('Done with dataset') # create model if latent_space_constraints is True: model = racing_models.cmvae.CmvaeDirect(n_z=n_z, gate_dim=4, res=img_res, trainable_model=True) else: model = racing_models.cmvae.Cmvae(n_z=n_z, gate_dim=4, res=img_res, trainable_model=True) # create optimizer optimizer = tf.keras.optimizers.Adam(lr=learning_rate) # define metrics train_loss_rec_img = tf.keras.metrics.Mean(name='train_loss_rec_img') train_loss_rec_gate = tf.keras.metrics.Mean(name='train_loss_rec_gate') train_loss_kl = tf.keras.metrics.Mean(name='train_loss_kl') test_loss_rec_img = tf.keras.metrics.Mean(name='test_loss_rec_img') test_loss_rec_gate = tf.keras.metrics.Mean(name='test_loss_rec_gate') test_loss_kl = tf.keras.metrics.Mean(name='test_loss_kl') metrics_writer = tf.summary.create_file_writer(output_dir) # check if output folder exists if not os.path.isdir(output_dir): os.makedirs(output_dir) # train print('Start training ...') mode = 0 flag = True for epoch in range(epochs): # print('MODE NOW: {}'.format(mode)) for train_images, train_labels in train_ds: train(train_images, train_labels, epoch, mode) if flag: model.summary() flag = False for test_images, test_labels in test_ds: test(test_images, test_labels, mode) # save model if epoch % 5 == 0 and epoch > 0: print('Saving weights to {}'.format(output_dir)) model.save_weights(os.path.join(output_dir, "cmvae_model_{}.ckpt".format(epoch))) if mode == 0: with metrics_writer.as_default(): tf.summary.scalar('train_loss_rec_img', train_loss_rec_img.result(), step=epoch) tf.summary.scalar('train_loss_rec_gate', train_loss_rec_gate.result(), step=epoch) tf.summary.scalar('train_loss_kl', train_loss_kl.result(), step=epoch) tf.summary.scalar('test_loss_rec_img', test_loss_rec_img.result(), step=epoch) tf.summary.scalar('test_loss_rec_gate', test_loss_rec_gate.result(), step=epoch) tf.summary.scalar('test_loss_kl', test_loss_kl.result(), step=epoch) print('Epoch {} | TRAIN: L_img: {}, L_gate: {}, L_kl: {}, L_tot: {} | TEST: L_img: {}, L_gate: {}, L_kl: {}, L_tot: {}' .format(epoch, train_loss_rec_img.result(), train_loss_rec_gate.result(), train_loss_kl.result(), 
train_loss_rec_img.result()+train_loss_rec_gate.result()+train_loss_kl.result(), test_loss_rec_img.result(), test_loss_rec_gate.result(), test_loss_kl.result(), test_loss_rec_img.result() + test_loss_rec_gate.result() + test_loss_kl.result() )) reset_metrics() # reset all the accumulators of metrics print('End of training')
AirSim-Drone-Racing-VAE-Imitation/cmvae/train_cmvae.py/0
{ "file_path": "AirSim-Drone-Racing-VAE-Imitation/cmvae/train_cmvae.py", "repo_id": "AirSim-Drone-Racing-VAE-Imitation", "token_count": 3684 }
82
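To make the arithmetic of the training objective above easier to see, here is the same loss factored into a standalone helper. The function name and default weights are illustrative; the individual terms are copied from `compute_loss_unsupervised` and `regulate_weights` in the script (pixel and gate reconstruction MSE plus a β-weighted KL term against a unit-Gaussian prior).

```python
import tensorflow as tf

def cmvae_total_loss(img_gt, img_recon, gate_gt, gate_recon, means, stddev,
                     beta=8.0, w_img=1.0, w_gate=1.0):
    # Reconstruction terms: MSE on the decoded image and on the regressed gate pose.
    img_loss = tf.reduce_mean(tf.losses.mean_squared_error(img_gt, img_recon))
    gate_loss = tf.reduce_mean(tf.losses.mean_squared_error(gate_gt, gate_recon))
    # KL divergence between the approximate posterior and the unit-Gaussian prior,
    # averaged over the batch (same expression as in the training script).
    kl_loss = -0.5 * tf.reduce_mean(
        tf.reduce_sum(1 + stddev - tf.math.pow(means, 2) - tf.math.exp(stddev), axis=1)
    )
    return w_img * img_loss + w_gate * gate_loss + beta * kl_loss
```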
import airsimneurips as airsim import json import numpy as np import os def to_airsim_vector(np_arr): assert np.size(np_arr) == 3 return airsim.Vector3r(np.float(np_arr[0]), np.float(np_arr[1]), np.float(np_arr[2])) def to_airsim_vectors(np_arr): return [to_airsim_vector(np_arr[i, :]) for i in range(np.size(np_arr, 0))] # these clases are only meant to be settings generator. # for everything else, there's airsimneurips.Pose() class Position(): def __init__(self, x = 0.0, y = 0.0, z = 0.0): self.x = x self.y = y self.z = z class Rotation(): def __init__(self, yaw = 0.0, pitch = 0.0, roll = 0.0): self.yaw = yaw self.pitch = pitch self.roll = roll class Pose(): def __init__(self, position, rotation): self.position = position self.rotation = rotation class AirSimSettingsCreator(object): def __init__(self, sim_mode = "Multirotor"): self.sim_mode = sim_mode self.settings_dict = {} def add_minimal(self): self.settings_dict["SeeDocsAt"] = "https://github.com/Microsoft/AirSim/blob/master/docs/settings.md" self.settings_dict["SettingsVersion"] = 1.2 self.settings_dict["SimMode"] = self.sim_mode self.settings_dict["ClockSpeed"] = 1 # can be used for camera pose or vehicle pose by passing in the right settings_key def set_pose(self, setting_key, pose): setting_key["X"] = pose.position.x setting_key["Y"] = pose.position.y setting_key["Z"] = pose.position.z setting_key["Pitch"] = pose.rotation.pitch setting_key["Roll"] = pose.rotation.roll setting_key["Yaw"] = pose.rotation.yaw def add_multirotor(self, vehicle_name, pose): assert(self.settings_dict["SimMode"] == "Multirotor") if "Vehicles" not in self.settings_dict.keys(): self.settings_dict['Vehicles'] = {} self.settings_dict['Vehicles'][vehicle_name] = {} self.settings_dict['Vehicles'][vehicle_name]["VehicleType"] = "SimpleFlight" self.set_pose(self.settings_dict['Vehicles'][vehicle_name], pose) def add_camera(self, vehicle_name, camera_name, relative_pose, image_type, image_width, image_height, fov_horizontal_degrees): # fetch vehicle setting dict vehicle_setting = self.settings_dict['Vehicles'][vehicle_name] # initialize vehicle's camera setting dict to empty vehicle_setting['Cameras'] = {} vehicle_setting['Cameras'][camera_name] = {} camera_setting = vehicle_setting['Cameras'][camera_name] self.set_pose(camera_setting, relative_pose) capture_setting = {} capture_setting['Width'] = image_width capture_setting['Height'] = image_height capture_setting['ImageType'] = image_type capture_setting['FOV_Degrees'] = fov_horizontal_degrees camera_setting['CaptureSettings'] = [capture_setting] # default linux: /home/$USER/Documents/AirSim/settings.json # default windows: C:\\Users\\%USERNAME%\\Documents\\AirSim\\settings.json def write_airsim_settings_file(self, base_filename="settings.json"): user_dir = os.path.expanduser("~") airsim_settings_dir = os.path.join(user_dir, "Documents", "AirSim") if not os.path.exists(airsim_settings_dir): os.makedirs(airsim_settings_dir) airsim_settings_abs_file_path = os.path.join(airsim_settings_dir, base_filename) with open(airsim_settings_abs_file_path, "w") as f: json.dump(self.settings_dict, f, indent=2, sort_keys=True) # usage: AirSimSettingsCreator().write_airsim_neurips_baseline_settings_file() def write_airsim_neurips_baseline_settings_file(self): instance = self.__class__() instance.add_minimal() instance.add_multirotor(vehicle_name = "drone_1", pose = Pose(Position(), Rotation())) instance.add_camera(vehicle_name = "drone_1", camera_name = 'fpv_cam', relative_pose=Pose(Position(0.25, 0.0, 0.0), Rotation()), 
image_type = 0, image_width = 320, image_height = 240, fov_horizontal_degrees = 90) instance.add_multirotor(vehicle_name = "drone_2", pose = Pose(Position(), Rotation())) instance.write_airsim_settings_file()
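# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original file): composing the helpers
# above to write a custom settings.json. The vehicle/camera names, image size and
# pose values are arbitrary example values, not defaults of the module.
creator = AirSimSettingsCreator(sim_mode="Multirotor")
creator.add_minimal()
creator.add_multirotor(vehicle_name="drone_1", pose=Pose(Position(), Rotation()))
creator.add_camera(vehicle_name="drone_1",
                   camera_name="fpv_cam",
                   relative_pose=Pose(Position(0.25, 0.0, 0.0), Rotation()),
                   image_type=0,  # Scene
                   image_width=640,
                   image_height=480,
                   fov_horizontal_degrees=90)
creator.write_airsim_settings_file()  # writes ~/Documents/AirSim/settings.json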
AirSim-NeurIPS2019-Drone-Racing/baselines/utils.py/0
{ "file_path": "AirSim-NeurIPS2019-Drone-Racing/baselines/utils.py", "repo_id": "AirSim-NeurIPS2019-Drone-Racing", "token_count": 1763 }
83
# Azure Monitor Contributing Guide

This project welcomes contributions and suggestions. Most contributions require you to agree to a Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us the rights to use your contribution. For details, visit https://cla.opensource.microsoft.com.

When you submit a pull request, a CLA bot will automatically determine whether you need to provide a CLA and decorate the PR appropriately (e.g., status check, comment). Simply follow the instructions provided by the bot. You will only need to do this once across all repos using our CLA.

This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.

## Reporting Bugs/Feature Requests

To report a bug or request a feature, open an [issue](https://github.com/microsoft/ApplicationInsights-Python/issues). Please provide as much information as possible. For a bug, describe your environment and provide steps to reproduce along with the expected and actual behavior.

## Contributing

Everyone is welcome to contribute code to this repository via GitHub pull requests (PRs).

To create a new PR, fork the project in GitHub and clone the upstream repo:

```console
$ git clone https://github.com/microsoft/ApplicationInsights-Python.git
```

Add your fork as a remote:

```console
$ git remote add fork https://github.com/YOUR_GITHUB_USERNAME/ApplicationInsights-Python.git
```

Run tests:

```sh
# make sure you have all supported versions of Python installed
$ pip install tox  # only first time.
$ tox  # execute in the root of the repository
```

Check out a new branch, make modifications and push the branch to your fork:

```sh
$ git checkout -b feature
# edit files
$ git commit
$ git push fork feature
```

Open up a pull request with your changes.

## Development

This project uses [tox](https://tox.readthedocs.io) to automate some aspects of development, including testing against multiple Python versions.

To install `tox`, run:

```console
$ pip install tox
```

We will use the Azure Monitor distro project as an example. You can run `tox` with the following arguments:

- `tox` to run all existing tox commands, including unit tests for all packages under multiple Python versions
- `tox -e distro` to run the unit tests
- `tox -e py310-distro` to run the distro unit tests under a specific Python version (e.g. Python 3.10)
- `tox -e spellcheck` to run a spellcheck on all the code
- `tox -e lint` to run lint checks on all code

`black` and `isort` are executed when `tox -e lint` is run. The reported errors can be tedious to fix manually. An easier way to do so is:

1. Run `.tox/lint/bin/black .`
2. Run `.tox/lint/bin/isort .`

See [`tox.ini`](https://github.com/microsoft/ApplicationInsights-Python/blob/main/tox.ini) for more detail on available tox commands.

## Documentation

TODO

## Licensing

See the [LICENSE](LICENSE) file for our project's licensing. We will ask you to confirm the licensing of your contribution.

## CLA

This project welcomes contributions and suggestions. Most contributions require you to agree to a Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us the rights to use your contribution. For details, visit https://cla.microsoft.com.

When you submit a pull request, a CLA-bot will automatically determine whether you need to provide a CLA and decorate the PR appropriately (e.g., label, comment). Simply follow the instructions provided by the bot. You will only need to do this once across all repositories using our CLA.

## Code of Conduct

This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.
ApplicationInsights-Python/CONTRIBUTING.md/0
{ "file_path": "ApplicationInsights-Python/CONTRIBUTING.md", "repo_id": "ApplicationInsights-Python", "token_count": 1111 }
84
[bdist_wheel]
universal=1
ApplicationInsights-Python/azure-monitor-events-extension/setup.cfg/0
{ "file_path": "ApplicationInsights-Python/azure-monitor-events-extension/setup.cfg", "repo_id": "ApplicationInsights-Python", "token_count": 11 }
85
# Support

## How to file issues and get help

This project was created to accelerate the development of Trusted Research Environments by Microsoft and its partners while working with customers. The project does not have a dedicated team of maintainers beyond those working on active engagements, so issues are responded to on a best-effort basis. No guarantees can be offered as to response times on issues, feature requests, or the long-term roadmap for the project.

With that said, the team involved in this effort wishes for the project to evolve into a stable, production-ready resource with active contributors from Microsoft and the wider community.

This project uses GitHub Issues to track bugs and feature requests. Please search the existing issues before filing new issues to avoid duplicates. For new issues, file your bug or feature request as a new Issue.

## Microsoft Support Policy

Support for this project is limited to the resources listed above.
AzureTRE/SUPPORT.md/0
{ "file_path": "AzureTRE/SUPPORT.md", "repo_id": "AzureTRE", "token_count": 206 }
86
{ "IsEncrypted": false, "Values": { "FUNCTIONS_WORKER_RUNTIME": "python", "AIRLOCK_STATUS_CHANGED_QUEUE_NAME": "status_changed", "AIRLOCK_SCAN_RESULT_QUEUE_NAME": "scan_result", "SB_CONNECTION_STRING": "Endpoint=sb://XXXX.servicebus.windows.net/;SharedAccessKeyName=.....", "BLOB_CREATED_TOPIC_NAME": "", "TOPIC_SUBSCRIPTION_NAME":"", "TRE_ID": "", "ENABLE_MALWARE_SCANNING": "false" } }
AzureTRE/airlock_processor/local.settings.json-sample/0
{ "file_path": "AzureTRE/airlock_processor/local.settings.json-sample", "repo_id": "AzureTRE", "token_count": 193 }
87
from fastapi import Depends, HTTPException, Path, status
from pydantic import UUID4

from api.helpers import get_repository
from db.repositories.airlock_requests import AirlockRequestRepository
from models.domain.airlock_request import AirlockRequest
from db.errors import EntityDoesNotExist, UnableToAccessDatabase
from resources import strings


async def get_airlock_request_by_id(airlock_request_id: UUID4, airlock_request_repo: AirlockRequestRepository) -> AirlockRequest:
    try:
        return await airlock_request_repo.get_airlock_request_by_id(airlock_request_id)
    except EntityDoesNotExist:
        raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=strings.AIRLOCK_REQUEST_DOES_NOT_EXIST)
    except UnableToAccessDatabase:
        raise HTTPException(status_code=status.HTTP_503_SERVICE_UNAVAILABLE, detail=strings.STATE_STORE_ENDPOINT_NOT_RESPONDING)


async def get_airlock_request_by_id_from_path(airlock_request_id: UUID4 = Path(...), airlock_request_repo=Depends(get_repository(AirlockRequestRepository))) -> AirlockRequest:
    return await get_airlock_request_by_id(airlock_request_id, airlock_request_repo)
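# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original file): how a FastAPI route could
# consume the dependency above. The router path and response key are assumptions for
# the example, not the real AzureTRE route definitions.
from fastapi import APIRouter

example_router = APIRouter()


@example_router.get("/example/requests/{airlock_request_id}")
async def get_request(airlock_request: AirlockRequest = Depends(get_airlock_request_by_id_from_path)):
    # FastAPI resolves the repository, loads the request from the path parameter and
    # maps EntityDoesNotExist / UnableToAccessDatabase to 404 / 503 before this runs.
    return {"airlockRequest": airlock_request}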
AzureTRE/api_app/api/dependencies/airlock.py/0
{ "file_path": "AzureTRE/api_app/api/dependencies/airlock.py", "repo_id": "AzureTRE", "token_count": 393 }
88
from fastapi import APIRouter, Depends, HTTPException, status from db.migrations.airlock import AirlockMigration from db.migrations.resources import ResourceMigration from api.helpers import get_repository from db.repositories.operations import OperationRepository from db.repositories.resources_history import ResourceHistoryRepository from services.authentication import get_current_admin_user from resources import strings from db.migrations.shared_services import SharedServiceMigration from db.migrations.workspaces import WorkspaceMigration from db.repositories.resources import ResourceRepository from models.schemas.migrations import MigrationOutList, Migration from services.logging import logger migrations_core_router = APIRouter(dependencies=[Depends(get_current_admin_user)]) @migrations_core_router.post("/migrations", status_code=status.HTTP_202_ACCEPTED, name=strings.API_MIGRATE_DATABASE, response_model=MigrationOutList, dependencies=[Depends(get_current_admin_user)]) async def migrate_database(resources_repo=Depends(get_repository(ResourceRepository)), operations_repo=Depends(get_repository(OperationRepository)), resource_history_repo=Depends(get_repository(ResourceHistoryRepository)), shared_services_migration=Depends(get_repository(SharedServiceMigration)), workspace_migration=Depends(get_repository(WorkspaceMigration)), resource_migration=Depends(get_repository(ResourceMigration)), airlock_migration=Depends(get_repository(AirlockMigration)),): try: migrations = list() logger.info("PR 1030") await resources_repo.rename_field_name('resourceTemplateName', 'templateName') await resources_repo.rename_field_name('resourceTemplateVersion', 'templateVersion') await resources_repo.rename_field_name('resourceTemplateParameters', 'properties') migrations.append(Migration(issueNumber="PR 1030", status="Executed")) logger.info("PR 1031") await resources_repo.rename_field_name('workspaceType', 'templateName') await resources_repo.rename_field_name('workspaceServiceType', 'templateName') await resources_repo.rename_field_name('userResourceType', 'templateName') migrations.append(Migration(issueNumber="PR 1031", status="Executed")) logger.info("PR 1717 - Shared services") migration_status = "Executed" if await shared_services_migration.deleteDuplicatedSharedServices() else "Skipped" migrations.append(Migration(issueNumber="PR 1717", status=migration_status)) logger.info("PR 1726 - Authentication needs to be in properties so we can update them") migration_status = "Executed" if await workspace_migration.moveAuthInformationToProperties() else "Skipped" migrations.append(Migration(issueNumber="PR 1726", status=migration_status)) logger.info("PR 1406 - Extra field to support UI") num_rows = await resource_migration.add_deployment_status_field(operations_repo) migrations.append(Migration(issueNumber="1406", status=f'Updated {num_rows} resource objects')) logger.info("PR 3066 - Archive resources history") num_rows = await resource_migration.archive_history(resource_history_repo) migrations.append(Migration(issueNumber="3066", status=f'Updated {num_rows} resource objects')) logger.info("PR 2371 - Validate min firewall version") await shared_services_migration.checkMinFirewallVersion() migrations.append(Migration(issueNumber="2371", status='Firewall version meets requirement')) logger.info("PR 2779 - Restructure Airlock requests & add createdBy field") await airlock_migration.rename_field_name('requestType', 'type') await airlock_migration.rename_field_name('requestTitle', 'title') await 
airlock_migration.rename_field_name('user', 'updatedBy') await airlock_migration.rename_field_name('creationTime', 'createdWhen') num_updated = await airlock_migration.add_created_by_and_rename_in_history() migrations.append(Migration(issueNumber="2779", status=f'Renamed fields & updated {num_updated} airlock requests with createdBy')) logger.info("PR 2883 - Support multiple reviewer VMs per Airlock request") num_updated = await airlock_migration.change_review_resources_to_dict() migrations.append(Migration(issueNumber="2883", status=f'Updated {num_updated} airlock requests with new reviewUserResources format')) logger.info("PR 3152 - Migrate reviewDecision of Airlock Reviews") num_updated = await airlock_migration.update_review_decision_values() migrations.append(Migration(issueNumber="3152", status=f'Updated {num_updated} airlock requests with new reviewDecision value')) logger.info("PR 3358 - Migrate OperationSteps of Operations") num_updated = await resource_migration.migrate_step_id_of_operation_steps(operations_repo) migrations.append(Migration(issueNumber="3358", status=f'Updated {num_updated} operations')) return MigrationOutList(migrations=migrations) except Exception as e: logger.exception("Failed to migrate database") raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=str(e))
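# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original file): calling the migrations
# endpoint from a client. Assumptions: the router is mounted under /api like the other
# endpoints in this API, and an admin bearer token has already been acquired out of band.
import httpx


def run_database_migrations(base_url: str, admin_token: str) -> list:
    response = httpx.post(
        f"{base_url}/api/migrations",
        headers={"Authorization": f"Bearer {admin_token}"},
        timeout=300.0,  # the migrations iterate over many documents, so allow extra time
    )
    response.raise_for_status()
    # MigrationOutList serialises as {"migrations": [{"issueNumber": ..., "status": ...}, ...]}
    return response.json()["migrations"]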
AzureTRE/api_app/api/routes/migrations.py/0
{ "file_path": "AzureTRE/api_app/api/routes/migrations.py", "repo_id": "AzureTRE", "token_count": 2008 }
89
from resources import strings from db.repositories.airlock_requests import AirlockRequestRepository class AirlockMigration(AirlockRequestRepository): @classmethod async def create(cls): cls = AirlockMigration() resource_repo = await super().create() cls._container = resource_repo._container return cls async def add_created_by_and_rename_in_history(self) -> int: num_updated = 0 for request in await self.query('SELECT * FROM c'): # Only migrate if createdBy isn't present if 'createdBy' in request: continue # For each request, check if it has history if len(request['history']) > 0: # createdBy value will be first user in history request['createdBy'] = request['history'][0]['user'] # Also rename user to updatedBy in each history item for item in request['history']: if 'user' in item: item['updatedBy'] = item['user'] del item['user'] else: # If not, the createdBy user will be the same as the updatedBy value request['createdBy'] = request['updatedBy'] await self.update_item_dict(request) num_updated += 1 return num_updated async def change_review_resources_to_dict(self) -> int: num_updated = 0 for request in await self.query('SELECT * FROM c'): # Only migrate if airlockReviewResources property present and is a list if 'reviewUserResources' in request and isinstance(request['reviewUserResources'], list): updated_review_resources = {} for i, resource in enumerate(request['reviewUserResources']): updated_review_resources['UNKNOWN' + str(i)] = resource request['reviewUserResources'] = updated_review_resources await self.update_item_dict(request) num_updated += 1 return num_updated async def update_review_decision_values(self) -> int: num_updated = 0 for request in await self.query('SELECT * FROM c WHERE ARRAY_LENGTH(c.reviews) > 0'): request_changed = False for review in request['reviews']: old_decision = review['reviewDecision'] new_decision = old_decision if old_decision == 'approval_in_progress': new_decision = strings.AIRLOCK_REVIEW_DECISION_APPROVED if old_decision == 'rejection_in_progress': new_decision = strings.AIRLOCK_REVIEW_DECISION_REJECTED if new_decision != old_decision: request_changed = True review['reviewDecision'] = new_decision if request_changed: await self.update_item_dict(request) num_updated += 1 return num_updated
AzureTRE/api_app/db/migrations/airlock.py/0
{ "file_path": "AzureTRE/api_app/db/migrations/airlock.py", "repo_id": "AzureTRE", "token_count": 1361 }
90
from typing import Optional, List
from pydantic import Field

from models.domain.resource import AvailableUpgrade, ResourceType
from models.domain.azuretremodel import AzureTREModel


class RestrictedProperties(AzureTREModel):
    display_name: str = ""
    description: str = ""
    overview: str = ""
    connection_uri: str = ""
    is_exposed_externally: bool = True


class RestrictedResource(AzureTREModel):
    """
    Resource request
    """
    id: str = Field(title="Id", description="GUID identifying the resource request")
    templateName: str = Field(title="Resource template name", description="The resource template (bundle) to deploy")
    templateVersion: str = Field(title="Resource template version", description="The version of the resource template (bundle) to deploy")
    properties: RestrictedProperties = Field(None, title="Restricted Properties", description="Resource properties safe to share with non-admins")
    availableUpgrades: Optional[List[AvailableUpgrade]] = Field(title="Available template upgrades", description="Versions of the template that are available for upgrade")
    isEnabled: bool = True  # Must be set before a resource can be deleted
    resourceType: ResourceType
    deploymentStatus: Optional[str] = Field(title="Deployment Status", description="Overall deployment status of the resource")
    etag: str = Field(title="_etag", description="eTag of the document", alias="_etag")
    resourcePath: str = ""
    resourceVersion: int = 0
    user: dict = {}
    updatedWhen: float = 0
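# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original file): parsing a raw document into
# a RestrictedResource. The field values are made-up examples and the Pydantic v1
# parse_obj API is assumed; note the etag field is populated via its "_etag" alias.
example_document = {
    "id": "2fdc9fba-726e-4db6-a1b8-9018a2165748",
    "templateName": "tre-shared-service-firewall",
    "templateVersion": "0.1.0",
    "properties": {"display_name": "My shared service", "description": "Some description"},
    "resourceType": ResourceType.SharedService,
    "_etag": "\"00000000-0000-0000-0000-000000000000\"",
}
restricted = RestrictedResource.parse_obj(example_document)
assert restricted.properties.is_exposed_externally  # defaults to True when not supplied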
AzureTRE/api_app/models/domain/restricted_resource.py/0
{ "file_path": "AzureTRE/api_app/models/domain/restricted_resource.py", "repo_id": "AzureTRE", "token_count": 424 }
91
from typing import List from pydantic import BaseModel, Field from models.domain.restricted_resource import RestrictedResource from models.domain.resource import ResourceType from models.domain.shared_service import SharedService def get_sample_shared_service(shared_service_id: str) -> dict: return { "id": shared_service_id, "templateName": "tre-shared-service-firewall", "templateVersion": "0.1.0", "properties": { "display_name": "My shared service", "description": "Some description", }, "resourceType": ResourceType.SharedService } class SharedServiceInResponse(BaseModel): sharedService: SharedService class Config: schema_extra = { "example": { "shared_service": get_sample_shared_service("2fdc9fba-726e-4db6-a1b8-9018a2165748") } } class RestrictedSharedServiceInResponse(BaseModel): sharedService: RestrictedResource class Config: schema_extra = { "example": { "shared_service": get_sample_shared_service("2fdc9fba-726e-4db6-a1b8-9018a2165748") } } class RestrictedSharedServicesInList(BaseModel): sharedServices: List[RestrictedResource] = Field([], title="shared services") class Config: schema_extra = { "example": { "sharedServices": [ get_sample_shared_service("2fdc9fba-726e-4db6-a1b8-9018a2165748"), get_sample_shared_service("abcc9fba-726e-4db6-a1b8-9018a2165748") ] } } class SharedServicesInList(BaseModel): sharedServices: List[SharedService] = Field([], title="shared services") class Config: schema_extra = { "example": { "sharedServices": [ get_sample_shared_service("2fdc9fba-726e-4db6-a1b8-9018a2165748"), get_sample_shared_service("abcc9fba-726e-4db6-a1b8-9018a2165748") ] } } class SharedServiceInCreate(BaseModel): templateName: str = Field(title="Shared service type", description="Bundle name") properties: dict = Field({}, title="Shared service parameters", description="Values for the parameters required by the shared service resource specification") class Config: schema_extra = { "example": { "templateName": "tre-shared-service-firewall", "properties": { "display_name": "My shared service", "description": "Some description", } } }
AzureTRE/api_app/models/schemas/shared_service.py/0
{ "file_path": "AzureTRE/api_app/models/schemas/shared_service.py", "repo_id": "AzureTRE", "token_count": 1224 }
92
{ "$schema": "http://json-schema.org/draft-07/schema", "$id": "https://github.com/microsoft/AzureTRE/schema/azuread.json", "type": "object", "title": "Azure AD Authorisation Schema", "default": {}, "required": [ ], "properties": { } }
AzureTRE/api_app/schemas/azuread.json/0
{ "file_path": "AzureTRE/api_app/schemas/azuread.json", "repo_id": "AzureTRE", "token_count": 120 }
93
from core import config, credentials
from services.logging import logger

from azure.mgmt.compute import ComputeManagementClient, models
from azure.core.exceptions import ResourceNotFoundError


def get_azure_resource_status(resource_id):

    resource_name = resource_id.split('/')[-1]
    resource_group_name = resource_id.split('/')[4]
    resource_type = resource_id.split('/')[-3] + '/' + resource_id.split('/')[-2]

    try:
        if resource_type == 'Microsoft.Compute/virtualMachines':
            vm_instance_view: models.VirtualMachineInstanceView = get_azure_vm_instance_view(resource_name, resource_group_name)
            power_state = None
            if vm_instance_view.statuses is not None:
                power_states = [x for x in vm_instance_view.statuses if x.code is not None and x.code.startswith('PowerState')]
                if len(power_states) > 0:
                    power_state = power_states[0].display_status
            return {"powerState": power_state}
    except ResourceNotFoundError:
        logger.warning(f"Unable to query resource status for {resource_id}, as the resource was not found.")

    return {}


def get_azure_vm_instance_view(vm_name, resource_group_name) -> models.VirtualMachineInstanceView:
    compute_client = ComputeManagementClient(credentials.get_credential(),
                                             subscription_id=config.SUBSCRIPTION_ID,
                                             base_url=config.RESOURCE_MANAGER_ENDPOINT,
                                             credential_scopes=config.CREDENTIAL_SCOPES)
    return compute_client.virtual_machines.instance_view(resource_group_name, vm_name)
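# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original file): querying the power state of
# a VM by its ARM resource ID. The subscription, resource group and VM names below are
# placeholders.
example_resource_id = (
    "/subscriptions/00000000-0000-0000-0000-000000000000"
    "/resourceGroups/rg-mytre-ws-1234"
    "/providers/Microsoft.Compute/virtualMachines/my-workspace-vm"
)
print(get_azure_resource_status(example_resource_id))
# -> {'powerState': 'VM running'} for a running VM, or {} for unsupported/missing resources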
AzureTRE/api_app/services/azure_resource_status.py/0
{ "file_path": "AzureTRE/api_app/services/azure_resource_status.py", "repo_id": "AzureTRE", "token_count": 704 }
94
from unittest.mock import AsyncMock import pytest import pytest_asyncio from mock import patch from models.domain.resource import ResourceType from db.migrations.workspaces import WorkspaceMigration pytestmark = pytest.mark.asyncio @pytest_asyncio.fixture async def workspace_migrator(): with patch('api.dependencies.database.Database.get_container_proxy', return_value=AsyncMock()): workspace_migrator = await WorkspaceMigration.create() yield workspace_migrator def get_sample_old_workspace(workspace_id: str = "7ab18f7e-ee8f-4202-8d46-747818ec76f4", spec_workspace_id: str = "0001") -> dict: return [{ "id": workspace_id, "templateName": "tre-workspace-base", "templateVersion": "0.1.0", "properties": { "app_id": "03f18f7e-ee8f-4202-8d46-747818ec76f4", "azure_location": "westeurope", "workspace_id": spec_workspace_id, "tre_id": "mytre-dev-1234", "address_space_size": "small", }, "resourceType": ResourceType.Workspace, "workspaceURL": "", "authInformation": { "sp_id": "f153f0f4-e89a-4456-b7ba-d0c46571d7c8", "roles": { "WorkspaceResearcher": "100358cf-5c65-4dfb-88b8-ed87fdc59db0", "WorkspaceOwner": "682df69e-bf3c-4606-85ab-75d70c0d510f" }, "app_id": "03f18f7e-ee8f-4202-8d46-747818ec76f4" }, }] async def test_workspace_migration_moves_fields(workspace_migrator): workspace_migrator.query = AsyncMock(return_value=get_sample_old_workspace()) assert (await workspace_migrator.moveAuthInformationToProperties())
AzureTRE/api_app/tests_ma/test_db/test_migrations/test_workspace_migration.py/0
{ "file_path": "AzureTRE/api_app/tests_ma/test_db/test_migrations/test_workspace_migration.py", "repo_id": "AzureTRE", "token_count": 792 }
95
###
# Get all workspace templates (admin)
GET {{baseUrl}}/workspace-templates
Accept: {{contentType}}
Authorization: Bearer {{token}}

###
# Get details about the 'my-tre-workspace' template (admin)
GET {{baseUrl}}/workspace-templates/{{workspaceTemplate}}
Accept: {{contentType}}
Authorization: Bearer {{token}}

###
# Get all workspace service templates (user)
GET {{baseUrl}}/workspace-service-templates
Accept: {{contentType}}
Authorization: Bearer {{token}}

###
# Get details about the current 'my-tre-workspace-service' template (user)
GET {{baseUrl}}/workspace-service-templates/{{workspaceServiceTemplate}}
Accept: {{contentType}}
Authorization: Bearer {{token}}

###
# Get all user resource templates for 'my-tre-workspace-service' (user)
GET {{baseUrl}}/workspace-service-templates/{{workspaceServiceTemplate}}/user-resource-templates
Accept: {{contentType}}
Authorization: Bearer {{token}}

###
# Get details about the current 'my-tre-user-resource' template (user)
GET {{baseUrl}}/workspace-service-templates/{{workspaceServiceTemplate}}/user-resource-templates/{{userResourceTemplate}}
Accept: {{contentType}}
Authorization: Bearer {{token}}
AzureTRE/api_http_requests/API Template GET Endpoints.http/0
{ "file_path": "AzureTRE/api_http_requests/API Template GET Endpoints.http", "repo_id": "AzureTRE", "token_count": 341 }
96
import logging

import click

from tre.api_client import ApiClient


@click.command(name="api", help="Call an API endpoint")
@click.option("--url", required=True, help="The API URL to call, e.g. /api/workspaces")
@click.option("--scope", required=False, help="The login scope for the API call")
def call_api(url, scope):
    log = logging.getLogger(__name__)

    client = ApiClient.get_api_client_from_config()
    response = client.call_api(log, url, scope)
    click.echo(response.text + '\n')
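# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original file): exercising the command via
# Click's test runner. The URL is an example value, and a configured ApiClient profile
# (login already performed) is assumed.
from click.testing import CliRunner

runner = CliRunner()
result = runner.invoke(call_api, ["--url", "/api/workspaces"])
print(result.output)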
AzureTRE/cli/tre/commands/api_call.py/0
{ "file_path": "AzureTRE/cli/tre/commands/api_call.py", "repo_id": "AzureTRE", "token_count": 225 }
97
import click import json import logging from tre.api_client import ApiClient from tre.commands.operation import default_operation_table_query_single, operation_show from tre.output import output, output_option, query_option @click.group(help="List/add shared_services") def shared_services() -> None: pass @click.command(name="list", help="List shared_services") @output_option() @query_option() def shared_services_list(output_format, query): log = logging.getLogger(__name__) client = ApiClient.get_api_client_from_config() response = client.call_api(log, 'GET', '/api/shared-services') output(response, output_format=output_format, query=query, default_table_query=r"sharedServices[].{id:id,name:templateName, version:templateVersion, is_enabled:isEnabled, status: deploymentStatus}") @click.command(name="new", help="Create a new shared_service") @click.option('--definition', help='JSON definition for the shared_service', required=False) @click.option('--definition-file', help='File containing JSON definition for the shared_service', required=False, type=click.File("r")) @click.option('--no-wait', flag_value=True, default=False) @output_option() @query_option() @click.pass_context def shared_services_create(ctx, definition, definition_file, no_wait, output_format, query): log = logging.getLogger(__name__) if definition is None: if definition_file is None: raise click.UsageError('Please specify either a definition or a definition file') definition = definition_file.read() definition_dict = json.loads(definition) client = ApiClient.get_api_client_from_config() click.echo("Creating shared_service...", err=True) response = client.call_api(log, 'POST', '/api/shared-services', json_data=definition_dict) if no_wait: output(response, output_format=output_format, query=query, default_table_query=default_operation_table_query_single()) else: operation_url = response.headers['location'] operation_show(log, operation_url, no_wait=False, output_format=output_format, query=query) shared_services.add_command(shared_services_list) shared_services.add_command(shared_services_create)
AzureTRE/cli/tre/commands/shared_services/shared_services.py/0
{ "file_path": "AzureTRE/cli/tre/commands/shared_services/shared_services.py", "repo_id": "AzureTRE", "token_count": 732 }
98
import click import logging from tre.api_client import ApiClient from tre.commands.workspaces.airlock.contexts import WorkspaceAirlockContext, pass_workspace_airlock_context from tre.output import output, output_option, query_option _default_table_query_item = r"airlockRequest.{id:id,workspace_id:workspaceId,type:type, title:title,status:status,business_justification:businessJustification}" def airlock_id_completion(ctx: click.Context, param: click.Parameter, incomplete: str): log = logging.getLogger(__name__) parent_ctx = ctx.parent workspace_id = parent_ctx.params["workspace_id"] client = ApiClient.get_api_client_from_config() workspace_scope = client.get_workspace_scope(log, workspace_id) response = client.call_api(log, 'GET', f'/api/workspaces/{workspace_id}/requests', scope_id=workspace_scope) if response.is_success: ids = [request["airlockRequest"]["id"] for request in response.json()["airlockRequests"]] return [id for id in ids if id.startswith(incomplete)] @click.group(name="airlock-request", invoke_without_command=True, help="Perform actions on an airlock request") @click.argument('airlock_id', required=True, type=click.UUID, shell_complete=airlock_id_completion) @click.pass_context def airlock(ctx: click.Context, airlock_id: str) -> None: ctx.obj = WorkspaceAirlockContext.add_airlock_id_to_context_obj(ctx, airlock_id) @click.command(name="show", help="Show airlock request") @output_option() @query_option() @pass_workspace_airlock_context def airlock_show(airlock_context: WorkspaceAirlockContext, output_format, query) -> None: log = logging.getLogger(__name__) workspace_id = airlock_context.workspace_id if workspace_id is None: raise click.UsageError('Missing workspace ID') airlock_id = airlock_context.airlock_id if airlock_id is None: raise click.UsageError('Missing airlock request ID') client = ApiClient.get_api_client_from_config() workspace_scope = client.get_workspace_scope(log, workspace_id) response = client.call_api( log, 'GET', f'/api/workspaces/{workspace_id}/requests/{airlock_id}', scope_id=workspace_scope, ) output(response, output_format=output_format, query=query, default_table_query=_default_table_query_item) @click.command(name="get-url", help="Get URL to access airlock request") @output_option() @query_option() @pass_workspace_airlock_context def airlock_get_url(airlock_context: WorkspaceAirlockContext, output_format, query) -> None: log = logging.getLogger(__name__) workspace_id = airlock_context.workspace_id if workspace_id is None: raise click.UsageError('Missing workspace ID') airlock_id = airlock_context.airlock_id if airlock_id is None: raise click.UsageError('Missing service ID') client = ApiClient.get_api_client_from_config() workspace_scope = client.get_workspace_scope(log, workspace_id) response = client.call_api( log, 'GET', f'/api/workspaces/{workspace_id}/requests/{airlock_id}/link', scope_id=workspace_scope, ) output(response, output_format=output_format, query=query, default_table_query=r"{container_url:containerUrl}") @click.command(name="submit", help="Submit an airlock request (after uploading content)") @output_option() @query_option() @pass_workspace_airlock_context def airlock_submit(airlock_context: WorkspaceAirlockContext, output_format, query) -> None: log = logging.getLogger(__name__) workspace_id = airlock_context.workspace_id if workspace_id is None: raise click.UsageError('Missing workspace ID') airlock_id = airlock_context.airlock_id if airlock_id is None: raise click.UsageError('Missing airlock request ID') client = 
ApiClient.get_api_client_from_config() workspace_scope = client.get_workspace_scope(log, workspace_id) response = client.call_api( log, 'POST', f'/api/workspaces/{workspace_id}/requests/{airlock_id}/submit', scope_id=workspace_scope, ) output( response, output_format=output_format, query=query, default_table_query=_default_table_query_item) @click.command(name="review", help="Provide a review response for an airlock request") @click.option('--approve/--reject', 'approve', required=True, help="Approved/rejected") @click.option('--reason', required=True, help="Reason for approval/rejection") @output_option() @query_option() @pass_workspace_airlock_context def airlock_review(airlock_context: WorkspaceAirlockContext, approve, reason, output_format, query) -> None: log = logging.getLogger(__name__) workspace_id = airlock_context.workspace_id if workspace_id is None: raise click.UsageError('Missing workspace ID') airlock_id = airlock_context.airlock_id if airlock_id is None: raise click.UsageError('Missing airlock request ID') client = ApiClient.get_api_client_from_config() workspace_scope = client.get_workspace_scope(log, workspace_id) response = client.call_api( log, 'POST', f'/api/workspaces/{workspace_id}/requests/{airlock_id}/review', json_data={ "approval": approve, "decisionExplanation": reason, }, scope_id=workspace_scope, ) output( response, output_format=output_format, query=query, default_table_query=_default_table_query_item) @click.command(name="cancel", help="Cancel an airlock request") @output_option() @query_option() @pass_workspace_airlock_context def airlock_cancel(airlock_context: WorkspaceAirlockContext, output_format, query) -> None: log = logging.getLogger(__name__) workspace_id = airlock_context.workspace_id if workspace_id is None: raise click.UsageError('Missing workspace ID') airlock_id = airlock_context.airlock_id if airlock_id is None: raise click.UsageError('Missing airlock request ID') client = ApiClient.get_api_client_from_config() workspace_scope = client.get_workspace_scope(log, workspace_id) response = client.call_api( log, 'POST', f'/api/workspaces/{workspace_id}/requests/{airlock_id}/cancel', scope_id=workspace_scope, ) output( response, output_format=output_format, query=query, default_table_query=_default_table_query_item) airlock.add_command(airlock_show) airlock.add_command(airlock_get_url) airlock.add_command(airlock_submit) airlock.add_command(airlock_review) airlock.add_command(airlock_cancel)
AzureTRE/cli/tre/commands/workspaces/airlock/request.py/0
{ "file_path": "AzureTRE/cli/tre/commands/workspaces/airlock/request.py", "repo_id": "AzureTRE", "token_count": 2525 }
99
import json import logging import click from tre.api_client import ApiClient from tre.commands.operation import default_operation_table_query_single, operation_show from tre.output import output, output_option, query_option from .contexts import WorkspaceServiceContext, pass_workspace_service_context from .operation import workspace_service_operation from .operations import workspace_service_operations from .user_resources.user_resource import user_resource from .user_resources.user_resources import user_resources def workspace_service_id_completion(ctx: click.Context, param: click.Parameter, incomplete: str): log = logging.getLogger(__name__) parent_ctx = ctx.parent workspace_id = parent_ctx.params["workspace_id"] client = ApiClient.get_api_client_from_config() workspace_scope = client.get_workspace_scope(log, workspace_id) response = client.call_api(log, 'GET', f'/api/workspaces/{workspace_id}/workspace-services', scope_id=workspace_scope) if response.is_success: ids = [workspace["id"] for workspace in response.json()["workspaceServices"]] return [id for id in ids if id.startswith(incomplete)] @click.group(name="workspace-service", invoke_without_command=True, help="Perform actions on an workspace-service") @click.argument('workspace_service_id', required=True, type=click.UUID, shell_complete=workspace_service_id_completion) @click.pass_context def workspace_service(ctx: click.Context, workspace_service_id) -> None: ctx.obj = WorkspaceServiceContext.add_service_id_to_context_obj(ctx, workspace_service_id) @click.command(name="show", help="Workspace service") @output_option() @query_option() @pass_workspace_service_context def workspace_service_show(workspace_service_context: WorkspaceServiceContext, output_format, query) -> None: log = logging.getLogger(__name__) workspace_id = workspace_service_context.workspace_id if workspace_id is None: raise click.UsageError('Missing workspace ID') workspace_service_id = workspace_service_context.workspace_service_id if workspace_service_id is None: raise click.UsageError('Missing service ID') client = ApiClient.get_api_client_from_config() workspace_scope = client.get_workspace_scope(log, workspace_id) response = client.call_api( log, 'GET', f'/api/workspaces/{workspace_id}/workspace-services/{workspace_service_id}', scope_id=workspace_scope, ) output(response, output_format=output_format, query=query, default_table_query=r"workspaceService.{id:id,template_name:templateName,template_version:templateVersion,sdeployment_status:deploymentStatus}") @click.command(name="update", help="Update a workspace service") @click.option('--etag', help='The etag of the workspace service to update', required=True) @click.option('--definition', help='JSON definition for the workspace service', required=False) @click.option('--definition-file', help='File containing JSON definition for the workspace service', required=False, type=click.File("r")) @click.option('--no-wait', flag_value=True, default=False) @output_option() @query_option() @pass_workspace_service_context def workspace_service_update(workspace_service_context: WorkspaceServiceContext, etag, definition, definition_file, no_wait, output_format, query, suppress_output: bool = False): log = logging.getLogger(__name__) workspace_id = workspace_service_context.workspace_id if workspace_id is None: raise click.UsageError('Missing workspace ID') workspace_service_id = workspace_service_context.workspace_service_id if workspace_service_id is None: raise click.UsageError('Missing service ID') if definition is 
None: if definition_file is None: raise click.UsageError('Please specify either a definition or a definition file') definition = definition_file.read() definition_dict = json.loads(definition) client = ApiClient.get_api_client_from_config() workspace_scope = client.get_workspace_scope(log, workspace_id) response = client.call_api( log, 'PATCH', f'/api/workspaces/{workspace_id}/workspace-services/{workspace_service_id}', headers={'etag': etag}, json_data=definition_dict, scope_id=workspace_scope) if no_wait: output(response, output_format=output_format, query=query, default_table_query=default_operation_table_query_single()) else: operation_url = response.headers['location'] operation_show( log, operation_url, no_wait=False, output_format=output_format, query=query, suppress_output=suppress_output, scope_id=workspace_scope) @click.command(name="set-enabled", help="Enable/disable a workspace service") @click.option('--etag', help='The etag of the workspace service to update', required=True) @click.option('--enable/--disable', is_flag=True, required=True) @click.option('--no-wait', flag_value=True, default=False) @output_option() @query_option() @pass_workspace_service_context def workspace_service_set_enabled(workspace_service_context: WorkspaceServiceContext, etag, enable, no_wait, output_format, query, suppress_output: bool = False): log = logging.getLogger(__name__) workspace_id = workspace_service_context.workspace_id if workspace_id is None: raise click.UsageError('Missing workspace ID') workspace_service_id = workspace_service_context.workspace_service_id if workspace_service_id is None: raise click.UsageError('Missing service ID') client = ApiClient.get_api_client_from_config() click.echo(f"Setting isEnabled to {enable}...", err=True) workspace_scope = client.get_workspace_scope(log, workspace_id) response = client.call_api( log, 'PATCH', f'/api/workspaces/{workspace_id}/workspace-services/{workspace_service_id}', headers={'etag': etag}, json_data={'isEnabled': enable}, scope_id=workspace_scope) if no_wait: if not suppress_output or not response.is_success: output(response, output_format=output_format, query=query, default_table_query=default_operation_table_query_single()) else: operation_url = response.headers['location'] operation_show( log, operation_url, no_wait=False, output_format=output_format, query=query, suppress_output=suppress_output, scope_id=workspace_scope) @click.command(name="delete", help="Delete a workspace service") @click.option('--yes', is_flag=True, default=False) @click.option('--no-wait', flag_value=True, default=False) @click.option('--ensure-disabled', help="Ensure disabled before deleting (resources are required to be disabled before deleting)", flag_value=True, default=False) @output_option() @query_option() @click.pass_context @pass_workspace_service_context def workspace_service_delete(workspace_service_context: WorkspaceServiceContext, ctx: click.Context, yes, no_wait, ensure_disabled, output_format, query): log = logging.getLogger(__name__) workspace_id = workspace_service_context.workspace_id if workspace_id is None: raise click.UsageError('Missing workspace ID') workspace_service_id = workspace_service_context.workspace_service_id if workspace_service_id is None: raise click.UsageError('Missing service ID') if not yes: click.confirm("Are you sure you want to delete this workspace service?", err=True, abort=True) client = ApiClient.get_api_client_from_config() workspace_scope = client.get_workspace_scope(log, workspace_id) if ensure_disabled: 
response = client.call_api( log, 'GET', f'/api/workspaces/{workspace_id}/workspace-services/{workspace_service_id}', scope_id=workspace_scope) workspace_service_json = response.json() if workspace_service_json['workspaceService']['isEnabled']: etag = workspace_service_json['workspaceService']['_etag'] ctx.invoke( workspace_service_set_enabled, etag=etag, enable=False, no_wait=False, suppress_output=True ) click.echo("Deleting workspace service...", err=True) response = client.call_api( log, 'DELETE', f'/api/workspaces/{workspace_id}/workspace-services/{workspace_service_id}', scope_id=workspace_scope) if no_wait: output(response, output_format=output_format, query=query, default_table_query=default_operation_table_query_single()) else: operation_url = response.headers['location'] operation_show(log, operation_url, no_wait, output_format=output_format, query=query, scope_id=workspace_scope) @click.command(name="invoke-action", help="Invoke an action on a workspace service") @click.argument("action-name", required=True) @click.option("--no-wait", flag_value=True, default=False) @output_option() @query_option() @pass_workspace_service_context def workspace_service_invoke_action( workspace_service_context: WorkspaceServiceContext, action_name, no_wait, output_format, query, ): log = logging.getLogger(__name__) workspace_id = workspace_service_context.workspace_id if workspace_id is None: raise click.UsageError('Missing workspace ID') workspace_service_id = workspace_service_context.workspace_service_id if workspace_service_id is None: raise click.UsageError('Missing service ID') client = ApiClient.get_api_client_from_config() workspace_scope = client.get_workspace_scope(log, workspace_id) click.echo(f"Invoking action {action_name}...\n", err=True) response = client.call_api( log, "POST", f"/api/workspaces/{workspace_id}/workspace-services/{workspace_service_id}/invoke-action", scope_id=workspace_scope, params={"action": action_name}, ) if no_wait: output(response, output_format=output_format, query=query) else: operation_url = response.headers["location"] operation_show( log, operation_url, no_wait=False, output_format=output_format, query=query, scope_id=workspace_scope, ) workspace_service.add_command(workspace_service_show) workspace_service.add_command(workspace_service_update) workspace_service.add_command(workspace_service_set_enabled) workspace_service.add_command(workspace_service_operation) workspace_service.add_command(workspace_service_operations) workspace_service.add_command(workspace_service_delete) workspace_service.add_command(workspace_service_invoke_action) workspace_service.add_command(user_resource) workspace_service.add_command(user_resources)
AzureTRE/cli/tre/commands/workspaces/workspace_services/workspace_service.py/0
{ "file_path": "AzureTRE/cli/tre/commands/workspaces/workspace_services/workspace_service.py", "repo_id": "AzureTRE", "token_count": 4204 }
100
variable "tre_id" { type = string } variable "location" { type = string } variable "resource_group_name" { type = string } variable "airlock_storage_subnet_id" { type = string } variable "airlock_events_subnet_id" { type = string } variable "enable_local_debugging" { type = bool } variable "myip" { type = string } variable "api_principal_id" { type = string } variable "docker_registry_server" { type = string description = "Docker registry server" } variable "airlock_processor_image_repository" { type = string description = "Repository for Airlock processor image" default = "microsoft/azuretre/airlock-processor" } variable "mgmt_resource_group_name" { type = string description = "Shared management resource group" } variable "mgmt_acr_name" { type = string description = "Management ACR name" } variable "airlock_app_service_plan_sku" { type = string default = "P1v3" } variable "airlock_processor_subnet_id" { type = string } variable "applicationinsights_connection_string" { type = string } variable "airlock_servicebus" { type = object({ id = string default_primary_connection_string = string }) } variable "tre_core_tags" { type = map(string) } variable "enable_malware_scanning" { type = bool description = "If False, Airlock requests will skip the malware scanning stage" } variable "arm_environment" { type = string } variable "log_analytics_workspace_id" { type = string } variable "blob_core_dns_zone_id" { type = string } variable "file_core_dns_zone_id" { type = string } variable "queue_core_dns_zone_id" { type = string } variable "table_core_dns_zone_id" { type = string }
AzureTRE/core/terraform/airlock/variables.tf/0
{ "file_path": "AzureTRE/core/terraform/airlock/variables.tf", "repo_id": "AzureTRE", "token_count": 657 }
101
variable "tre_id" { type = string } variable "location" { type = string } variable "resource_group_name" { type = string } variable "shared_subnet_id" { type = string } variable "azure_monitor_dns_zone_id" { type = string } variable "azure_monitor_oms_opinsights_dns_zone_id" { type = string } variable "azure_monitor_ods_opinsights_dns_zone_id" { type = string } variable "azure_monitor_agentsvc_dns_zone_id" { type = string } variable "blob_core_dns_zone_id" { type = string } variable "tre_core_tags" { type = map(string) } variable "enable_local_debugging" { type = bool }
AzureTRE/core/terraform/azure-monitor/variables.tf/0
{ "file_path": "AzureTRE/core/terraform/azure-monitor/variables.tf", "repo_id": "AzureTRE", "token_count": 231 }
102
locals { core_services_vnet_subnets = cidrsubnets(var.core_address_space, 4, 4, 4, 4, 2, 4, 4, 4, 4, 4, 4, 4, 4) # Addresses examples are based on /22 CIDR # .0 firewall_subnet_address_space = local.core_services_vnet_subnets[0] # .0 - .63 app_gw_subnet_address_prefix = local.core_services_vnet_subnets[1] # .64 - .127 bastion_subnet_address_prefix = local.core_services_vnet_subnets[2] # .128 - .191 web_app_subnet_address_prefix = local.core_services_vnet_subnets[3] # .192 - .254 # .1 shared_services_subnet_address_prefix = local.core_services_vnet_subnets[4] # .0 - .254 # .2 airlock_processor_subnet_address_prefix = local.core_services_vnet_subnets[5] # .0 - .63 airlock_storage_subnet_address_prefix = local.core_services_vnet_subnets[6] # .64 - .127 airlock_events_subnet_address_prefix = local.core_services_vnet_subnets[7] # .128 - .191 airlock_notifications_subnet_address_prefix = local.core_services_vnet_subnets[8] # .192 - .254 # .3 resource_processor_subnet_address_prefix = local.core_services_vnet_subnets[9] # .0 - .63 firewall_management_subnet_address_prefix = local.core_services_vnet_subnets[10] # .64 - .127 # FREE = local.core_services_vnet_subnets[11] # .128 - .191 # FREE = local.core_services_vnet_subnets[12] # .192 - .254 tre_core_tags = { tre_id = var.tre_id tre_core_service_id = var.tre_id } private_dns_zone_names = toset([ "privatelink.queue.core.windows.net", "privatelink.table.core.windows.net" ]) }
AzureTRE/core/terraform/network/locals.tf/0
{ "file_path": "AzureTRE/core/terraform/network/locals.tf", "repo_id": "AzureTRE", "token_count": 633 }
103
resource "azurerm_servicebus_namespace" "sb" { name = "sb-${var.tre_id}" location = azurerm_resource_group.core.location resource_group_name = azurerm_resource_group.core.name sku = "Premium" capacity = "1" tags = local.tre_core_tags # Block public access # See https://docs.microsoft.com/azure/service-bus-messaging/service-bus-service-endpoints network_rule_set { ip_rules = var.enable_local_debugging ? [local.myip] : null # Allows the Eventgrid to access the SB trusted_services_allowed = true # We must enable the Airlock events subnet to access the SB, as the Eventgrid topics can't send messages over PE # https://docs.microsoft.com/en-us/azure/event-grid/consume-private-endpoints default_action = "Deny" public_network_access_enabled = true network_rules { subnet_id = module.network.airlock_events_subnet_id ignore_missing_vnet_service_endpoint = false } } lifecycle { ignore_changes = [tags] } } resource "azurerm_servicebus_queue" "workspacequeue" { name = "workspacequeue" namespace_id = azurerm_servicebus_namespace.sb.id enable_partitioning = false requires_session = true # use sessions here to make sure updates to each resource happen in serial, in order } resource "azurerm_servicebus_queue" "service_bus_deployment_status_update_queue" { name = "deploymentstatus" namespace_id = azurerm_servicebus_namespace.sb.id # The returned payload might be large, especially for errors. # Cosmos is the final destination of the messages where 2048 is the limit. max_message_size_in_kilobytes = 2048 # default=1024 enable_partitioning = false requires_session = true } resource "azurerm_private_dns_zone" "servicebus" { name = module.terraform_azurerm_environment_configuration.private_links["privatelink.servicebus.windows.net"] resource_group_name = azurerm_resource_group.core.name tags = local.tre_core_tags lifecycle { ignore_changes = [tags] } } resource "azurerm_private_dns_zone_virtual_network_link" "servicebuslink" { name = "servicebuslink" resource_group_name = azurerm_resource_group.core.name private_dns_zone_name = azurerm_private_dns_zone.servicebus.name virtual_network_id = module.network.core_vnet_id tags = local.tre_core_tags lifecycle { ignore_changes = [tags] } } resource "azurerm_private_endpoint" "sbpe" { name = "pe-${azurerm_servicebus_namespace.sb.name}" location = azurerm_resource_group.core.location resource_group_name = azurerm_resource_group.core.name subnet_id = module.network.resource_processor_subnet_id tags = local.tre_core_tags lifecycle { ignore_changes = [tags] } private_dns_zone_group { name = "private-dns-zone-group" private_dns_zone_ids = [azurerm_private_dns_zone.servicebus.id] } private_service_connection { name = "psc-${azurerm_servicebus_namespace.sb.name}" private_connection_resource_id = azurerm_servicebus_namespace.sb.id is_manual_connection = false subresource_names = ["namespace"] } # private endpoints in serial depends_on = [ azurerm_private_endpoint.filepe ] } resource "azurerm_monitor_diagnostic_setting" "sb" { name = "diagnostics-${azurerm_servicebus_namespace.sb.name}" target_resource_id = azurerm_servicebus_namespace.sb.id log_analytics_workspace_id = module.azure_monitor.log_analytics_workspace_id dynamic "enabled_log" { for_each = setintersection(data.azurerm_monitor_diagnostic_categories.sb.log_category_types, local.servicebus_diagnostic_categories_enabled) content { category = enabled_log.value } } metric { category = "AllMetrics" enabled = true } lifecycle { ignore_changes = [log_analytics_destination_type] } }
AzureTRE/core/terraform/servicebus.tf/0
{ "file_path": "AzureTRE/core/terraform/servicebus.tf", "repo_id": "AzureTRE", "token_count": 1642 }
104
#!/bin/bash # This script is designed to be `source`d to create reusable helper functions # This script polls looking for an app registration with the given ID. # If after the number of retries no app registration is found, the function exits. function wait_for_new_service_principal() { servicePrincipalId=$1 retries=10 counter=0 local msGraphUri="" msGraphUri="$(az cloud show --query endpoints.microsoftGraphResourceId --output tsv)/v1.0" output=$(az rest --method GET --uri "${msGraphUri}/servicePrincipals/${servicePrincipalId}" 2>/dev/null || true) while [[ -z $output && $counter -lt $retries ]]; do counter=$((counter+1)) echo "Waiting for service principal with ID ${servicePrincipalId} to show up (${counter}/${retries})..." sleep 5 output=$(az rest --method GET --uri "${msGraphUri}/servicePrincipals/${servicePrincipalId}" 2>/dev/null || true) done if [[ -z $output ]]; then echo "Failed" exit 1 fi echo "Service principal with ID ${servicePrincipalId} found" }
AzureTRE/devops/scripts/aad/wait_for_new_service_principal.sh/0
{ "file_path": "AzureTRE/devops/scripts/aad/wait_for_new_service_principal.sh", "repo_id": "AzureTRE", "token_count": 345 }
105
#!/bin/bash

if [[ -z ${TRE_ID:-} ]]; then
  echo "TRE_ID environment variable must be set."
  exit 1
fi

echo "DEBUG: Check keyvault and secrets exist"

echo "az keyvault show"
az keyvault show --name kv-${TRE_ID}

echo "az keyvault secret list"
az keyvault secret list --vault-name kv-${TRE_ID}

echo "az keyvault secret list-deleted"
az keyvault secret list-deleted --vault-name kv-${TRE_ID}
AzureTRE/devops/scripts/key_vault_list.sh/0
{ "file_path": "AzureTRE/devops/scripts/key_vault_list.sh", "repo_id": "AzureTRE", "token_count": 157 }
106
#!/bin/bash set -o errexit set -o pipefail set -o nounset # Baseline Azure resources echo -e "\n\e[34m»»» 🤖 \e[96mCreating resource group and storage account\e[0m..." # shellcheck disable=SC2154 az group create --resource-group "$TF_VAR_mgmt_resource_group_name" --location "$LOCATION" -o table # shellcheck disable=SC2154 az storage account create --resource-group "$TF_VAR_mgmt_resource_group_name" \ --name "$TF_VAR_mgmt_storage_account_name" --location "$LOCATION" \ --allow-blob-public-access false \ --kind StorageV2 --sku Standard_LRS -o table # Blob container # shellcheck disable=SC2154 az storage container create --account-name "$TF_VAR_mgmt_storage_account_name" --name "$TF_VAR_terraform_state_container_name" --auth-mode login -o table # logs container az storage container create --account-name "$TF_VAR_mgmt_storage_account_name" --name "tflogs" --auth-mode login -o table cat > bootstrap_backend.tf <<BOOTSTRAP_BACKEND terraform { backend "azurerm" { resource_group_name = "$TF_VAR_mgmt_resource_group_name" storage_account_name = "$TF_VAR_mgmt_storage_account_name" container_name = "$TF_VAR_terraform_state_container_name" key = "bootstrap.tfstate" } } BOOTSTRAP_BACKEND # Set up Terraform echo -e "\n\e[34m»»» ✨ \e[96mTerraform init\e[0m..." terraform init -input=false -backend=true -reconfigure # Import the storage account & res group into state echo -e "\n\e[34m»»» 📤 \e[96mImporting resources to state\e[0m..." if ! terraform state show azurerm_resource_group.mgmt > /dev/null; then echo "/subscriptions/$ARM_SUBSCRIPTION_ID/resourceGroups/$TF_VAR_mgmt_resource_group_name" terraform import azurerm_resource_group.mgmt "/subscriptions/$ARM_SUBSCRIPTION_ID/resourceGroups/$TF_VAR_mgmt_resource_group_name" fi if ! terraform state show azurerm_storage_account.state_storage > /dev/null; then terraform import azurerm_storage_account.state_storage "/subscriptions/$ARM_SUBSCRIPTION_ID/resourceGroups/$TF_VAR_mgmt_resource_group_name/providers/Microsoft.Storage/storageAccounts/$TF_VAR_mgmt_storage_account_name" fi echo "State imported" set +o nounset
AzureTRE/devops/terraform/bootstrap.sh/0
{ "file_path": "AzureTRE/devops/terraform/bootstrap.sh", "repo_id": "AzureTRE", "token_count": 788 }
107
# Registering Templates To enable users to deploy Workspaces, Workspace Services or User Resources, we need to register their Templates. This can be done wither by running `make` commands; using the API or devops scripts. In this article both approaches are described. !!! info Templates are encapsulated in [Porter](https://porter.sh) bundles. ## Registration with make commands Porter bundles can be prepared and registered with `make` commands, which can be useful for CI/CD scenarios. Before registering, Porter bundles must be built (`make bundle-build`) and then published to the TRE registry (`make bundle publish`), before finally registering it for use with the TRE using `make bundle-register`. Here we use the Azure ML workspace service bundle as an example: ```cmd make bundle-build DIR=templates/workspace_services/azureml make bundle-publish DIR=templates/workspace_services/azureml make bundle-register DIR=templates/workspace_services/azureml BUNDLE_TYPE=workspace_service ``` If you're building, publishing and registering a lot of bundles, using three separate commands can be cumbersome, so there is a unifed command for each bundle type to make this easier: ```cmd make workspace_service_bundle BUNDLE=azureml ``` There are also `make workspace_bundle`, `make shared_service_bundle` and `make user_resource_bundle` commands for the corresponding bundle resource types. !!! tip The `make user_resource_bundle` also requires a `WORKSPACE_SERVICE` parameter to be passed alongside `BUNDLE` which specifies the workspace service that the user resource belongs to. ## Registration using Swagger UI Porter bundles can also be registered interactively using the Swagger UI. For that we need to build and publish the porter bundle 1. Build the Porter bundle ```cmd make bundle-build DIR=templates/workspace_services/azureml make bundle-publish DIR=templates/workspace_services/azureml ``` 1. Use the utility script to generate the payload. The script needs to be executed from within the bundle directory, for example `/templates/workspaces/base/` ```cmd ../../../devops/scripts/register_bundle_with_api.sh -r <acr_name> -t workspace --dry-run ``` Copy the resulting JSON payload. 1. Navigate to the Swagger UI at `/api/docs` 1. Log into the Swagger UI using `Authorize` 1. Click `Try it out` on the `POST` `/api/workspace-templates` operation: ![Post Workspace Template](../assets/post-template.png) 1. Paste the payload json generated earlier into the `Request body` field, then click `Execute`. Review the server response. 1. Verify the template registration using the `GET` operation on `/api/workspace-templates`. The name of the template should now be listed. ## Registration using script To use the script to automatically register the template, you must create a user that does not require an interactive login per the [e2e test user documentation here](../tre-admins/auth.md#tre-e2e-test). 
The script needs to be executed from within the bundle directory, for example `/templates/workspaces/base/`

```cmd
Usage: ../../../devops/scripts/register_bundle_with_api.sh [-c --current]

Options:
   -r, --acr-name                Azure Container Registry Name
   -t, --bundle-type             Bundle type: workspace, workspace_service, user_resource or shared_service
   -w, --workspace-service-name  The template name of the user resource (if registering a user_resource)
   -c, --current                 Make this the currently deployed version of this template
   -v, --verify                  Verify registration with the API
```

In addition to generating the payload, the script posts the payload to the `/api/workspace-templates` endpoint. Once registered, the template can be retrieved by a `GET` operation on `/api/workspace-templates`.

!!! tip
    Follow the same procedure to register workspace service templates and user resource templates.
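As an end-to-end illustration, the whole script-based flow can be combined into a few commands. This is a sketch only: `myacr` is a placeholder registry name, and it assumes you start from the repository root and register the base workspace template.

```bash
# Sketch: build, publish and register the base workspace bundle via the script.
# "myacr" is a placeholder ACR name - substitute your own registry.
make bundle-build DIR=templates/workspaces/base
make bundle-publish DIR=templates/workspaces/base

# The registration script must run from inside the bundle directory.
cd templates/workspaces/base
../../../devops/scripts/register_bundle_with_api.sh \
  -r myacr \
  -t workspace \
  --current \
  --verify
```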
AzureTRE/docs/tre-admins/registering-templates.md/0
{ "file_path": "AzureTRE/docs/tre-admins/registering-templates.md", "repo_id": "AzureTRE", "token_count": 1105 }
108
# Start/Stop Azure TRE

Once you've provisioned an Azure TRE instance, it will begin to incur the running costs of the underlying Azure services. During evaluation or development, you may want to "pause" the TRE environment outside office hours or at weekends, to reduce costs without having to completely destroy the environment. The following `make` targets provide a simple way to start and stop both the Azure Firewall and Azure Application Gateway instances, considerably reducing the Azure TRE instance running costs.

!!! info
    After running `make all`, the underlying Azure TRE services are automatically started and billing begins.

## Start Azure TRE

This will allocate the Azure Firewall with a public IP and start the Azure Application Gateway service, starting billing of both services.

```bash
make tre-start
```

## Stop Azure TRE

This will deallocate the Azure Firewall public IP and stop the Azure Application Gateway service, stopping billing of both services.

```bash
make tre-stop
```

## Automating `stop`

In certain situations, you might want to stop all running TREs on a schedule to reduce costs across a whole subscription. We have this procedure set up in our development subscriptions: each night we stop all our environments, after which each developer needs to _manually_ start their TRE when they need it again.

### Requirements

We use [Azure Automation](https://learn.microsoft.com/azure/automation/overview) to run this procedure. Be sure to create a runbook with PowerShell 7.1 or PowerShell 7.2 enabled and an identity with contributor permissions on the subscription. Note that the script below uses a system-assigned managed identity; if you use something different, you might need to update the authentication part. If you create a new Automation account, you will have the required modules preinstalled. Finally, schedule it to run when it makes sense for you.

### Stop Runbook Script

```powershell
try {
    "Logging in to Azure..."
Connect-AzAccount -Identity } catch { Write-Error -Message $_.Exception throw $_.Exception } $azContext = Get-AzContext $azProfile = [Microsoft.Azure.Commands.Common.Authentication.Abstractions.AzureRmProfileProvider]::Instance.Profile $profileClient = New-Object -TypeName Microsoft.Azure.Commands.ResourceManager.Common.RMProfileClient -ArgumentList ($azProfile) $token = $profileClient.AcquireAccessToken($azContext.Subscription.TenantId) $authHeader = @{ 'Content-Type' = 'application/json' 'Authorization' = 'Bearer ' + $token.AccessToken } # Get all resource groups that have the default Azure TRE project tag value $ResourceGroups = Get-AzResourceGroup -Tag @{'project' = 'Azure Trusted Research Environment' } foreach ($Group in $ResourceGroups) { if ($Group.ResourceGroupName -like '*-ws-*') { # Deal with the workspace resource groups separately (below) continue } # Deallocate the Azure Firewall (expecting only one per TRE instance) $Firewall = Get-AzFirewall -ResourceGroupName $Group.ResourceGroupName if ($null -ne $Firewall) { $Firewall.Deallocate() Write-Output "Deallocating Firewall '$($Firewall.Name)'" Set-AzFirewall -AzureFirewall $Firewall } # Stop the Application Gateway(s) # Multiple Application Gateways may exist if the certs shared service is installed $Gateways = Get-AzApplicationGateway -ResourceGroupName $Group.ResourceGroupName foreach ($Gateway in $Gateways) { Write-Output "Stopping Application Gateway '$($Gateway.Name)'" Stop-AzApplicationGateway -ApplicationGateway $Gateway } # Stop the MySQL servers $MySQLServers = Get-AzResource -ResourceGroupName $Group.ResourceGroupName -ResourceType "Microsoft.DBforMySQL/servers" foreach ($Server in $MySQLServers) { # Invoke the REST API Write-Output "Stopping MySQL '$($Server.Name)'" $restUri = 'https://management.azure.com/subscriptions/' + $azContext.Subscription.Id + '/resourceGroups/' + $Group.ResourceGroupName + '/providers/Microsoft.DBForMySQL/servers/' + $Server.Name + '/stop?api-version=2020-01-01' $response = Invoke-RestMethod -Uri $restUri -Method POST -Headers $authHeader } # Deallocate all the virtual machine scale sets (resource processor) $VMSS = Get-AzVMSS -ResourceGroupName $Group.ResourceGroupName foreach ($item in $VMSS) { Write-Output "Stopping VMSS '$($item.Name)'" Stop-AzVmss -ResourceGroupName $item.ResourceGroupName -VMScaleSetName $item.Name -Force } # Deallocate all the VMs $VM = Get-AzVM -ResourceGroupName $Group.ResourceGroupName foreach ($item in $VM) { Write-Output "Stopping VM '$($item.Name)'" Stop-AzVm -ResourceGroupName $item.ResourceGroupName -Name $item.Name -Force } # Process all the workspace resource groups for this TRE instance $WorkspaceResourceGroups = Get-AzResourceGroup -Name "$($Group.ResourceGroupName)-ws-*" foreach ($wsrg in $WorkspaceResourceGroups) { # Deallocate all the VMs $VM = Get-AzVM -ResourceGroupName $wsrg.ResourceGroupName foreach ($item in $VM) { Write-Output "Stopping workspace VM '$($item.Name)'" Stop-AzVm -ResourceGroupName $item.ResourceGroupName -Name $item.Name -Force } } } ``` ### Automating `start` To restart the TRE core services (Firewall, Application Gateway(s), Virtual Machine Scale Sets, Virtual Machines, and MySQL), you can use `make tre-start`. Depending on your workflow, you might not be able to easily execute the `make` target. Alternatively, you can create a second Runbook and execute it manually. The PowerShell code to start TRE core services is below: ```powershell try { "Logging in to Azure..." 
Connect-AzAccount -Identity } catch { Write-Error -Message $_.Exception throw $_.Exception } $azContext = Get-AzContext $azProfile = [Microsoft.Azure.Commands.Common.Authentication.Abstractions.AzureRmProfileProvider]::Instance.Profile $profileClient = New-Object -TypeName Microsoft.Azure.Commands.ResourceManager.Common.RMProfileClient -ArgumentList ($azProfile) $token = $profileClient.AcquireAccessToken($azContext.Subscription.TenantId) $authHeader = @{ 'Content-Type' = 'application/json' 'Authorization' = 'Bearer ' + $token.AccessToken } # Get all resource groups that have the default Azure TRE project tag value $ResourceGroups = Get-AzResourceGroup -Tag @{'project' = 'Azure Trusted Research Environment' } foreach ($Group in $ResourceGroups) { if ($Group.ResourceGroupName -like '*-ws-*') { # Don't deal with the workspace resource groups continue } $azureTreId = $Group.Tags['tre_id'] Write-Output "Starting TRE core resources for '$azureTreId'" # Allocate the Azure Firewall (expecting only one per TRE instance) $Firewall = Get-AzFirewall -ResourceGroupName $Group.ResourceGroupName if ($null -ne $Firewall) { # Find the firewall's public IP and virtual network $pip = Get-AzPublicIpAddress -ResourceGroupName $Group.ResourceGroupName -Name "pip-fw-$azureTreId" $vnet = Get-AzVirtualNetwork -ResourceGroupName $Group.ResourceGroupName -Name "vnet-$azureTreId" $Firewall.Allocate($vnet, $pip) Write-Output "Allocating Firewall '$($Firewall.Name)' with public IP '$($pip.Name)'" Set-AzFirewall -AzureFirewall $Firewall } # Start the Application Gateway(s) # Multiple Application Gateways may exist if the certs shared service is installed $Gateways = Get-AzApplicationGateway -ResourceGroupName $Group.ResourceGroupName foreach ($Gateway in $Gateways) { Write-Output "Starting Application Gateway '$($Gateway.Name)'" Start-AzApplicationGateway -ApplicationGateway $Gateway } # Start the MySQL servers $MySQLServers = Get-AzResource -ResourceGroupName $Group.ResourceGroupName -ResourceType "Microsoft.DBforMySQL/servers" foreach ($Server in $MySQLServers) { # Invoke the REST API Write-Output "Starting MySQL '$($Server.Name)'" $restUri = 'https://management.azure.com/subscriptions/' + $azContext.Subscription.Id + '/resourceGroups/' + $Group.ResourceGroupName + '/providers/Microsoft.DBForMySQL/servers/' + $Server.Name + '/start?api-version=2020-01-01' $response = Invoke-RestMethod -Uri $restUri -Method POST -Headers $authHeader } # Allocate all the virtual machine scale sets (resource processor) $VMSS = Get-AzVMSS -ResourceGroupName $Group.ResourceGroupName foreach ($item in $VMSS) { Write-Output "Starting VMSS '$($item.Name)'" Start-AzVmss -ResourceGroupName $item.ResourceGroupName -VMScaleSetName $item.Name } # Start VMs $VM = Get-AzVM -ResourceGroupName $Group.ResourceGroupName foreach ($item in $VM) { Write-Output "Starting VM '$($item.Name)'" Start-AzVm -ResourceGroupName $item.ResourceGroupName -Name $item.Name } } ```
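Whichever runbook you use, it can be useful to confirm afterwards which TRE resources are actually running. The following is a minimal sketch (not part of the runbooks above) that assumes the Azure CLI is logged in to the same subscription; it only inspects VM power state, so the Firewall and Application Gateway still need to be checked separately.

```bash
# Sketch: list TRE-tagged resource groups and show the power state of their VMs.
# Assumes 'az' is already logged in to the subscription targeted by the runbooks.
for rg in $(az group list --tag project='Azure Trusted Research Environment' --query "[].name" -o tsv); do
  echo "Resource group: ${rg}"
  # --show-details (-d) adds the powerState column to the VM listing.
  az vm list --show-details --resource-group "${rg}" \
    --query "[].{name:name, powerState:powerState}" -o table
done
```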
AzureTRE/docs/tre-admins/start-stop.md/0
{ "file_path": "AzureTRE/docs/tre-admins/start-stop.md", "repo_id": "AzureTRE", "token_count": 2726 }
109
# Pipeline Template Schema

This document will help you write a valid `pipeline: {}` block in your template.

> For a working example, see `./templates/shared-services/sonatype-nexus/template_schema.json`.

## Schema

```json
"pipeline": {
    "install": [ // <-- [install | upgrade | uninstall]
      {
        "stepId": "a unique string value here",
        "stepTitle": "Friendly description of the step here - will be displayed in the UI",
        "resourceTemplateName": "name of the resource template to update", // only required for shared_service targets
        "resourceType": "shared_service", // [ shared_service | user_resource | workspace_service | workspace ]
        "resourceAction": "upgrade", // <-- currently only upgrade supported
        "properties": [
        {
          "name": "display_name",
          "type": "string",
          "value": "A new name here!"
        }]
      },
      {
        "stepId": "main" // <-- deployment of the VM resource
      },
```

## Substituting Resource Property Values

It's possible to refer to properties from the primary resource (the resource that triggered this pipeline) in the template steps. The values will be substituted in at runtime. The syntax is `{{ resource.propertyName }}`. For example: `"{{ resource.properties.display_name }}"`.

Example pipeline in `template_schema.json`:

The example below references two properties from the primary resource to be used in updating the firewall shared service.

```json
"pipeline": {
    "upgrade": [
      {
        "stepId": "1234567-87654-2345-6543",
        "stepTitle": "Update a firewall rule",
        "resourceTemplateName": "tre-shared-service-firewall",
        "resourceType": "shared_service",
        "resourceAction": "upgrade",
        "arraySubstitutionAction": "replace", // <-- [append | remove | replace]
        "arrayMatchField": "name", // <-- name of the field in the array object to match on, for remove / replace
        "properties": [
        {
          "name": "rule_collections",
          "type": "array", // <-- More on array types below
          "value": { // <-- value can be string or object
            "name": "my-firewall-rule-collection",
            "action": "Allow",
            "rules": [
              {
                "name": "my-rules",
                "target_fqdns": "{{ resource.properties.fqdns_list }}",
                "source_addresses": "{{ resource.properties.address_prefixes }}"
              }
            ]
          }
        }]
      },
```

## Working with Properties Containing Arrays

It's possible that a resource property would actually be an array. As an example, the firewall shared service has the `rule_collections` property. This single property contains an array of objects. Since the values inside this array may have been sourced from different resources, it's important to leave other values intact when modifying the property.

To do so, the `arraySubstitutionAction` field supports the following values (see the illustrative sketch at the end of this document):

- `append` - just append this object into the array
- `replace` - find this object in the array (using the `arrayMatchField` value), and replace it with this value
- `remove` - remove this object from the array (useful for `uninstall` actions)

## Notes

- Each step is executed serially, in the order defined in the template
- Theoretically any number of steps could be created
- A step with `stepId` of `main` represents where in the chain the primary resource will get deployed. It is possible to omit this step altogether, and not touch the primary resource at all.
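To make the array substitution semantics concrete, here is a small stand-alone sketch. It is not how the API implements it internally; it simply mimics the `replace` behaviour (matching on the `name` field, as `arrayMatchField` would) using `jq` and made-up rule collection data.

```bash
# Sketch of the 'replace' arraySubstitutionAction using jq (illustrative data only).
existing='[{"name":"other-collection","action":"Deny"},{"name":"my-firewall-rule-collection","action":"Allow","rules":[]}]'
incoming='{"name":"my-firewall-rule-collection","action":"Allow","rules":[{"name":"my-rules"}]}'

# Replace: swap the entry whose "name" matches, leave every other entry intact.
echo "${existing}" | jq --argjson item "${incoming}" \
  'map(if .name == $item.name then $item else . end)'

# Append would simply be:  jq --argjson item "${incoming}" '. + [$item]'
# Remove would be:         jq --argjson item "${incoming}" 'map(select(.name != $item.name))'
```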
AzureTRE/docs/tre-templates/pipeline-templates/pipeline-schema.md/0
{ "file_path": "AzureTRE/docs/tre-templates/pipeline-templates/pipeline-schema.md", "repo_id": "AzureTRE", "token_count": 1220 }
110
# Operations Debugging and Troubleshooting Guide

This guide explains how to find the root cause when a workspace resource has not been deployed. The topics in this section should be followed in order, as that mirrors how a deployment message flows through the system.
AzureTRE/docs/troubleshooting-faq/index.md/0
{ "file_path": "AzureTRE/docs/troubleshooting-faq/index.md", "repo_id": "AzureTRE", "token_count": 59 }
111
IMPORT = "import" EXPORT = "export" DRAFT_STATUS = "draft" SUBMITTED_STATUS = "submitted" IN_REVIEW_STATUS = "in_review" APPROVED_STATUS = "approved" REJECTED_STATUS = "rejected" CANCELLED_STATUS = "cancelled" BLOCKED_STATUS = "blocked_by_scan" FAILED_STATUS = "failed" AIRLOCK_REQUEST = "airlockRequest" AIRLOCK_REQUEST_STATUS = "status" AIRLOCK_REQUEST_STATUS_MESSAGE = "statusMessage"
AzureTRE/e2e_tests/airlock/strings.py/0
{ "file_path": "AzureTRE/e2e_tests/airlock/strings.py", "repo_id": "AzureTRE", "token_count": 162 }
112
import pytest from httpx import AsyncClient import config from resources import strings pytestmark = pytest.mark.asyncio @pytest.mark.smoke async def test_health() -> None: async with AsyncClient(verify=False) as client: url = f"{config.TRE_URL}{strings.API_HEALTH}" response = await client.get(url) assert response.status_code == 200 assert response.json()["services"] == [ {"service": "Cosmos DB", "status": "OK", "message": ""}, {"service": "Service Bus", "status": "OK", "message": ""}, {"service": "Resource Processor", "status": "OK", "message": ""}, ]
AzureTRE/e2e_tests/test_provisioned_health_api.py/0
{ "file_path": "AzureTRE/e2e_tests/test_provisioned_health_api.py", "repo_id": "AzureTRE", "token_count": 252 }
113
from http.server import HTTPServer, BaseHTTPRequestHandler from socketserver import ThreadingMixIn class RequestHandler(BaseHTTPRequestHandler): def do_GET(self): self.send_response(200) self.end_headers() class ThreadedHTTPServer(ThreadingMixIn, HTTPServer): """Handle requests in a separate thread.""" def start_server(): server = ThreadedHTTPServer(('0.0.0.0', 8080), RequestHandler) server.serve_forever()
AzureTRE/resource_processor/resources/httpserver.py/0
{ "file_path": "AzureTRE/resource_processor/resources/httpserver.py", "repo_id": "AzureTRE", "token_count": 150 }
114
# syntax=docker/dockerfile:1 FROM python:3.8-slim-bullseye SHELL ["/bin/bash", "-o", "pipefail", "-c"] RUN rm -f /etc/apt/apt.conf.d/docker-clean; echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache # Install Azure CLI ARG AZURE_CLI_VERSION=2.50.0-1~bullseye COPY scripts/azure-cli.sh /tmp/ RUN --mount=type=cache,target=/var/cache/apt --mount=type=cache,target=/var/lib/apt \ export AZURE_CLI_VERSION=${AZURE_CLI_VERSION} \ && /tmp/azure-cli.sh # TODO: Remove porter v0 https://github.com/microsoft/AzureTRE/issues/2990 # Install Porter ARG PORTER_MIRROR=https://cdn.porter.sh ARG PORTER_VERSION=v0.38.13 ARG PORTER_TERRAFORM_MIXIN_VERSION=v1.0.0-rc.1 ARG PORTER_AZ_MIXIN_VERSION=v0.7.3 ARG PORTER_AZURE_PLUGIN_VERSION=v0.11.2 ARG PORTER_HOME=/root/.porter-v0/ COPY scripts/porter.sh /tmp/ RUN export PORTER_MIRROR=${PORTER_MIRROR} \ PORTER_VERSION=${PORTER_VERSION} \ PORTER_TERRAFORM_MIXIN_VERSION=${PORTER_TERRAFORM_MIXIN_VERSION} \ PORTER_AZ_MIXIN_VERSION=${PORTER_AZ_MIXIN_VERSION} \ PORTER_AZURE_PLUGIN_VERSION=${PORTER_AZURE_PLUGIN_VERSION} \ PORTER_HOME=${PORTER_HOME} \ && /tmp/porter.sh ENV PORTER_HOME_V0 ${PORTER_HOME} # can't be in a non default path # ARG PORTER_HOME_V1=/home/$USERNAME/.porter-v1/ ARG PORTER_HOME_V1=/root/.porter/ ARG PORTER_VERSION=v1.0.15 ARG PORTER_TERRAFORM_MIXIN_VERSION=v1.0.2 ARG PORTER_AZ_MIXIN_VERSION=v1.0.1 ARG PORTER_AZURE_PLUGIN_VERSION=v1.2.0 COPY scripts/porter-v1.sh /tmp/ RUN export PORTER_VERSION=${PORTER_VERSION} \ PORTER_TERRAFORM_MIXIN_VERSION=${PORTER_TERRAFORM_MIXIN_VERSION} \ PORTER_AZ_MIXIN_VERSION=${PORTER_AZ_MIXIN_VERSION} \ PORTER_AZURE_PLUGIN_VERSION=${PORTER_AZURE_PLUGIN_VERSION} \ PORTER_HOME=${PORTER_HOME_V1} \ && /tmp/porter-v1.sh ENV PATH ${PORTER_HOME_V1}:$PATH # Install Docker RUN --mount=type=cache,target=/var/cache/apt --mount=type=cache,target=/var/lib/apt \ apt-get update && apt-get install -y apt-transport-https ca-certificates curl gnupg lsb-release --no-install-recommends \ && curl -fsSL https://download.docker.com/linux/debian/gpg | gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg \ && echo "deb [arch=amd64 signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/debian $(lsb_release -cs) stable" \ | tee /etc/apt/sources.list.d/docker.list > /dev/null \ && apt-get update && apt-get install -y docker-ce="5:23.0.3-1~debian.11~bullseye" docker-ce-cli="5:23.0.3-1~debian.11~bullseye" containerd.io="1.6.20-1" --no-install-recommends COPY ./vmss_porter/config.yaml ${PORTER_HOME_V1}/ ENV PYTHONPATH . COPY ./vmss_porter/requirements.txt /tmp/ RUN pip3 --no-cache-dir install -r /tmp/requirements.txt COPY . /app WORKDIR /app/ EXPOSE 8080 CMD ["./run.sh"]
AzureTRE/resource_processor/vmss_porter/Dockerfile/0
{ "file_path": "AzureTRE/resource_processor/vmss_porter/Dockerfile", "repo_id": "AzureTRE", "token_count": 1263 }
115
data "azurerm_application_insights" "core" { name = "appi-${var.tre_id}" resource_group_name = local.core_resource_group_name } data "azurerm_servicebus_namespace" "core" { name = "sb-${var.tre_id}" resource_group_name = local.core_resource_group_name } data "azurerm_storage_account" "storage" { name = local.storage_account_name resource_group_name = local.core_resource_group_name } data "azurerm_resource_group" "core" { name = local.core_resource_group_name } data "azurerm_subscription" "current" { } data "azurerm_eventgrid_topic" "airlock_notification" { name = local.notification_topic_name resource_group_name = local.core_resource_group_name } data "azurerm_subnet" "airlock_notification" { name = "AirlockNotifiactionSubnet" virtual_network_name = local.core_vnet resource_group_name = local.core_resource_group_name } data "azurerm_public_ip" "app_gateway_ip" { name = "pip-agw-${var.tre_id}" resource_group_name = local.core_resource_group_name } data "azurerm_firewall_policy" "core" { name = "fw-policy-${var.tre_id}" resource_group_name = local.core_resource_group_name } data "azurerm_ip_group" "resource_processor" { name = "ipg-resource-processor" resource_group_name = local.core_resource_group_name }
AzureTRE/templates/shared_services/airlock_notifier/terraform/data.tf/0
{ "file_path": "AzureTRE/templates/shared_services/airlock_notifier/terraform/data.tf", "repo_id": "AzureTRE", "token_count": 594 }
116
{ "$schema": "http://json-schema.org/draft-07/schema", "$id": "https://github.com/microsoft/AzureTRE/templates/shared_services/certs/template_schema.json", "type": "object", "title": "Certificate Service", "description": "Provides SSL Certs for a specified internal domain", "required": [ "domain_prefix", "cert_name" ], "properties": { "display_name": { "type": "string", "title": "Name for the workspace service", "description": "The name of the workspace service to be displayed to users", "default": "Certificate Service", "updateable": true }, "description": { "type": "string", "title": "Description of the workspace service", "description": "Description of the workspace service", "default": "Provides an SSL certificate for a specified internal domain", "updateable": true }, "domain_prefix": { "$id": "#/properties/domain_prefix", "type": "string", "title": "Domain prefix", "description": "The FQDN prefix (which will be prepended to {TRE_ID}.{LOCATION}.cloudapp.azure.com) to generate a certificate for" }, "cert_name": { "$id": "#/properties/cert_name", "type": "string", "title": "Cert name", "description": "What to call the certificate that's exported to KeyVault (alphanumeric and '-' only)" } }, "pipeline": { "install": [ { "stepId": "97713f47-a6d9-490b-9bf6-79a3b59df402", "stepTitle": "Add network firewall rules for certs/letsencrypt", "resourceTemplateName": "tre-shared-service-firewall", "resourceType": "shared-service", "resourceAction": "upgrade", "properties": [ { "name": "rule_collections", "type": "array", "arraySubstitutionAction": "replace", "arrayMatchField": "name", "value": { "name": "arc_svc_{{ resource.id }}_rp_certs", "action": "Allow", "rules": [ { "name": "LetsEncrypt", "description": "Resource Processor to interact with LetsEncrypt", "protocols": [ { "port": "443", "type": "Https" }, { "port": "80", "type": "Http" } ], "target_fqdns": [ "acme-v02.api.letsencrypt.org" ], "source_ip_groups_in_core": [ "ipg-resource-processor" ] } ] } } ] }, { "stepId": "main" } ], "upgrade": [ { "stepId": "250a65e6-6f66-4a27-b2bc-9c9890948b46", "stepTitle": "Update network firewall rules for certs/letsencrypt", "resourceTemplateName": "tre-shared-service-firewall", "resourceType": "shared-service", "resourceAction": "upgrade", "properties": [ { "name": "rule_collections", "type": "array", "arraySubstitutionAction": "replace", "arrayMatchField": "name", "value": { "name": "arc_svc_{{ resource.id }}_rp_certs", "action": "Allow", "rules": [ { "name": "LetsEncrypt", "description": "Resource Processor to interact with LetsEncrypt", "protocols": [ { "port": "443", "type": "Https" }, { "port": "80", "type": "Http" } ], "target_fqdns": [ "acme-v02.api.letsencrypt.org" ], "source_ip_groups_in_core": [ "ipg-resource-processor" ] } ] } } ] }, { "stepId": "main" } ], "uninstall": [ { "stepId": "main" }, { "stepId": "a0bd5e34-5902-4589-94a7-27dbc22f0724", "stepTitle": "Remove network firewall rules for certs/letsencrypt", "resourceTemplateName": "tre-shared-service-firewall", "resourceType": "shared-service", "resourceAction": "upgrade", "properties": [ { "name": "rule_collections", "type": "array", "arraySubstitutionAction": "remove", "arrayMatchField": "name", "value": { "name": "arc_svc_{{ resource.id }}_rp_certs" } } ] } ] } }
AzureTRE/templates/shared_services/certs/template_schema.json/0
{ "file_path": "AzureTRE/templates/shared_services/certs/template_schema.json", "repo_id": "AzureTRE", "token_count": 2693 }
117
resource "random_string" "username" { length = 4 upper = true lower = true numeric = true min_numeric = 1 min_lower = 1 special = false } resource "random_password" "password" { length = 16 lower = true min_lower = 1 upper = true min_upper = 1 numeric = true min_numeric = 1 special = true min_special = 1 override_special = "_%@" } # Using old resource due to - https://github.com/hashicorp/terraform-provider-azurerm/issues/6117 resource "azurerm_virtual_machine" "cyclecloud" { name = local.vm_name location = data.azurerm_resource_group.rg.location resource_group_name = data.azurerm_resource_group.rg.name network_interface_ids = [azurerm_network_interface.cyclecloud.id] vm_size = "Standard_DS3_v2" delete_os_disk_on_termination = true delete_data_disks_on_termination = true os_profile_linux_config { disable_password_authentication = false } os_profile { computer_name = local.vm_name admin_username = random_string.username.result admin_password = random_password.password.result } storage_image_reference { publisher = "azurecyclecloud" offer = "azure-cyclecloud" sku = "cyclecloud8" version = "latest" } plan { publisher = "azurecyclecloud" name = "cyclecloud8" product = "azure-cyclecloud" } storage_os_disk { name = "${local.vm_name}-osdisk" caching = "ReadWrite" create_option = "FromImage" managed_disk_type = "Premium_LRS" } identity { type = "SystemAssigned" } tags = local.tre_shared_service_tags lifecycle { ignore_changes = [tags] } } resource "azurerm_key_vault_secret" "cyclecloud_password" { name = "${local.vm_name}-admin-credentials" value = "${random_string.username.result}\n${random_password.password.result}" key_vault_id = data.azurerm_key_vault.core.id tags = local.tre_shared_service_tags lifecycle { ignore_changes = [tags] } } data "azurerm_subscription" "primary" { } # could change to RG contributor and sub reader resource "azurerm_role_assignment" "subscription_contributor" { scope = data.azurerm_subscription.primary.id role_definition_name = "Contributor" principal_id = azurerm_virtual_machine.cyclecloud.identity[0].principal_id } resource "azurerm_network_interface" "cyclecloud" { name = "nic-cyclecloud-${var.tre_id}-${local.short_service_id}" location = data.azurerm_resource_group.rg.location resource_group_name = local.core_resource_group_name tags = local.tre_shared_service_tags ip_configuration { name = "primary" subnet_id = data.azurerm_subnet.shared.id private_ip_address_allocation = "Dynamic" } lifecycle { ignore_changes = [tags] } } resource "azurerm_private_dns_zone" "cyclecloud" { name = "cyclecloud-${data.azurerm_public_ip.app_gateway_ip.fqdn}" resource_group_name = local.core_resource_group_name tags = local.tre_shared_service_tags lifecycle { ignore_changes = [tags] } } resource "azurerm_private_dns_zone_virtual_network_link" "cyclecloud_core_vnet" { name = "cyclecloudlink-core" resource_group_name = local.core_resource_group_name private_dns_zone_name = azurerm_private_dns_zone.cyclecloud.name virtual_network_id = data.azurerm_virtual_network.core.id tags = local.tre_shared_service_tags lifecycle { ignore_changes = [tags] } } resource "azurerm_private_dns_a_record" "cyclecloud_vm" { name = "@" zone_name = azurerm_private_dns_zone.cyclecloud.name resource_group_name = local.core_resource_group_name ttl = 300 records = [azurerm_network_interface.cyclecloud.private_ip_address] tags = local.tre_shared_service_tags lifecycle { ignore_changes = [tags] } }
AzureTRE/templates/shared_services/cyclecloud/terraform/cyclecloud.tf/0
{ "file_path": "AzureTRE/templates/shared_services/cyclecloud/terraform/cyclecloud.tf", "repo_id": "AzureTRE", "token_count": 1843 }
118
locals { address_space = cidrsubnets("10.28.0.0/23", 1, 1) container_subnet_address_space = local.address_space[0] # private host_subnet_address_space = local.address_space[1] # public short_service_id = substr(var.tre_resource_id, -4, -1) service_resource_name_suffix = "${var.tre_id}-svc-${local.short_service_id}" resource_group_name = "rg-${var.tre_id}-svc-${local.short_service_id}" virtual_network_name = "vnet-${local.service_resource_name_suffix}" core_virtual_network_name = "vnet-${var.tre_id}" core_resource_group_name = "rg-${var.tre_id}" databricks_workspace_name = "adb-${local.service_resource_name_suffix}" managed_resource_group_name = "rg-adb-${local.service_resource_name_suffix}" host_subnet_name = "adb-host-subnet-${local.service_resource_name_suffix}" container_subnet_name = "adb-container-subnet-${local.service_resource_name_suffix}" network_security_group_name = "nsg-${local.service_resource_name_suffix}" tre_shared_service_tags = { tre_id = var.tre_id tre_shared_service_id = var.tre_resource_id } }
AzureTRE/templates/shared_services/databricks-auth/terraform/locals.tf/0
{ "file_path": "AzureTRE/templates/shared_services/databricks-auth/terraform/locals.tf", "repo_id": "AzureTRE", "token_count": 532 }
119
locals { core_resource_group_name = "rg-${var.tre_id}" firewall_name = "fw-${var.tre_id}" firewall_diagnostic_categories_enabled = [ "AZFWApplicationRule", "AZFWNetworkRule", "AZFWDnsProxy", ] tre_shared_service_tags = { tre_id = var.tre_id tre_shared_service_id = var.tre_resource_id } api_driven_application_rule_collection = jsondecode(base64decode(var.api_driven_rule_collections_b64)) api_driven_network_rule_collection = jsondecode(base64decode(var.api_driven_network_rule_collections_b64)) firewall_policy_name = "fw-policy-${var.tre_id}" }
AzureTRE/templates/shared_services/firewall/terraform/locals.tf/0
{ "file_path": "AzureTRE/templates/shared_services/firewall/terraform/locals.tf", "repo_id": "AzureTRE", "token_count": 259 }
120
[ "NexusAuthenticatingRealm", "NexusAuthorizingRealm", "DockerToken" ]
AzureTRE/templates/shared_services/sonatype-nexus-vm/scripts/nexus_realms_config.json/0
{ "file_path": "AzureTRE/templates/shared_services/sonatype-nexus-vm/scripts/nexus_realms_config.json", "repo_id": "AzureTRE", "token_count": 33 }
121
{ "$schema": "http://json-schema.org/draft-07/schema", "$id": "https://github.com/microsoft/AzureTRE/templates/shared_services/sonatype-nexus-vm/template_schema.json", "type": "object", "title": "Sonatype Nexus Repository Manager", "description": "Sonatype Nexus is a repository manager that helps you manage components, binaries, and build artifacts. Its proxy functionality enables users without public Internet connectivity to access software packages.", "required": [ "ssl_cert_name" ], "properties": { "display_name": { "type": "string", "title": "Name for the workspace service", "description": "The name of the workspace service to be displayed to users", "default": "Package Mirror", "updateable": true }, "description": { "type": "string", "title": "Description of the workspace service", "description": "Description of the workspace service", "default": "Access software packages via Sonatype Nexus Repository Manager.", "updateable": true }, "overview": { "type": "string", "title": "Workspace Service Overview", "description": "Long form description of the workspace service, in markdown syntax", "default": "Sonatype Nexus is a repository manager that helps you manage components, binaries and build artifacts. It's proxy functionality enables users without public Internet connectivity to access software packages. To understand how to work with different package formats, please review the documentation here: [https://help.sonatype.com/repomanager3/nexus-repository-administration/formats](https://help.sonatype.com/repomanager3/nexus-repository-administration/formats).", "updateable": true }, "ssl_cert_name": { "type": "string", "title": "SSL certificate name", "description": "The name of the certificate to use (located in the core KeyVault) for configuring Nexus SSL", "default": "nexus-ssl" }, "is_exposed_externally": { "$id": "#/properties/is_exposed_externally", "type": "boolean", "title": "Expose externally", "description": "Is the Sonatype Nexus accessible from outside of the TRE network.", "default": false } }, "uiSchema": { "is_exposed_externally": { "classNames": "tre-hidden" } }, "pipeline": { "install": [ { "stepId": "main" }, { "stepId": "42024559-3a88-4518-b1ea-713aebc91cfd", "stepTitle": "Add Nexus rule collection to firewall", "resourceTemplateName": "tre-shared-service-firewall", "resourceType": "shared-service", "resourceAction": "upgrade", "properties": [ { "name": "rule_collections", "type": "array", "arraySubstitutionAction": "replace", "arrayMatchField": "name", "value": { "name": "arc_nexus", "rules": [ { "name": "vm-crl", "description": "CRLs for workspaces", "protocols": [ { "port": "443", "type": "Https" }, { "port": "80", "type": "Http" } ], "target_fqdns": "{{ resource.properties.workspace_vm_allowed_fqdns_list }}", "source_addresses": [ "*" ] }, { "name": "nexus-package-sources", "description": "Nexus Package Sources", "protocols": [ { "port": "443", "type": "Https" }, { "port": "80", "type": "Http" } ], "target_fqdns": "{{ resource.properties.nexus_allowed_fqdns_list }}", "source_addresses": "{{ resource.properties.private_ip_addresses }}" } ] } } ] } ], "upgrade": [ { "stepId": "main" }, { "stepId": "a794e818-0807-4012-90be-3e78f530383c", "stepTitle": "Update Nexus rule collection in firewall", "resourceTemplateName": "tre-shared-service-firewall", "resourceType": "shared-service", "resourceAction": "upgrade", "properties": [ { "name": "rule_collections", "type": "array", "arraySubstitutionAction": "replace", "arrayMatchField": "name", "value": { "name": "arc_nexus", 
"action": "Allow", "rules": [ { "name": "vm-crl", "description": "CRLs for workspaces", "protocols": [ { "port": "443", "type": "Https" }, { "port": "80", "type": "Http" } ], "target_fqdns": "{{ resource.properties.workspace_vm_allowed_fqdns_list }}", "source_addresses": [ "*" ] }, { "name": "nexus-package-sources", "description": "Nexus Package Sources", "protocols": [ { "port": "443", "type": "Https" }, { "port": "80", "type": "Http" } ], "target_fqdns": "{{ resource.properties.nexus_allowed_fqdns_list }}", "source_addresses": "{{ resource.properties.private_ip_addresses }}" } ] } } ] } ], "uninstall": [ { "stepId": "c3f95f9f-d125-4937-9403-84e4957a26b8", "stepTitle": "Remove Nexus rule collection from firewall", "resourceTemplateName": "tre-shared-service-firewall", "resourceType": "shared-service", "resourceAction": "upgrade", "properties": [ { "name": "rule_collections", "type": "array", "arraySubstitutionAction": "remove", "arrayMatchField": "name", "value": { "name": "arc_nexus" } } ] }, { "stepId": "main" } ] } }
AzureTRE/templates/shared_services/sonatype-nexus-vm/template_schema.json/0
{ "file_path": "AzureTRE/templates/shared_services/sonatype-nexus-vm/template_schema.json", "repo_id": "AzureTRE", "token_count": 3614 }
122
{ "$schema": "http://json-schema.org/draft-07/schema", "$id": "https://github.com/microsoft/AzureTRE/templates/workspace_services/azureml/template_schema.json", "type": "object", "title": "Azure Machine Learning", "description": "Azure Machine Learning", "required": [], "properties": { "display_name": { "type": "string", "title": "Name for the workspace service", "description": "The name of the workspace service to be displayed to users", "default": "Azure Machine Learning", "updateable": true }, "description": { "type": "string", "title": "Description of the workspace service", "description": "Description of the workspace service", "default": "Azure Machine Learning empowers data scientists and developers to build, deploy, and manage high-quality models faster and with confidence.", "updateable": true }, "overview": { "type": "string", "title": "Workspace Service Overview", "description": "Long form description of the workspace service, in markdown syntax", "default": "Azure Machine Learning is a cloud service for accelerating and managing the machine learning project lifecycle. Machine learning professionals, data scientists, and engineers can use it in their day-to-day workflows: Train and deploy models, and manage MLOps. \nYou can create a model in Azure Machine Learning or use a model built from an open-source platform, such as Pytorch, TensorFlow, or scikit-learn. MLOps tools help you monitor, retrain, and redeploy models.\n- [Azure Machine Learning Documentation](https://learn.microsoft.com/en-us/azure/machine-learning/)\n- [Azure Machine Learning Python SDK](https://docs.microsoft.com/en-us/python/api/overview/azure/ml/intro?view=azure-ml-py)", "updateable": true }, "is_exposed_externally": { "$id": "#/properties/is_exposed_externally", "type": "boolean", "title": "Expose externally", "description": "Is the Azure ML workspace accessible from outside of the workspace network. 
Also opens firewall rules to allow compute instances with public IP addresses.", "default": false }, "address_space": { "$id": "#/properties/address_space", "type": "string", "title": "Address space", "description": "The address space for use by AML subnets" } }, "uiSchema": { "address_space": { "classNames": "tre-hidden" } }, "pipeline": { "install": [ { "stepId": "12ba0dad-ea6c-4d0d-9255-d316212f5ffa", "stepTitle": "Upgrade to ensure aware of address space", "resourceType": "workspace", "resourceAction": "upgrade", "properties": [ ] }, { "stepId": "main" }, { "stepId": "260421b3-7308-491f-b531-e007cdc0ff46", "stepTitle": "Add network firewall rules for azureml", "resourceTemplateName": "tre-shared-service-firewall", "resourceType": "shared-service", "resourceAction": "upgrade", "properties": [ { "name": "network_rule_collections", "type": "array", "arraySubstitutionAction": "replace", "arrayMatchField": "name", "value": { "name": "nrc_svc_{{ resource.id }}_azureml", "action": "Allow", "rules": [ { "name": "AzureMachineLearning", "description": "Azure Machine Learning rules", "source_addresses": "{{ resource.properties.aml_subnet_address_prefixes }}", "destination_addresses": [ "AzureMachineLearning" ], "destination_ports": [ "443", "8787", "18881" ], "protocols": [ "TCP" ] }, { "name": "BatchNodeManagement", "description": "Batch Node Management", "source_addresses": "{{ resource.properties.aml_subnet_address_prefixes }}", "destination_addresses": [ "{{ resource.properties.batch_tag }}" ], "destination_ports": [ "443" ], "protocols": [ "TCP" ] }, { "name": "AzureMachineLearningUdp", "description": "Azure Machine Learning UDP", "source_addresses": "{{ resource.properties.aml_subnet_address_prefixes }}", "destination_addresses": [ "AzureMachineLearning" ], "destination_ports": [ "5831" ], "protocols": [ "UDP" ] }, { "name": "AzureML_Dependancies", "description": "AzureML Dependancies", "source_addresses": "{{ resource.properties.aml_subnet_address_prefixes }}", "destination_addresses": [ "AzureActiveDirectory", "AzureResourceManager", "{{ resource.properties.mcr_tag }}", "AzureFrontDoor.FirstParty" ], "destination_ports": [ "443" ], "protocols": [ "TCP" ] }, { "name": "AzureML_Client", "description": "AzureML Client", "source_addresses": "{{ resource.properties.workspace_address_spaces }}", "destination_addresses": [ "AzureActiveDirectory", "AzureResourceManager", "AzureMachineLearning" ], "destination_ports": [ "443" ], "protocols": [ "TCP" ] }, { "name": "AzureML_Storage", "description": "AzureML Storage", "source_addresses": "{{ resource.properties.aml_subnet_address_prefixes }}", "destination_addresses": [ "{{ resource.properties.storage_tag }}" ], "destination_ports": [ "443", "445" ], "protocols": [ "TCP" ] } ] } }, { "name": "rule_collections", "type": "array", "arraySubstitutionAction": "replace", "arrayMatchField": "name", "value": { "name": "arc_svc_{{ resource.id }}_azureml_client", "action": "Allow", "rules": [ { "name": "AzureML_client", "description": "AzureML rules", "source_addresses": "{{ resource.properties.workspace_address_spaces }}", "target_fqdns": [ "aadcdn.msauth.net", "{{ resource.properties.aml_fqdn }}", "automlresources-prod.azureedge.net" ], "protocols": [ { "port": "443", "type": "Https" } ] } ] } } ] } ], "upgrade": [ { "stepId": "main" }, { "stepId": "260421b3-7308-491f-b531-e007cdc0ff47", "stepTitle": "Add network firewall rules for azureml", "resourceTemplateName": "tre-shared-service-firewall", "resourceType": "shared-service", "resourceAction": "upgrade", 
"properties": [ { "name": "network_rule_collections", "type": "array", "arraySubstitutionAction": "replace", "arrayMatchField": "name", "value": { "name": "nrc_svc_{{ resource.id }}_azureml", "action": "Allow", "rules": [ { "name": "AzureMachineLearning", "description": "Azure Machine Learning rules", "source_addresses": "{{ resource.properties.aml_subnet_address_prefixes }}", "destination_addresses": [ "AzureMachineLearning" ], "destination_ports": [ "443", "8787", "18881" ], "protocols": [ "TCP" ] }, { "name": "BatchNodeManagement", "description": "Batch Node Management", "source_addresses": "{{ resource.properties.aml_subnet_address_prefixes }}", "destination_addresses": [ "{{ resource.properties.batch_tag }}" ], "destination_ports": [ "443" ], "protocols": [ "TCP" ] }, { "name": "AzureMachineLearningUdp", "description": "Azure Machine Learning UDP", "source_addresses": "{{ resource.properties.aml_subnet_address_prefixes }}", "destination_addresses": [ "AzureMachineLearning" ], "destination_ports": [ "5831" ], "protocols": [ "UDP" ] }, { "name": "AzureML_Dependancies", "description": "AzureML Dependancies", "source_addresses": "{{ resource.properties.aml_subnet_address_prefixes }}", "destination_addresses": [ "AzureActiveDirectory", "AzureResourceManager", "{{ resource.properties.mcr_tag }}", "AzureFrontDoor.FirstParty" ], "destination_ports": [ "443" ], "protocols": [ "TCP" ] }, { "name": "AzureML_Client", "description": "AzureML Client", "source_addresses": "{{ resource.properties.workspace_address_spaces }}", "destination_addresses": [ "AzureActiveDirectory", "AzureResourceManager", "AzureMachineLearning" ], "destination_ports": [ "443" ], "protocols": [ "TCP" ] }, { "name": "AzureML_Storage", "description": "AzureML Storage", "source_addresses": "{{ resource.properties.aml_subnet_address_prefixes }}", "destination_addresses": [ "{{ resource.properties.storage_tag }}" ], "destination_ports": [ "443", "445" ], "protocols": [ "TCP" ] } ] } }, { "name": "rule_collections", "type": "array", "arraySubstitutionAction": "replace", "arrayMatchField": "name", "value": { "name": "arc_svc_{{ resource.id }}_azureml_client", "action": "Allow", "rules": [ { "name": "AzureML_client", "description": "AzureML rules", "source_addresses": "{{ resource.properties.workspace_address_spaces }}", "target_fqdns": [ "aadcdn.msauth.net", "{{ resource.properties.aml_fqdn }}", "automlresources-prod.azureedge.net" ], "protocols": [ { "port": "443", "type": "Https" } ] } ] } } ] } ], "uninstall": [ { "stepId": "260421b3-7308-491f-b531-e007cdc0ff48", "stepTitle": "Add network firewall rules for azureml", "resourceTemplateName": "tre-shared-service-firewall", "resourceType": "shared-service", "resourceAction": "upgrade", "properties": [ { "name": "network_rule_collections", "type": "array", "arraySubstitutionAction": "remove", "arrayMatchField": "name", "value": { "name": "nrc_svc_{{ resource.id }}_azureml" } }, { "name": "rule_collections", "type": "array", "arraySubstitutionAction": "remove", "arrayMatchField": "name", "value": { "name": "arc_svc_{{ resource.id }}_azureml" } } ] }, { "stepId": "main" } ] } }
AzureTRE/templates/workspace_services/azureml/template_schema.json/0
{ "file_path": "AzureTRE/templates/workspace_services/azureml/template_schema.json", "repo_id": "AzureTRE", "token_count": 8032 }
123
PARENT_SERVICE_ID=__CHANGE_ME__ WORKSPACE_ID=__CHANGE_ME__ ID=__CHANGE_ME__ AZURE_LOCATION=__CHANGE_ME__ VM_SIZE="Standard_D2s_v3" AAD_TENANT_ID=__CHANGE_ME__ USER_OBJECT_ID=__CHANGE_ME__ ARM_CLIENT_ID=__CHANGE_ME__ ARM_CLIENT_SECRET=__CHANGE_ME__ ARM_TENANT_ID=__CHANGE_ME__
AzureTRE/templates/workspace_services/azureml/user_resources/aml_compute/.env.sample/0
{ "file_path": "AzureTRE/templates/workspace_services/azureml/user_resources/aml_compute/.env.sample", "repo_id": "AzureTRE", "token_count": 139 }
124
{ "schemaType": "ParameterSet", "schemaVersion": "1.0.1", "namespace": "", "name": "tre-service-databricks", "parameters": [ { "name": "id", "source": { "env": "ID" } }, { "name": "tre_id", "source": { "env": "TRE_ID" } }, { "name": "workspace_id", "source": { "env": "WORKSPACE_ID" } }, { "name": "address_space", "source": { "env": "ADDRESS_SPACE" } }, { "name": "is_exposed_externally", "source": { "env": "IS_EXPOSED_EXTERNALLY" } }, { "name": "tfstate_container_name", "source": { "env": "TERRAFORM_STATE_CONTAINER_NAME" } }, { "name": "tfstate_resource_group_name", "source": { "env": "MGMT_RESOURCE_GROUP_NAME" } }, { "name": "tfstate_storage_account_name", "source": { "env": "MGMT_STORAGE_ACCOUNT_NAME" } }, { "name": "arm_environment", "source": { "env": "ARM_ENVIRONMENT" } } ] }
AzureTRE/templates/workspace_services/databricks/parameters.json/0
{ "file_path": "AzureTRE/templates/workspace_services/databricks/parameters.json", "repo_id": "AzureTRE", "token_count": 626 }
125
resource "azurerm_storage_account" "gitea" { name = local.storage_name resource_group_name = data.azurerm_resource_group.ws.name location = data.azurerm_resource_group.ws.location account_tier = "Standard" account_replication_type = "GRS" tags = local.workspace_service_tags lifecycle { ignore_changes = [tags] } } resource "azurerm_storage_account_network_rules" "stgrules" { storage_account_id = azurerm_storage_account.gitea.id default_action = "Deny" bypass = ["AzureServices"] } resource "azurerm_private_endpoint" "stgfilepe" { name = "stgfilepe-${local.service_resource_name_suffix}" location = data.azurerm_resource_group.ws.location resource_group_name = data.azurerm_resource_group.ws.name subnet_id = data.azurerm_subnet.services.id tags = local.workspace_service_tags lifecycle { ignore_changes = [tags] } private_dns_zone_group { name = "private-dns-zone-group" private_dns_zone_ids = [data.azurerm_private_dns_zone.filecore.id] } private_service_connection { name = "stgfilepesc-${local.service_resource_name_suffix}" private_connection_resource_id = azurerm_storage_account.gitea.id is_manual_connection = false subresource_names = ["File"] } } resource "azurerm_storage_share" "gitea" { name = "gitea-data" storage_account_name = azurerm_storage_account.gitea.name quota = var.gitea_storage_limit }
AzureTRE/templates/workspace_services/gitea/terraform/storage.tf/0
{ "file_path": "AzureTRE/templates/workspace_services/gitea/terraform/storage.tf", "repo_id": "AzureTRE", "token_count": 743 }
126
#!/usr/bin/with-contenv sh echo >&2 "starting sshd" mkdir -p /run/sshd exec /usr/sbin/sshd -D
AzureTRE/templates/workspace_services/guacamole/guacamole-server/docker/services/sshd/run/0
{ "file_path": "AzureTRE/templates/workspace_services/guacamole/guacamole-server/docker/services/sshd/run", "repo_id": "AzureTRE", "token_count": 43 }
127
/** * */ package org.apache.guacamole.auth.azuretre.user;
AzureTRE/templates/workspace_services/guacamole/guacamole-server/guacamole-auth-azure/src/main/java/org/apache/guacamole/auth/azuretre/user/package-info.java/0
{ "file_path": "AzureTRE/templates/workspace_services/guacamole/guacamole-server/guacamole-auth-azure/src/main/java/org/apache/guacamole/auth/azuretre/user/package-info.java", "repo_id": "AzureTRE", "token_count": 25 }
128
#!/bin/bash set -e export TF_LOG="" # This script assumes you have created an .env from the sample and the variables # will come from there. # shellcheck disable=SC2154 terraform init -input=false -backend=true -reconfigure \ -backend-config="resource_group_name=$TF_VAR_mgmt_resource_group_name" \ -backend-config="storage_account_name=$TF_VAR_mgmt_storage_account_name" \ -backend-config="container_name=$TF_VAR_terraform_state_container_name" \ -backend-config="key=${TRE_ID}${TF_VAR_workspace_id}guacamole" terraform plan terraform apply -auto-approve
AzureTRE/templates/workspace_services/guacamole/terraform/deploy.sh/0
{ "file_path": "AzureTRE/templates/workspace_services/guacamole/terraform/deploy.sh", "repo_id": "AzureTRE", "token_count": 210 }
129
FROM --platform=linux/amd64 debian:bullseye-slim # PORTER_INIT SHELL ["/bin/bash", "-o", "pipefail", "-c"] RUN rm -f /etc/apt/apt.conf.d/docker-clean; echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache # Git is required for terraform_azurerm_environment_configuration RUN --mount=type=cache,target=/var/cache/apt --mount=type=cache,target=/var/lib/apt \ apt-get update \ && apt-get install --no-install-recommends -y git jq curl ca-certificates RUN export PORTER_HOME=/home/"${USER}"/.porter \ && curl -L https://cdn.porter.sh/latest/install-linux.sh | bash \ && "${PORTER_HOME}"/porter mixin install docker ENV PATH /home/$USER/.porter/:$PATH # PORTER_MIXINS # Use the BUNDLE_DIR build argument to copy files into the bundle COPY --link . ${BUNDLE_DIR}/
AzureTRE/templates/workspace_services/innereye/Dockerfile.tmpl/0
{ "file_path": "AzureTRE/templates/workspace_services/innereye/Dockerfile.tmpl", "repo_id": "AzureTRE", "token_count": 315 }
130
data "azurerm_app_service_plan" "workspace" { name = "plan-${var.workspace_id}" resource_group_name = data.azurerm_resource_group.ws.name } resource "random_uuid" "inference_auth_key" { } resource "azurerm_app_service" "inference" { name = "app-inf-${local.service_resource_name_suffix}" location = data.azurerm_resource_group.ws.location resource_group_name = data.azurerm_resource_group.ws.name app_service_plan_id = data.azurerm_app_service_plan.workspace.id https_only = true tags = local.tre_workspace_service_tags site_config { always_on = true http2_enabled = true } app_settings = { "WEBSITE_VNET_ROUTE_ALL" = "1" "WEBSITE_DNS_SERVER" = "168.63.129.16" "SCM_DO_BUILD_DURING_DEPLOYMENT" = "True" "APPLICATION_ID" = var.inference_sp_client_id "CLUSTER" = local.aml_compute_cluster_name "WORKSPACE_NAME" = local.aml_workspace_name "EXPERIMENT_NAME" = "main" "RESOURCE_GROUP" = data.azurerm_resource_group.ws.name "SUBSCRIPTION_ID" = data.azurerm_client_config.current.subscription_id "TENANT_ID" = data.azurerm_client_config.current.tenant_id "DATASTORE_NAME" = "inferencedatastore" "IMAGE_DATA_FOLDER" = "imagedata" } connection_string { name = "AZUREML_SERVICE_PRINCIPAL_SECRET" type = "Custom" value = var.inference_sp_client_secret } connection_string { name = "API_AUTH_SECRET" type = "Custom" value = random_uuid.inference_auth_key.result } lifecycle { ignore_changes = [tags] } } resource "azurerm_app_service_virtual_network_swift_connection" "inference" { app_service_id = azurerm_app_service.inference.id subnet_id = data.azurerm_subnet.web_apps.id } data "azurerm_private_dns_zone" "azurewebsites" { name = module.terraform_azurerm_environment_configuration.private_links["privatelink.azurewebsites.net"] resource_group_name = local.core_resource_group_name } resource "azurerm_private_endpoint" "inference" { name = "pe-inference-${local.service_resource_name_suffix}" location = data.azurerm_resource_group.ws.location resource_group_name = data.azurerm_resource_group.ws.name subnet_id = data.azurerm_subnet.services.id tags = local.tre_workspace_service_tags private_service_connection { private_connection_resource_id = azurerm_app_service.inference.id name = "psc-inference-${local.service_resource_name_suffix}" subresource_names = ["sites"] is_manual_connection = false } private_dns_zone_group { name = module.terraform_azurerm_environment_configuration.private_links["privatelink.azurewebsites.net"] private_dns_zone_ids = [data.azurerm_private_dns_zone.azurewebsites.id] } lifecycle { ignore_changes = [tags] } }
AzureTRE/templates/workspace_services/innereye/terraform/web_app.tf/0
{ "file_path": "AzureTRE/templates/workspace_services/innereye/terraform/web_app.tf", "repo_id": "AzureTRE", "token_count": 1335 }
131
data "azurerm_storage_share" "shared_storage" { name = local.shared_storage_share storage_account_name = local.storage_name } data "template_file" "mlflow_windows_config" { template = file("${path.module}/../mlflow-vm-config/windows/template_config.ps1") vars = { MLFlow_Connection_String = data.azurerm_storage_account.mlflow.primary_connection_string } } data "template_file" "mlflow_linux_config" { template = file("${path.module}/../mlflow-vm-config/linux/template_config.sh") vars = { MLFlow_Connection_String = data.azurerm_storage_account.mlflow.primary_connection_string } } data "local_file" "version" { filename = "${path.module}/../mlflow-server/version.txt" } data "azurerm_monitor_diagnostic_categories" "mlflow" { resource_id = azurerm_linux_web_app.mlflow.id depends_on = [ azurerm_linux_web_app.mlflow ] }
AzureTRE/templates/workspace_services/mlflow/terraform/data.tf/0
{ "file_path": "AzureTRE/templates/workspace_services/mlflow/terraform/data.tf", "repo_id": "AzureTRE", "token_count": 344 }
132
locals { short_service_id = substr(var.tre_resource_id, -4, -1) short_workspace_id = substr(var.workspace_id, -4, -1) workspace_resource_name_suffix = "${var.tre_id}-ws-${local.short_workspace_id}" service_resource_name_suffix = "${var.tre_id}-ws-${local.short_workspace_id}-svc-${local.short_service_id}" keyvault_name = lower("kv-${substr(local.workspace_resource_name_suffix, -20, -1)}") core_resource_group_name = "rg-${var.tre_id}" sql_sku = { "B | 4GB 2vCores" = { value = "B_Standard_B2s" }, "GP | 8GB 2vCores" = { value = "GP_Standard_D2ds_v4" }, "BC | 16GB 2vCores" = { value = "MO_Standard_E2ds_v4" } } workspace_service_tags = { tre_id = var.tre_id tre_workspace_id = var.workspace_id tre_workspace_service_id = var.tre_resource_id } }
AzureTRE/templates/workspace_services/mysql/terraform/locals.tf/0
{ "file_path": "AzureTRE/templates/workspace_services/mysql/terraform/locals.tf", "repo_id": "AzureTRE", "token_count": 430 }
133
CREATE SCHEMA ${SCHEMA_NAME} AUTHORIZATION ${OHDSI_ADMIN_ROLE}; COMMENT ON SCHEMA ${SCHEMA_NAME} IS 'Schema containing tables to support WebAPI functionality'; GRANT USAGE ON SCHEMA ${SCHEMA_NAME} TO PUBLIC; GRANT ALL ON SCHEMA ${SCHEMA_NAME} TO GROUP ${OHDSI_ADMIN_ROLE}; GRANT USAGE ON SCHEMA ${SCHEMA_NAME} TO GROUP ${OHDSI_APP_ROLE}; ALTER DEFAULT PRIVILEGES IN SCHEMA ${SCHEMA_NAME} GRANT INSERT, SELECT, UPDATE, DELETE, REFERENCES, TRIGGER ON TABLES TO ${OHDSI_APP_ROLE}; ALTER DEFAULT PRIVILEGES IN SCHEMA ${SCHEMA_NAME} GRANT SELECT, USAGE ON SEQUENCES TO ${OHDSI_APP_ROLE}; ALTER DEFAULT PRIVILEGES IN SCHEMA ${SCHEMA_NAME} GRANT EXECUTE ON FUNCTIONS TO ${OHDSI_APP_ROLE}; ALTER DEFAULT PRIVILEGES IN SCHEMA ${SCHEMA_NAME} GRANT USAGE ON TYPES TO ${OHDSI_APP_ROLE};
AzureTRE/templates/workspace_services/ohdsi/sql/atlas_create_schema.sql/0
{ "file_path": "AzureTRE/templates/workspace_services/ohdsi/sql/atlas_create_schema.sql", "repo_id": "AzureTRE", "token_count": 320 }
134
locals { short_service_id = substr(var.tre_resource_id, -4, -1) short_workspace_id = substr(var.workspace_id, -4, -1) workspace_resource_name_suffix = "${var.tre_id}-ws-${local.short_workspace_id}" core_resource_group_name = "rg-${var.tre_id}" service_suffix = "${local.workspace_resource_name_suffix}-svc-${local.short_service_id}" key_vault_name = lower("kv-${substr(local.workspace_resource_name_suffix, -20, -1)}") storage_name = lower(replace("stg${substr(local.workspace_resource_name_suffix, -8, -1)}", "-", "")) porter_yaml = yamldecode(file("${path.module}/../porter.yaml")) # ATLAS Database postgres_admin_username = "postgres_admin" postgres_webapi_admin_username = "ohdsi_admin_user" postgres_webapi_admin_role = "ohdsi_admin" postgres_webapi_app_username = "ohdsi_app_user" postgres_webapi_app_role = "ohdsi_app" postgres_webapi_database_name = "atlas_webapi_db" postgres_schema_name = "webapi" postgres_version = "14" postgres_server_log_analytics_categories = [ "PostgreSQLLogs" ] # ATLAS UI atlas_ui_name = "app-ohdsi-atlas-${local.service_suffix}" atlas_ui_fqdn = "${local.atlas_ui_name}.${module.terraform_azurerm_environment_configuration.web_app_suffix}" atlas_ui_url = "https://${local.atlas_ui_fqdn}/atlas" atlas_ui_url_welcome = "${local.atlas_ui_url}/#/welcome" atlas_ui_storage_share_name = "atlas-${local.service_suffix}" atlas_ui_mount_path = "/etc/atlas" atlas_ui_docker_image_name = "ohdsi/atlas" atlas_ui_docker_image_tag = "2.12.1" config_local_file_path = "/tmp/config-local.js" atals_ui_log_analytics_categories = [ "AppServiceAppLogs", "AppServiceConsoleLogs", "AppServiceHTTPLogs" ] # OHDSI WEB API ohdsi_webapi_name = "app-ohdsi-webapi-${local.service_suffix}" ohdsi_webapi_fqdn = "${local.ohdsi_webapi_name}.${module.terraform_azurerm_environment_configuration.web_app_suffix}" ohdsi_webapi_url = "https://${local.ohdsi_webapi_fqdn}/WebAPI/" ohdsi_webapi_url_auth_callback = "${local.ohdsi_webapi_url}user/oauth/callback" ohdsi_api_docker_image_name = "ohdsi/webapi" ohdsi_api_docker_image_tag = "2.12.1" ohdsi_api_flyway_baseline_version = "2.2.5.20180212152023" ohdsi_api_log_analytics_categories = [ "AppServiceAppLogs", "AppServiceConsoleLogs", "AppServiceHTTPLogs" ] tre_workspace_service_tags = { tre_id = var.tre_id tre_workspace_id = var.workspace_id tre_workspace_service_id = var.tre_resource_id } # Data Source configuration dialects = local.porter_yaml["custom"]["dialects"] data_source_config = try(jsondecode(base64decode(var.data_source_config)), {}) data_source_daimons = try(jsondecode(base64decode(var.data_source_daimons)), {}) data_source_dialect = try(local.data_source_config.dialect, null) is_synapse_data_source = var.configure_data_source && local.data_source_dialect == "Azure Synapse" daimon_results = try(local.data_source_daimons.daimon_results, null) daimon_temp = try(local.data_source_daimons.daimon_temp, null) results_schema_name = local.is_synapse_data_source && local.daimon_results != null ? "${local.data_source_daimons.daimon_results}_${replace(var.tre_resource_id, "-", "_")}" : local.daimon_results temp_schema_name = local.is_synapse_data_source && local.daimon_temp != null ? "${local.data_source_daimons.daimon_temp}_${replace(var.tre_resource_id, "-", "_")}" : local.daimon_temp }
AzureTRE/templates/workspace_services/ohdsi/terraform/locals.tf/0
{ "file_path": "AzureTRE/templates/workspace_services/ohdsi/terraform/locals.tf", "repo_id": "AzureTRE", "token_count": 1751 }
135
resource "azurerm_service_plan" "workspace" { count = var.deploy_app_service_plan ? 1 : 0 name = "plan-${var.tre_resource_id}" location = azurerm_resource_group.ws.location resource_group_name = azurerm_resource_group.ws.name os_type = "Linux" sku_name = var.app_service_plan_sku tags = local.tre_workspace_tags lifecycle { ignore_changes = [tags] } }
AzureTRE/templates/workspaces/base/terraform/appserviceplan.tf/0
{ "file_path": "AzureTRE/templates/workspaces/base/terraform/appserviceplan.tf", "repo_id": "AzureTRE", "token_count": 201 }
136
resource "azurerm_network_security_group" "ws" { location = var.location name = "nsg-ws" resource_group_name = var.ws_resource_group_name tags = var.tre_workspace_tags lifecycle { ignore_changes = [tags] } } resource "azurerm_subnet_network_security_group_association" "services" { network_security_group_id = azurerm_network_security_group.ws.id subnet_id = azurerm_subnet.services.id depends_on = [ # meant to resolve AnotherOperation errors with one operation in the vnet at a time azurerm_subnet_route_table_association.rt_webapps_subnet_association ] } resource "azurerm_subnet_network_security_group_association" "webapps" { network_security_group_id = azurerm_network_security_group.ws.id subnet_id = azurerm_subnet.webapps.id depends_on = [ # meant to resolve AnotherOperation errors with one operation in the vnet at a time azurerm_subnet_network_security_group_association.webapps ] } resource "azurerm_network_security_rule" "deny_outbound_override" { access = "Deny" destination_address_prefix = "*" destination_port_range = "*" direction = "Outbound" name = "deny-outbound-override" network_security_group_name = azurerm_network_security_group.ws.name priority = 4096 protocol = "*" resource_group_name = var.ws_resource_group_name source_address_prefix = "*" source_port_range = "*" } resource "azurerm_network_security_rule" "deny_all_inbound_override" { access = "Deny" destination_address_prefix = "*" destination_port_range = "*" direction = "Inbound" name = "deny-inbound-override" network_security_group_name = azurerm_network_security_group.ws.name priority = 900 protocol = "*" resource_group_name = var.ws_resource_group_name source_address_prefix = "*" source_port_range = "*" } resource "azurerm_network_security_rule" "allow_inbound_within_workspace_vnet" { access = "Allow" destination_port_range = "*" destination_address_prefixes = azurerm_virtual_network.ws.address_space source_address_prefixes = azurerm_virtual_network.ws.address_space direction = "Inbound" name = "inbound-within-workspace-vnet" network_security_group_name = azurerm_network_security_group.ws.name priority = 100 protocol = "*" resource_group_name = var.ws_resource_group_name source_port_range = "*" } resource "azurerm_network_security_rule" "allow_outbound_within_workspace_vnet" { access = "Allow" destination_port_range = "*" destination_address_prefixes = azurerm_virtual_network.ws.address_space source_address_prefixes = azurerm_virtual_network.ws.address_space direction = "Outbound" name = "outbound-within-services-subnet" network_security_group_name = azurerm_network_security_group.ws.name priority = 100 protocol = "*" resource_group_name = var.ws_resource_group_name source_port_range = "*" } resource "azurerm_network_security_rule" "allow_outbound_to_shared_services" { access = "Allow" destination_address_prefixes = data.azurerm_subnet.shared.address_prefixes destination_port_range = "*" direction = "Outbound" name = "to-shared-services" network_security_group_name = azurerm_network_security_group.ws.name priority = 110 protocol = "*" resource_group_name = var.ws_resource_group_name source_address_prefix = "*" source_port_range = "*" } resource "azurerm_network_security_rule" "allow_outbound_to_internet" { access = "Allow" destination_address_prefix = "INTERNET" destination_port_range = "443" direction = "Outbound" name = "to-internet" network_security_group_name = azurerm_network_security_group.ws.name priority = 120 protocol = "Tcp" resource_group_name = var.ws_resource_group_name source_address_prefix = "*" 
source_port_range = "*" } resource "azurerm_network_security_rule" "allow_outbound_from_webapp_to_core_webapp" { access = "Allow" destination_port_range = "443" destination_address_prefixes = data.azurerm_subnet.core_webapps.address_prefixes source_address_prefixes = azurerm_subnet.webapps.address_prefixes direction = "Outbound" name = "outbound-workspace-webapps-to-tre-core-webapps" network_security_group_name = azurerm_network_security_group.ws.name priority = 130 protocol = "Tcp" resource_group_name = var.ws_resource_group_name source_port_range = "*" } resource "azurerm_network_security_rule" "allow_outbound_from_subnet" { access = "Allow" destination_port_range = "80" source_address_prefixes = azurerm_subnet.services.address_prefixes destination_address_prefix = "INTERNET" direction = "Outbound" name = "outbound-workspace-subnets-to-internet-for-crl" network_security_group_name = azurerm_network_security_group.ws.name priority = 101 protocol = "Tcp" resource_group_name = var.ws_resource_group_name source_port_range = "*" } resource "azurerm_network_security_rule" "allow_outbound_webapps_to_services" { access = "Allow" destination_port_ranges = [ "80", "443", "445", "3306", "3389", "5432", ] destination_address_prefixes = azurerm_subnet.services.address_prefixes source_address_prefixes = azurerm_subnet.webapps.address_prefixes direction = "Outbound" name = "outbound-from-services-to-webapps-subnets" network_security_group_name = azurerm_network_security_group.ws.name priority = 140 protocol = "Tcp" resource_group_name = var.ws_resource_group_name source_port_range = "*" } resource "azurerm_network_security_rule" "allow_inbound_from_bastion" { access = "Allow" destination_address_prefixes = azurerm_subnet.services.address_prefixes destination_port_ranges = [ "22", "3389", ] direction = "Inbound" name = "allow-inbound-from-bastion" network_security_group_name = azurerm_network_security_group.ws.name priority = 110 protocol = "Tcp" resource_group_name = var.ws_resource_group_name source_address_prefixes = [ data.azurerm_subnet.bastion.address_prefix ] source_port_range = "*" } resource "azurerm_network_security_rule" "allow_inbound_from_resourceprocessor" { access = "Allow" destination_address_prefixes = azurerm_subnet.services.address_prefixes destination_port_range = "443" direction = "Inbound" name = "allow-inbound-from-resourceprocessor" network_security_group_name = azurerm_network_security_group.ws.name priority = 120 protocol = "Tcp" resource_group_name = var.ws_resource_group_name source_address_prefixes = [ data.azurerm_subnet.resourceprocessor.address_prefix ] source_port_range = "*" } resource "azurerm_network_security_rule" "allow_inbound_from_airlockprocessor" { access = "Allow" destination_address_prefixes = azurerm_subnet.services.address_prefixes destination_port_range = "443" direction = "Inbound" name = "allow-inbound-from-airlockprocessor" network_security_group_name = azurerm_network_security_group.ws.name priority = 140 protocol = "Tcp" resource_group_name = var.ws_resource_group_name source_address_prefixes = [ data.azurerm_subnet.airlockprocessor.address_prefix ] source_port_range = "*" } resource "azurerm_network_security_rule" "allow_inbound_from_webapp_to_services" { access = "Allow" destination_port_ranges = [ "80", "443", "445", "3306", "3389", "5432", ] destination_address_prefixes = azurerm_subnet.services.address_prefixes source_address_prefixes = azurerm_subnet.webapps.address_prefixes direction = "Inbound" name = "inbound-from-webapps-to-services-subnets" 
network_security_group_name = azurerm_network_security_group.ws.name priority = 130 protocol = "Tcp" resource_group_name = var.ws_resource_group_name source_port_range = "*" } moved { from = azurerm_network_security_rule.deny-outbound-overrid to = azurerm_network_security_rule.deny_outbound_overrid } moved { from = azurerm_network_security_rule.deny-all-inbound-override to = azurerm_network_security_rule.deny_all_inbound_override } moved { from = azurerm_network_security_rule.allow-inbound-within-services-subnet to = azurerm_network_security_rule.allow_inbound_within_services_subnet } moved { from = azurerm_network_security_rule.allow-outbound-within-services-subnet to = azurerm_network_security_rule.allow_outbound_within_services_subnet } moved { from = azurerm_network_security_rule.allow-outbound-to-shared-services to = azurerm_network_security_rule.allow_outbound_to_shared_services } moved { from = azurerm_network_security_rule.allow-outbound-to-internet to = azurerm_network_security_rule.allow_outbound_to_internet } moved { from = azurerm_network_security_rule.allow-outbound-from-webapp-to-core-webapp to = azurerm_network_security_rule.allow_outbound_from_webapp_to_core_webapp } moved { from = azurerm_network_security_rule.allow-outbound-webapps-to-services to = azurerm_network_security_rule.allow_outbound_webapps_to_services } moved { from = azurerm_network_security_rule.allow-inbound-from-bastion to = azurerm_network_security_rule.allow_inbound_from_bastion } moved { from = azurerm_network_security_rule.allow-inbound-from-resourceprocessor to = azurerm_network_security_rule.allow_inbound_from_resourceprocessor } moved { from = azurerm_network_security_rule.allow-inbound-from-airlockprocessor to = azurerm_network_security_rule.allow_inbound_from_airlockprocessor } moved { from = azurerm_network_security_rule.allow-inbound-from-webapp-to-services to = azurerm_network_security_rule.allow_inbound_from_webapp_to_services }
AzureTRE/templates/workspaces/base/terraform/network/security.tf/0
{ "file_path": "AzureTRE/templates/workspaces/base/terraform/network/security.tf", "repo_id": "AzureTRE", "token_count": 5125 }
137
import { Spinner, SpinnerSize, Stack } from '@fluentui/react';
import React, { useContext, useEffect, useRef, useState } from 'react';
import { Route, Routes } from 'react-router-dom';
import { Admin } from '../../App';
import { ApiEndpoint } from '../../models/apiEndpoints';
import { Workspace } from '../../models/workspace';
import { useAuthApiCall, HttpMethod, ResultType } from '../../hooks/useAuthApiCall';
import { RootDashboard } from './RootDashboard';
import { LeftNav } from './LeftNav';
import { LoadingState } from '../../models/loadingState';
import { SharedServices } from '../shared/SharedServices';
import { SharedServiceItem } from '../shared/SharedServiceItem';
import { SecuredByRole } from '../shared/SecuredByRole';
import { RoleName } from '../../models/roleNames';
import { APIError } from '../../models/exceptions';
import { ExceptionLayout } from '../shared/ExceptionLayout';
import { AppRolesContext } from '../../contexts/AppRolesContext';
import { CostsContext } from '../../contexts/CostsContext';
import config from "../../config.json";

export const RootLayout: React.FunctionComponent = () => {
  const [workspaces, setWorkspaces] = useState([] as Array<Workspace>);
  const [loadingState, setLoadingState] = useState(LoadingState.Loading);
  const [loadingCostState, setLoadingCostState] = useState(LoadingState.Loading);
  const [apiError, setApiError] = useState({} as APIError);
  const [costApiError, setCostApiError] = useState({} as APIError);
  const apiCall = useAuthApiCall();
  const appRolesCtx = useContext(AppRolesContext);
  const costsWriteCtx = useRef(useContext(CostsContext));

  useEffect(() => {
    const getWorkspaces = async () => {
      try {
        const r = await apiCall(ApiEndpoint.Workspaces, HttpMethod.Get, undefined, undefined, ResultType.JSON);
        setLoadingState(LoadingState.Ok);
        r && r.workspaces && setWorkspaces(r.workspaces);
      } catch (e: any) {
        e.userMessage = 'Error retrieving resources';
        setApiError(e);
        setLoadingState(LoadingState.Error);
      }
    };
    getWorkspaces();
  }, [apiCall]);

  useEffect(() => {
    const getCosts = async () => {
      try {
        if (appRolesCtx.roles.includes(RoleName.TREAdmin)) {
          costsWriteCtx.current.setLoadingState(LoadingState.Loading);
          const r = await apiCall(ApiEndpoint.Costs, HttpMethod.Get, undefined, undefined, ResultType.JSON);
          costsWriteCtx.current.setCosts([
            ...r.workspaces,
            ...r.shared_services
          ]);
          costsWriteCtx.current.setLoadingState(LoadingState.Ok);
          setLoadingCostState(LoadingState.Ok);
        } else {
          costsWriteCtx.current.setLoadingState(LoadingState.AccessDenied);
          setLoadingCostState(LoadingState.AccessDenied);
        }
      } catch (e: any) {
        if (e instanceof APIError) {
          if (e.status === 404 /*subscription not supported*/) {
            config.debug && console.warn(e.message);
            setLoadingCostState(LoadingState.NotSupported);
          } else if (e.status === 429 /*too many requests*/ || e.status === 503 /*service unavailable*/) {
            let msg = JSON.parse(e.message);
            let retryAfter = Number(msg.error["retry-after"]);
            config.debug && console.info("retrying after " + retryAfter + " seconds");
            setTimeout(getCosts, retryAfter * 1000);
          } else {
            e.userMessage = 'Error retrieving costs';
            setLoadingCostState(LoadingState.Error);
          }
        } else {
          e.userMessage = 'Error retrieving costs';
          setLoadingCostState(LoadingState.Error);
        }
        costsWriteCtx.current.setLoadingState(LoadingState.Error);
        setCostApiError(e);
      }
    };

    getCosts();

    const ctx = costsWriteCtx.current;
    // run this on unmount - to clear the context
    return (() => ctx.setCosts([]));
  }, [apiCall, appRolesCtx.roles]);

  const addWorkspace = (w: Workspace) => {
    const ws = [...workspaces];
    ws.push(w);
    setWorkspaces(ws);
  }

  const updateWorkspace = (w: Workspace) => {
    const i = workspaces.findIndex((f: Workspace) => f.id === w.id);
    const ws = [...workspaces];
    ws.splice(i, 1, w);
    setWorkspaces(ws);
  }

  const removeWorkspace = (w: Workspace) => {
    const i = workspaces.findIndex((f: Workspace) => f.id === w.id);
    const ws = [...workspaces];
    ws.splice(i, 1);
    setWorkspaces(ws);
  }

  switch (loadingState) {
    case LoadingState.Ok:
      return (
        <>
          {
            loadingCostState === LoadingState.Error &&
            <ExceptionLayout e={costApiError} />
          }
          <Stack horizontal className='tre-body-inner'>
            <Stack.Item className='tre-left-nav' style={{ marginTop: 2 }}>
              <LeftNav />
            </Stack.Item>
            <Stack.Item className='tre-body-content'>
              <Routes>
                <Route path="/" element={
                  <RootDashboard
                    workspaces={workspaces}
                    addWorkspace={(w: Workspace) => addWorkspace(w)}
                    updateWorkspace={(w: Workspace) => updateWorkspace(w)}
                    removeWorkspace={(w: Workspace) => removeWorkspace(w)} />
                } />
                <Route path="/admin" element={<Admin />} />
                <Route path="/shared-services/*" element={
                  <Routes>
                    <Route path="/" element={<SecuredByRole element={<SharedServices />} allowedAppRoles={[RoleName.TREAdmin]} errorString={"You must be a TRE Admin to access this area"} />} />
                    <Route path=":sharedServiceId" element={<SecuredByRole element={<SharedServiceItem />} allowedAppRoles={[RoleName.TREAdmin]} errorString={"You must be a TRE Admin to access this area"} />} />
                  </Routes>
                } />
              </Routes>
            </Stack.Item>
          </Stack>
        </>
      );
    case LoadingState.Error:
      return (
        <ExceptionLayout e={apiError} />
      );
    default:
      return (
        <div style={{ marginTop: '20px' }}>
          <Spinner label="Loading TRE" ariaLive="assertive" labelPosition="top" size={SpinnerSize.large} />
        </div>
      );
  }
};
AzureTRE/ui/app/src/components/root/RootLayout.tsx/0
{ "file_path": "AzureTRE/ui/app/src/components/root/RootLayout.tsx", "repo_id": "AzureTRE", "token_count": 2592 }
138
import React from 'react';
import { Resource } from '../../models/resource';
import config from '../../config.json';

interface ResourceDebugProps {
  resource: Resource
}

export const ResourceDebug: React.FunctionComponent<ResourceDebugProps> = (props: ResourceDebugProps) => {
  return (
    config.debug === true ?
      <>
        <hr />
        <h3>Debug details:</h3>
        <ul>
          {
            Object.keys(props.resource).map((key, i) => {
              let val = typeof ((props.resource as any)[key]) === 'object'
                ? JSON.stringify((props.resource as any)[key])
                : (props.resource as any)[key].toString()
              return (
                <li key={i}>
                  <b>{key}: </b>{val}
                </li>
              )
            })
          }
        </ul>
      </> : <></>
  )
};
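// A minimal usage sketch (the parent component and the `workspace` object are assumed,
// not part of this file). The debug list above renders only when config.debug is true:
//
//   <ResourceDebug resource={workspace} />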
AzureTRE/ui/app/src/components/shared/ResourceDebug.tsx/0
{ "file_path": "AzureTRE/ui/app/src/components/shared/ResourceDebug.tsx", "repo_id": "AzureTRE", "token_count": 396 }
139
import { MessageBar, MessageBarType, Pivot, PivotItem, PrimaryButton, Stack, TextField, TooltipHost } from "@fluentui/react"; import React, { useCallback, useEffect, useState } from "react"; import { HttpMethod, useAuthApiCall } from "../../../hooks/useAuthApiCall"; import { AirlockRequest, AirlockRequestStatus } from "../../../models/airlock"; import { ApiEndpoint } from "../../../models/apiEndpoints"; import { APIError } from "../../../models/exceptions"; import { ExceptionLayout } from "../ExceptionLayout"; import { CliCommand } from "../CliCommand"; interface AirlockRequestFilesSectionProps { request: AirlockRequest; workspaceApplicationIdURI: string; } export const AirlockRequestFilesSection: React.FunctionComponent<AirlockRequestFilesSectionProps> = (props: AirlockRequestFilesSectionProps) => { const COPY_TOOL_TIP_DEFAULT_MESSAGE = "Copy to clipboard" const [copyToolTipMessage, setCopyToolTipMessage] = useState<string>(COPY_TOOL_TIP_DEFAULT_MESSAGE); const [sasUrl, setSasUrl] = useState<string>(); const [sasUrlError, setSasUrlError] = useState(false); const [apiSasUrlError, setApiSasUrlError] = useState({} as APIError); const apiCall = useAuthApiCall(); const generateSasUrl = useCallback(async () => { if (props.request && props.request.workspaceId) { try { const linkObject = await apiCall( `${ApiEndpoint.Workspaces}/${props.request.workspaceId}/${ApiEndpoint.AirlockRequests}/${props.request.id}/${ApiEndpoint.AirlockLink}`, HttpMethod.Get, props.workspaceApplicationIdURI ); setSasUrl(linkObject.containerUrl); } catch (err: any) { err.userMessage = 'Error retrieving storage link'; setApiSasUrlError(err); setSasUrlError(true); } } }, [apiCall, props.request, props.workspaceApplicationIdURI]); const parseSasUrl = (sasUrl: string) => { const match = sasUrl.match(/https:\/\/(.*?).blob.core.windows.net\/(.*)\?(.*)$/); if (!match) { return } return { StorageAccountName: match[1], containerName: match[2], sasToken: match[3] } }; const handleCopySasUrl = () => { if (!sasUrl) { return; } navigator.clipboard.writeText(sasUrl); setCopyToolTipMessage("Copied") setTimeout(() => setCopyToolTipMessage(COPY_TOOL_TIP_DEFAULT_MESSAGE), 3000); } const getAzureCliCommand = (sasUrl: string) => { let containerDetails = parseSasUrl(sasUrl) if (!containerDetails) { return ''; } let cliCommand = ""; if (props.request.status === AirlockRequestStatus.Draft) { cliCommand = `az storage blob upload --file </path/to/file> --name <filename.filetype> --account-name ${containerDetails.StorageAccountName} --type block --container-name ${containerDetails.containerName} --sas-token "${containerDetails.sasToken}"` } else { cliCommand = `az storage blob download-batch --destination </destination/path/for/file> --source ${containerDetails.containerName} --account-name ${containerDetails.StorageAccountName} --sas-token "${containerDetails.sasToken}"` } return cliCommand; }; useEffect(() => { generateSasUrl() }, [generateSasUrl]); return ( <Stack> <Pivot aria-label="Storage options"> <PivotItem headerText="SAS URL"> <Stack> <Stack.Item style={{ paddingTop: '10px', paddingBottom: '10px' }}> { props.request.status === AirlockRequestStatus.Draft ? 
<small>Use the storage container SAS URL to upload your request file.</small> : <small>Use the storage container SAS URL to view the request file.</small> } <Stack horizontal styles={{ root: { alignItems: 'center', paddingTop: '7px' } }}> <Stack.Item grow> <TextField readOnly value={sasUrl} /> </Stack.Item> <TooltipHost content={copyToolTipMessage}> <PrimaryButton iconProps={{ iconName: 'copy' }} styles={{ root: { minWidth: '40px' } }} onClick={() => { handleCopySasUrl() }} /> </TooltipHost> </Stack> </Stack.Item> { props.request.status === AirlockRequestStatus.Draft && <MessageBar messageBarType={MessageBarType.info}> Please upload a single file. Only single-file imports (including zip files) are supported. </MessageBar> } </Stack> </PivotItem> <PivotItem headerText="CLI"> <Stack> <Stack.Item style={{ paddingTop: '10px', paddingBottom: '10px' }}> <small>Use Azure command-line interface (Azure CLI) to interact with the storage container.</small> <hr style={{ border: "1px solid #faf9f8", borderRadius: "1px" }} /> </Stack.Item> <Stack.Item style={{ paddingTop: '10px' }}> <CliCommand command={sasUrl ? getAzureCliCommand(sasUrl) : ''} title={props.request.status === AirlockRequestStatus.Draft ? "Upload a file to the storage container" : "Download the file from the storage container"} isLoading={!sasUrl && !sasUrlError} /> </Stack.Item> </Stack> </PivotItem> </Pivot> { sasUrlError && <ExceptionLayout e={apiSasUrlError} /> } </Stack> ); };
AzureTRE/ui/app/src/components/shared/airlock/AirlockRequestFilesSection.tsx/0
{ "file_path": "AzureTRE/ui/app/src/components/shared/airlock/AirlockRequestFilesSection.tsx", "repo_id": "AzureTRE", "token_count": 2332 }
140
import React, { useContext, useEffect, useState } from 'react';
import { Nav, INavLinkGroup, INavStyles } from '@fluentui/react/lib/Nav';
import { useNavigate } from 'react-router-dom';
import { ApiEndpoint } from '../../models/apiEndpoints';
import { WorkspaceService } from '../../models/workspaceService';
import { WorkspaceContext } from '../../contexts/WorkspaceContext';
import { SharedService } from '../../models/sharedService';

// TODO:
// - active item is sometimes lost

interface WorkspaceLeftNavProps {
  workspaceServices: Array<WorkspaceService>,
  sharedServices: Array<SharedService>,
  setWorkspaceService: (workspaceService: WorkspaceService) => void,
  addWorkspaceService: (w: WorkspaceService) => void
}

export const WorkspaceLeftNav: React.FunctionComponent<WorkspaceLeftNavProps> = (props: WorkspaceLeftNavProps) => {
  const navigate = useNavigate();
  const emptyLinks: INavLinkGroup[] = [{ links: [] }];
  const [serviceLinks, setServiceLinks] = useState(emptyLinks);
  const workspaceCtx = useContext(WorkspaceContext);

  useEffect(() => {
    const getWorkspaceServices = async () => {
      // get the workspace services
      if (!workspaceCtx.workspace.id) return;

      let serviceLinkArray: Array<any> = [];
      props.workspaceServices.forEach((service: WorkspaceService) => {
        serviceLinkArray.push(
          {
            name: service.properties.display_name,
            url: `/${ApiEndpoint.Workspaces}/${workspaceCtx.workspace.id}/${ApiEndpoint.WorkspaceServices}/${service.id}`,
            key: `/${ApiEndpoint.Workspaces}/${workspaceCtx.workspace.id}/${ApiEndpoint.WorkspaceServices}/${service.id}`
          });
      });

      let sharedServiceLinkArray: Array<any> = [];
      props.sharedServices.forEach((service: SharedService) => {
        sharedServiceLinkArray.push(
          {
            name: service.properties.display_name,
            url: `/${ApiEndpoint.Workspaces}/${workspaceCtx.workspace.id}/${ApiEndpoint.SharedServices}/${service.id}`,
            key: `/${ApiEndpoint.Workspaces}/${workspaceCtx.workspace.id}/${ApiEndpoint.SharedServices}/${service.id}`
          });
      });

      const serviceNavLinks: INavLinkGroup[] = [
        {
          links: [
            {
              name: 'Overview',
              key: `/${ApiEndpoint.Workspaces}/${workspaceCtx.workspace.id}`,
              url: `/${ApiEndpoint.Workspaces}/${workspaceCtx.workspace.id}`,
              isExpanded: true
            },
            {
              name: 'Services',
              key: `/${ApiEndpoint.Workspaces}/${workspaceCtx.workspace.id}/${ApiEndpoint.WorkspaceServices}`,
              url: `/${ApiEndpoint.Workspaces}/${workspaceCtx.workspace.id}/${ApiEndpoint.WorkspaceServices}`,
              isExpanded: true,
              links: serviceLinkArray
            },
            {
              name: 'Shared Services',
              key: `/${ApiEndpoint.Workspaces}/${workspaceCtx.workspace.id}/${ApiEndpoint.SharedServices}`,
              url: `/${ApiEndpoint.Workspaces}/${workspaceCtx.workspace.id}/${ApiEndpoint.SharedServices}`,
              isExpanded: false,
              links: sharedServiceLinkArray
            }
          ]
        }
      ];

      // Only show airlock link if enabled for workspace
      if (workspaceCtx.workspace.properties !== undefined && workspaceCtx.workspace.properties.enable_airlock) {
        serviceNavLinks[0].links.push({
          name: 'Airlock',
          key: `/${ApiEndpoint.Workspaces}/${workspaceCtx.workspace.id}/${ApiEndpoint.AirlockRequests}`,
          url: `/${ApiEndpoint.Workspaces}/${workspaceCtx.workspace.id}/${ApiEndpoint.AirlockRequests}`,
        });
      }

      setServiceLinks(serviceNavLinks);
    };
    getWorkspaceServices();
  }, [props.workspaceServices, props.sharedServices, workspaceCtx.workspace.id, workspaceCtx.workspace.properties]);

  return (
    <>
      <Nav
        onLinkClick={(e, item) => {
          e?.preventDefault();
          if (!item || !item.url) return;
          let selectedService = props.workspaceServices.find((w) => item.key?.indexOf(w.id.toString()) !== -1);
          if (selectedService) {
            props.setWorkspaceService(selectedService);
          }
          navigate(item.url)
        }}
        ariaLabel="TRE Workspace Left Navigation"
        groups={serviceLinks}
        styles={navStyles}
      />
    </>
  );
};

const navStyles: Partial<INavStyles> = {
  root: {
    boxSizing: 'border-box',
    border: '1px solid #eee',
    paddingBottom: 40
  },
  // these link styles override the default truncation behavior
  link: {
    whiteSpace: 'normal',
    lineHeight: 'inherit',
  },
};
AzureTRE/ui/app/src/components/workspaces/WorkspaceLeftNav.tsx/0
{ "file_path": "AzureTRE/ui/app/src/components/workspaces/WorkspaceLeftNav.tsx", "repo_id": "AzureTRE", "token_count": 2002 }
141