text (stringlengths 5 to 22M) | id (stringlengths 12 to 177) | metadata (dict) | __index_level_0__ (int64 0 to 1.37k) |
---|---|---|---|
class TREError extends Error {
constructor() {
super();
Object.setPrototypeOf(this, new.target.prototype);
}
}
export class APIError extends TREError {
status?: number;
exception?: any;
userMessage?: string;
endpoint?: string;
}
| AzureTRE/ui/app/src/models/exceptions.ts/0 | {
"file_path": "AzureTRE/ui/app/src/models/exceptions.ts",
"repo_id": "AzureTRE",
"token_count": 79
} | 142 |
import { configureStore } from "@reduxjs/toolkit";
import operationsReducer from "../components/shared/notifications/operationsSlice";
export const store = configureStore({
reducer: {
operations: operationsReducer
}
});
export type AppDispatch = typeof store.dispatch;
export type RootState = ReturnType<typeof store.getState>;
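// Typed hooks usage (illustrative, using react-redux's useDispatch/useSelector with the exported types):
//   const dispatch = useDispatch<AppDispatch>();
//   const operations = useSelector((state: RootState) => state.operations);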
| AzureTRE/ui/app/src/store/store.ts/0 | {
"file_path": "AzureTRE/ui/app/src/store/store.ts",
"repo_id": "AzureTRE",
"token_count": 96
} | 143 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import argparse
from fairseq.models.transformer_lm import TransformerLanguageModel
parser = argparse.ArgumentParser()
parser.add_argument("--data_dir", type=str, default="../../data/PubMed/data-bin")
parser.add_argument("--model_dir", type=str, default="../../checkpoints/Pre-trained-BioGPT")
parser.add_argument("--model_file", type=str, default="checkpoint.pt")
parser.add_argument("--bpecodes", type=str, default="../../data/bpecodes")
parser.add_argument("--beam", type=int, default=5)
parser.add_argument("--lenpen", type=float, default=1.0)
parser.add_argument("--min_len", type=int, default=100)
parser.add_argument("--lower", default=False, action="store_true")
args, _ = parser.parse_known_args()
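# Example invocation (illustrative; adjust the paths to your checkpoint and data layout):
#   python interactive.py --model_dir ../../checkpoints/Pre-trained-BioGPT --beam 5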
def main(args):
m = TransformerLanguageModel.from_pretrained(
args.model_dir,
args.model_file,
args.data_dir,
tokenizer='moses',
bpe='fastbpe',
bpe_codes=args.bpecodes,
min_len=args.min_len,
max_len_b=1024,
beam=args.beam,
lenpen=args.lenpen,
max_tokens=12000)
print(m.cfg)
if m.cfg.common.fp16:
print('Converting to float 16')
m.half()
m.cuda()
while True:
print("Please input and press enter:")
_src = input().strip()
src_tokens = m.encode(_src)
generate = m.generate([src_tokens], beam=args.beam)[0]
output = m.decode(generate[0]["tokens"])
print(output)
if __name__ == "__main__":
main(args) | BioGPT/examples/text-generation/interactive.py/0 | {
"file_path": "BioGPT/examples/text-generation/interactive.py",
"repo_id": "BioGPT",
"token_count": 656
} | 144 |
# Transparency FAQ for BitBLAS
## What is BitBLAS?
BitBLAS is a lightweight framework designed for generating high-performance CUDA/HIP code for BLAS (Basic Linear Algebra Subprograms) operators, emphasizing swizzling and layout propagation. It leverages a Domain-Specific Language (DSL), specifically TIR Script, to offer flexibility and efficiency in mathematical computations. BitBLAS aims to provide performance comparable to cuBLAS while introducing more flexibility and efficiency through its unique features.
## What can BitBLAS do?
BitBLAS enhances the performance and flexibility of linear algebra computations with features like:
- Auto Tensorization: Automatically optimizes code for various data types and operators, supporting FP16, INT8, and mixed precision operations.
- Dynamic Symbolic Support: Facilitates kernel generation with dynamic shapes, enabling efficient computation for variable data sizes.
- High-Performance Computing: Offers optimized performance for different data operations, including FP16xFP16, FP16xINT4/2/1, INT8xINT8, and INT8xINT4/2/1, among others.
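As a concrete illustration of the mixed-precision support listed above, the sketch below declares a single FP16xINT4 GEMM operator. It follows the `bitblas.MatmulConfig`/`bitblas.Matmul` pattern from the project's quickstart; exact parameter names and defaults may vary between releases, so treat it as an illustrative sketch rather than an authoritative reference.

```python
import torch
import bitblas

# Declare an FP16-activation x INT4-weight GEMM (names follow the quickstart pattern).
config = bitblas.MatmulConfig(
    M=1,                 # rows of A; dynamic-shape variants cover variable M
    N=1024,              # output features
    K=1024,              # reduction dimension
    A_dtype="float16",
    W_dtype="int4",
    accum_dtype="float16",
    out_dtype="float16",
    layout="nt",         # A row-major, W transposed
    with_bias=False,
)
matmul = bitblas.Matmul(config=config)

# Pack an INT4-range weight into the operator's internal layout, then run the kernel.
activation = torch.rand((1, 1024), dtype=torch.float16).cuda()
weight = torch.randint(0, 7, (1024, 1024), dtype=torch.int8).cuda()
packed_weight = matmul.transform_weight(weight)
output = matmul(activation, packed_weight)
```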
## What are BitBLAS's intended uses?
BitBLAS is intended for developers and researchers who require high-performance linear algebra computations in their CUDA/HIP-based applications. It is particularly beneficial for:
- Machine Learning and Deep Learning: Accelerating training and inference computations.
- Scientific Computing: Handling large-scale linear algebra operations efficiently.
- High-Performance Computing (HPC): Enhancing performance in computationally intensive applications.
## Data Handling and Privacy
This project is committed to protecting privacy and ensuring a secure environment for all users. It is designed with the following principles in mind:
- No User Data Collection: The project does not collect, process, or store any personal or privacy-sensitive data from users. Users can utilize the project's features without the concern of their data being recorded or misused.
- Transparency: We believe in complete transparency with our community. As such, we clearly state that no user data is collected or processed at any stage of the project's usage.
- User Control and Privacy: Since the project does not involve user data, individuals retain full control over their information. Users can interact with the project knowing their privacy is safeguarded.
## Security Considerations
The security of the project and its users is paramount. Despite not handling user data, we adhere to best practices in software development to ensure the project's integrity and safety:
- Regular Security Audits: The project undergoes regular security reviews and audits to identify and remediate any potential vulnerabilities, ensuring the highest level of security.
- Open Source Security: As an open-source project, our code is available for review, allowing the community to examine and contribute to the project's security.
- Security Incident Response: In the unlikely event of a security issue, we have established procedures for prompt and effective response to investigate and address the concern.
- Community Involvement: We encourage the community to report any security concerns or suggestions for improvement. Our project's success and security are enhanced by active community participation and feedback.
## Compliance and Licensing
As a project initiated and released by Microsoft, we adhere strictly to legal and regulatory standards to ensure our contributions meet the highest compliance criteria. Here are key points regarding our compliance and licensing practices:
- Microsoft's Commitment: This project is part of Microsoft's commitment to supporting and contributing to the open-source community. We ensure that all contributions are compliant with current legal standards and practices.
- MIT License: The project is licensed under the MIT License, one of the most permissive and open licenses available. This license allows for almost unrestricted freedom to use, modify, and distribute the project, provided that the license and copyright notice are included with any substantial portion of the software.
- License Clarity: We have clearly indicated the licensing terms within the project repository to ensure that all users and contributors are informed of their rights and obligations when using or contributing to the project.
| BitBLAS/TRANSPARENCY.md/0 | {
"file_path": "BitBLAS/TRANSPARENCY.md",
"repo_id": "BitBLAS",
"token_count": 870
} | 145 |
# coding=utf-8
# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
#
# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
# and OPT implementations in this library. It has been modified from its
# original forms to accommodate minor architectural differences compared
# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch LLaMA model."""
import math
import warnings
from typing import List, Optional, Tuple, Union
import torch
import torch.nn.functional as F
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from transformers.activations import ACT2FN
from transformers.cache_utils import Cache, DynamicCache, StaticCache
from transformers.modeling_attn_mask_utils import AttentionMaskConverter
from transformers.modeling_outputs import (
BaseModelOutputWithPast,
CausalLMOutputWithPast,
QuestionAnsweringModelOutput,
SequenceClassifierOutputWithPast,
)
from transformers.modeling_utils import PreTrainedModel
from transformers.pytorch_utils import ALL_LAYERNORM_LAYERS
from transformers.utils import (
add_start_docstrings,
add_start_docstrings_to_model_forward,
is_flash_attn_2_available,
is_flash_attn_greater_or_equal_2_10,
logging,
replace_return_docstrings,
)
from configuration_bitnet import BitnetConfig
from utils_quant import BitLinear
if is_flash_attn_2_available():
from flash_attn import flash_attn_func, flash_attn_varlen_func
from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = "BitnetConfig"
def _get_unpad_data(attention_mask):
seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
max_seqlen_in_batch = seqlens_in_batch.max().item()
cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
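# Illustrative example: attention_mask = [[1, 1, 0], [1, 1, 1]] yields indices = [0, 1, 3, 4, 5],
# cu_seqlens = [0, 2, 5] and max_seqlen_in_batch = 3 -- the unpadded layout expected by
# flash_attn_varlen_func.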
return (
indices,
cu_seqlens,
max_seqlen_in_batch,
)
class BitnetRMSNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-6):
"""
BitnetRMSNorm is equivalent to T5LayerNorm
"""
super().__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.variance_epsilon = eps
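# forward() computes weight * x / sqrt(mean(x**2, dim=-1) + eps), upcasting to float32 for
# numerical stability and casting the result back to the input dtype.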
def forward(self, hidden_states):
input_dtype = hidden_states.dtype
hidden_states = hidden_states.to(torch.float32)
variance = hidden_states.pow(2).mean(-1, keepdim=True)
hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
return self.weight * hidden_states.to(input_dtype)
ALL_LAYERNORM_LAYERS.append(BitnetRMSNorm)
class BitnetRotaryEmbedding(nn.Module):
def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0):
super().__init__()
self.scaling_factor = scaling_factor
self.dim = dim
self.max_position_embeddings = max_position_embeddings
self.base = base
inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2, dtype=torch.int64).float().to(device) / self.dim))
self.register_buffer("inv_freq", inv_freq)
# For BC we register cos and sin cached
self.max_seq_len_cached = max_position_embeddings
t = torch.arange(self.max_seq_len_cached, device=device, dtype=torch.int64).type_as(self.inv_freq)
t = t / self.scaling_factor
freqs = torch.outer(t, self.inv_freq)
# Different from paper, but it uses a different permutation in order to obtain the same calculation
emb = torch.cat((freqs, freqs), dim=-1)
self.register_buffer("_cos_cached", emb.cos().to(torch.get_default_dtype()), persistent=False)
self.register_buffer("_sin_cached", emb.sin().to(torch.get_default_dtype()), persistent=False)
@property
def sin_cached(self):
logger.warning_once(
"The sin_cached attribute will be removed in 4.39. Bear in mind that its contents changed in v4.38. Use "
"the forward method of RoPE from now on instead. It is not used in the `BitnetAttention` class"
)
return self._sin_cached
@property
def cos_cached(self):
logger.warning_once(
"The cos_cached attribute will be removed in 4.39. Bear in mind that its contents changed in v4.38. Use "
"the forward method of RoPE from now on instead. It is not used in the `BitnetAttention` class"
)
return self._cos_cached
@torch.no_grad()
def forward(self, x, position_ids):
# x: [bs, num_attention_heads, seq_len, head_size]
inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1)
position_ids_expanded = position_ids[:, None, :].float()
# Force float32 since bfloat16 loses precision on long contexts
# See https://github.com/huggingface/transformers/pull/29285
device_type = x.device.type
device_type = device_type if isinstance(device_type, str) and device_type != "mps" else "cpu"
with torch.autocast(device_type=device_type, enabled=False):
freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
emb = torch.cat((freqs, freqs), dim=-1)
cos = emb.cos()
sin = emb.sin()
return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
def rotate_half(x):
"""Rotates half the hidden dims of the input."""
x1 = x[..., : x.shape[-1] // 2]
x2 = x[..., x.shape[-1] // 2 :]
return torch.cat((-x2, x1), dim=-1)
def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
"""Applies Rotary Position Embedding to the query and key tensors.
Args:
q (`torch.Tensor`): The query tensor.
k (`torch.Tensor`): The key tensor.
cos (`torch.Tensor`): The cosine part of the rotary embedding.
sin (`torch.Tensor`): The sine part of the rotary embedding.
position_ids (`torch.Tensor`, *optional*):
Deprecated and unused.
unsqueeze_dim (`int`, *optional*, defaults to 1):
The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
Returns:
`tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
"""
cos = cos.unsqueeze(unsqueeze_dim)
sin = sin.unsqueeze(unsqueeze_dim)
q_embed = (q * cos) + (rotate_half(q) * sin)
k_embed = (k * cos) + (rotate_half(k) * sin)
return q_embed, k_embed
class BitnetMLP(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.hidden_size = config.hidden_size
self.intermediate_size = config.intermediate_size
self.gate_proj = BitLinear(
self.hidden_size, self.intermediate_size, bias=False,
weight_bits=config.weight_bits, input_bits=config.input_bits,
)
self.up_proj = BitLinear(
self.hidden_size, self.intermediate_size, bias=False,
weight_bits=config.weight_bits, input_bits=config.input_bits,
)
self.down_proj = BitLinear(
self.intermediate_size, self.hidden_size, bias=False,
weight_bits=config.weight_bits, input_bits=config.input_bits,
)
self.act_fn = ACT2FN[config.hidden_act]
self.ffn_layernorm = BitnetRMSNorm(self.intermediate_size, eps=config.rms_norm_eps)
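# Note: unlike the reference LLaMA MLP, forward() below applies an extra RMSNorm (ffn_layernorm)
# to the gated activation before down_proj, mirroring the inner_attn_ln applied before o_proj in
# the attention blocks.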
def forward(self, x):
x = self.act_fn(self.gate_proj(x)) * self.up_proj(x)
x = self.ffn_layernorm(x)
x = self.down_proj(x)
return x
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
"""
This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
"""
batch, num_key_value_heads, slen, head_dim = hidden_states.shape
if n_rep == 1:
return hidden_states
hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
class BitnetAttention(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(self, config: BitnetConfig, layer_idx: Optional[int] = None):
super().__init__()
self.config = config
self.layer_idx = layer_idx
if layer_idx is None:
logger.warning_once(
f"Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will "
"lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` "
"when creating this class."
)
self.attention_dropout = config.attention_dropout
self.hidden_size = config.hidden_size
self.num_heads = config.num_attention_heads
self.head_dim = self.hidden_size // self.num_heads
self.num_key_value_heads = config.num_key_value_heads
self.num_key_value_groups = self.num_heads // self.num_key_value_heads
self.max_position_embeddings = config.max_position_embeddings
self.rope_theta = config.rope_theta
self.is_causal = True
if (self.head_dim * self.num_heads) != self.hidden_size:
raise ValueError(
f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
f" and `num_heads`: {self.num_heads})."
)
self.q_proj = BitLinear(
self.hidden_size, self.num_heads * self.head_dim, bias=config.attention_bias,
weight_bits=config.weight_bits, input_bits=config.input_bits,
)
self.k_proj = BitLinear(
self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias,
weight_bits=config.weight_bits, input_bits=config.input_bits,
)
self.v_proj = BitLinear(
self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias,
weight_bits=config.weight_bits, input_bits=config.input_bits,
)
self.o_proj = BitLinear(
self.hidden_size, self.hidden_size, bias=config.attention_bias,
weight_bits=config.weight_bits, input_bits=config.input_bits,
)
self._init_rope()
self.inner_attn_ln = BitnetRMSNorm(self.hidden_size, eps=config.rms_norm_eps)
def _init_rope(self):
if self.config.rope_scaling is None:
self.rotary_emb = BitnetRotaryEmbedding(
self.head_dim,
max_position_embeddings=self.max_position_embeddings,
base=self.rope_theta,
)
else:
raise NotImplementedError
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_value: Optional[Cache] = None,
output_attentions: bool = False,
use_cache: bool = False,
cache_position: Optional[torch.LongTensor] = None,
**kwargs,
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
bsz, q_len, _ = hidden_states.size()
query_states = self.q_proj(hidden_states)
key_states = self.k_proj(hidden_states)
value_states = self.v_proj(hidden_states)
query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
past_key_value = getattr(self, "past_key_value", past_key_value)
cos, sin = self.rotary_emb(value_states, position_ids)
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
if past_key_value is not None:
# sin and cos are specific to RoPE models; cache_position needed for the static cache
cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
key_states = repeat_kv(key_states, self.num_key_value_groups)
value_states = repeat_kv(value_states, self.num_key_value_groups)
attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)
if attention_mask is not None: # no matter the length, we just slice it
causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
attn_weights = attn_weights + causal_mask
# upcast attention to fp32
attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
attn_weights = nn.functional.dropout(attn_weights, p=self.attention_dropout, training=self.training)
attn_output = torch.matmul(attn_weights, value_states)
if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
raise ValueError(
f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
f" {attn_output.size()}"
)
attn_output = attn_output.transpose(1, 2).contiguous()
attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)
attn_output = self.inner_attn_ln(attn_output)
attn_output = self.o_proj(attn_output)
if not output_attentions:
attn_weights = None
return attn_output, attn_weights, past_key_value
class BitnetFlashAttention2(BitnetAttention):
"""
Bitnet flash attention module. This module inherits from `BitnetAttention` as the weights of the module stays
untouched. The only required change would be on the forward pass where it needs to correctly call the public API of
flash attention and deal with padding tokens in case the input contains any of them.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1.
# flash_attn<2.1 generates a top-left aligned causal mask, while what is needed here is bottom-right alignment, which became the default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.
# Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left).
self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_value: Optional[Cache] = None,
output_attentions: bool = False,
use_cache: bool = False,
cache_position: Optional[torch.LongTensor] = None,
**kwargs,
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
output_attentions = False
bsz, q_len, _ = hidden_states.size()
query_states = self.q_proj(hidden_states)
key_states = self.k_proj(hidden_states)
value_states = self.v_proj(hidden_states)
# Flash attention requires the input to have the shape
# batch_size x seq_length x num_heads x head_dim
# therefore we just need to keep the original shape
query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
cos, sin = self.rotary_emb(value_states, position_ids)
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
past_key_value = getattr(self, "past_key_value", past_key_value)
if past_key_value is not None:
# sin and cos are specific to RoPE models; cache_position needed for the static cache
cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
# TODO: These transposes are quite inefficient, but Flash Attention requires the layout [batch_size, sequence_length, num_heads, head_dim]. We would need to refactor the KV cache
# to be able to avoid many of these transpose/reshape/view.
query_states = query_states.transpose(1, 2)
key_states = key_states.transpose(1, 2)
value_states = value_states.transpose(1, 2)
dropout_rate = self.attention_dropout if self.training else 0.0
# In PEFT, we usually cast the layer norms to float32 for training stability,
# so the input hidden states get silently cast to float32. We therefore need to
# cast them back to the correct dtype to make sure everything works as expected.
# This can slow down training & inference, so it is recommended not to cast the LayerNorms
# to fp32. (BitnetRMSNorm handles it correctly)
input_dtype = query_states.dtype
if input_dtype == torch.float32:
if torch.is_autocast_enabled():
target_dtype = torch.get_autocast_gpu_dtype()
# Handle the case where the model is quantized
elif hasattr(self.config, "_pre_quantization_dtype"):
target_dtype = self.config._pre_quantization_dtype
else:
target_dtype = self.q_proj.weight.dtype
logger.warning_once(
f"The input hidden states seems to be silently casted in float32, this might be related to"
f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in"
f" {target_dtype}."
)
query_states = query_states.to(target_dtype)
key_states = key_states.to(target_dtype)
value_states = value_states.to(target_dtype)
attn_output = self._flash_attention_forward(
query_states, key_states, value_states, attention_mask, q_len, dropout=dropout_rate
)
attn_output = attn_output.reshape(bsz, q_len, self.hidden_size).contiguous()
attn_output = self.inner_attn_ln(attn_output)
attn_output = self.o_proj(attn_output)
if not output_attentions:
attn_weights = None
return attn_output, attn_weights, past_key_value
def _flash_attention_forward(
self, query_states, key_states, value_states, attention_mask, query_length, dropout=0.0, softmax_scale=None
):
"""
Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token,
the input is first unpadded, the attention scores are computed, and the output is then re-padded.
Args:
query_states (`torch.Tensor`):
Input query states to be passed to Flash Attention API
key_states (`torch.Tensor`):
Input key states to be passed to Flash Attention API
value_states (`torch.Tensor`):
Input value states to be passed to Flash Attention API
attention_mask (`torch.Tensor`):
The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the
position of padding tokens and 1 for the position of non-padding tokens.
dropout (`float`):
Attention dropout
softmax_scale (`float`, *optional*):
The scaling of QK^T before applying softmax. Default to 1 / sqrt(head_dim)
"""
if not self._flash_attn_uses_top_left_mask:
causal = self.is_causal
else:
# TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details, please see the comment in BitnetFlashAttention2 __init__.
causal = self.is_causal and query_length != 1
# Contains at least one padding token in the sequence
if attention_mask is not None:
batch_size = query_states.shape[0]
query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input(
query_states, key_states, value_states, attention_mask, query_length
)
cu_seqlens_q, cu_seqlens_k = cu_seq_lens
max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens
attn_output_unpad = flash_attn_varlen_func(
query_states,
key_states,
value_states,
cu_seqlens_q=cu_seqlens_q,
cu_seqlens_k=cu_seqlens_k,
max_seqlen_q=max_seqlen_in_batch_q,
max_seqlen_k=max_seqlen_in_batch_k,
dropout_p=dropout,
softmax_scale=softmax_scale,
causal=causal,
)
attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length)
else:
attn_output = flash_attn_func(
query_states, key_states, value_states, dropout, softmax_scale=softmax_scale, causal=causal
)
return attn_output
def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length):
indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask)
batch_size, kv_seq_len, num_key_value_heads, head_dim = key_layer.shape
key_layer = index_first_axis(
key_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
)
value_layer = index_first_axis(
value_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
)
if query_length == kv_seq_len:
query_layer = index_first_axis(
query_layer.reshape(batch_size * kv_seq_len, self.num_heads, head_dim), indices_k
)
cu_seqlens_q = cu_seqlens_k
max_seqlen_in_batch_q = max_seqlen_in_batch_k
indices_q = indices_k
elif query_length == 1:
max_seqlen_in_batch_q = 1
cu_seqlens_q = torch.arange(
batch_size + 1, dtype=torch.int32, device=query_layer.device
) # There is a memcpy here, that is very bad.
indices_q = cu_seqlens_q[:-1]
query_layer = query_layer.squeeze(1)
else:
# The -q_len: slice assumes left padding.
attention_mask = attention_mask[:, -query_length:]
query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask)
return (
query_layer,
key_layer,
value_layer,
indices_q,
(cu_seqlens_q, cu_seqlens_k),
(max_seqlen_in_batch_q, max_seqlen_in_batch_k),
)
LLAMA_ATTENTION_CLASSES = {
"eager": BitnetAttention,
"flash_attention_2": BitnetFlashAttention2,
}
class BitnetDecoderLayer(nn.Module):
def __init__(self, config: BitnetConfig, layer_idx: int):
super().__init__()
self.hidden_size = config.hidden_size
self.self_attn = LLAMA_ATTENTION_CLASSES[config._attn_implementation](config=config, layer_idx=layer_idx)
self.mlp = BitnetMLP(config)
self.input_layernorm = BitnetRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.post_attention_layernorm = BitnetRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_value: Optional[Tuple[torch.Tensor]] = None,
output_attentions: Optional[bool] = False,
use_cache: Optional[bool] = False,
cache_position: Optional[torch.LongTensor] = None,
**kwargs,
) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
"""
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`, *optional*):
attention mask of size `(batch_size, sequence_length)` if flash attention is used or `(batch_size, 1,
query_sequence_length, key_sequence_length)` if default attention is used.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
(see `past_key_values`).
past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
"""
if "padding_mask" in kwargs:
warnings.warn(
"Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure use `attention_mask` instead.`"
)
residual = hidden_states
hidden_states = self.input_layernorm(hidden_states)
# Self Attention
hidden_states, self_attn_weights, present_key_value = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_value=past_key_value,
output_attentions=output_attentions,
use_cache=use_cache,
cache_position=cache_position,
**kwargs,
)
hidden_states = residual + hidden_states
# Fully Connected
residual = hidden_states
hidden_states = self.post_attention_layernorm(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = residual + hidden_states
outputs = (hidden_states,)
if output_attentions:
outputs += (self_attn_weights,)
if use_cache:
outputs += (present_key_value,)
return outputs
LLAMA_START_DOCSTRING = r"""
This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
etc.)
This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
and behavior.
Parameters:
config ([`BitnetConfig`]):
Model configuration class with all the parameters of the model. Initializing with a config file does not
load the weights associated with the model, only the configuration. Check out the
[`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
@add_start_docstrings(
"The bare LLaMA Model outputting raw hidden-states without any specific head on top.",
LLAMA_START_DOCSTRING,
)
class BitnetPreTrainedModel(PreTrainedModel):
config_class = BitnetConfig
base_model_prefix = "model"
supports_gradient_checkpointing = True
_no_split_modules = ["BitnetDecoderLayer"]
_skip_keys_device_placement = ["past_key_values"]
_supports_flash_attn_2 = True
_supports_sdpa = False
_supports_cache_class = True
def _init_weights(self, module):
std = self.config.initializer_range
if isinstance(module, nn.Linear):
module.weight.data.normal_(mean=0.0, std=std)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=std)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
def _setup_cache(self, cache_cls, max_batch_size, max_cache_len: Optional[int] = None):
if self.config._attn_implementation == "flash_attention_2" and cache_cls == StaticCache:
raise ValueError(
"`static` cache implementation is not compatible with `attn_implementation==flash_attention_2` "
"make sure to use `sdpa` in the mean time, and open an issue at https://github.com/huggingface/transformers"
)
for layer in self.model.layers:
device = layer.input_layernorm.weight.device
if hasattr(self.config, "_pre_quantization_dtype"):
dtype = self.config._pre_quantization_dtype
else:
dtype = layer.self_attn.o_proj.weight.dtype
layer.self_attn.past_key_value = cache_cls(
self.config, max_batch_size, max_cache_len, device=device, dtype=dtype
)
def _reset_cache(self):
for layer in self.model.layers:
layer.self_attn.past_key_value = None
LLAMA_INPUTS_DOCSTRING = r"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
it.
Indices can be obtained using [`BitnetTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
Indices can be obtained using [`BitnetTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
If `past_key_values` is used, optionally only the last `input_ids` have to be input (see
`past_key_values`).
If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
information on the default strategy.
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
config.n_positions - 1]`.
[What are position IDs?](../glossary#position-ids)
past_key_values (`Cache` or `tuple(tuple(torch.FloatTensor))`, *optional*):
Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
blocks) that can be used to speed up sequential decoding. This typically consists in the `past_key_values`
returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.
Two formats are allowed:
- a [`~cache_utils.Cache`] instance;
- Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`). This is also known as the legacy
cache format.
The model will output the same cache format that is fed as input. If no `past_key_values` are passed, the
legacy cache format will be returned.
If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't
have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`
of shape `(batch_size, sequence_length)`.
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
model's internal embedding lookup matrix.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
`past_key_values`).
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
Indices depicting the position of the input sequence tokens in the sequence. Contrarily to `position_ids`,
this tensor is not affected by padding. It is used to update the cache in the correct position and to infer
the complete sequence length.
"""
@add_start_docstrings(
"The bare LLaMA Model outputting raw hidden-states without any specific head on top.",
LLAMA_START_DOCSTRING,
)
class BitnetModel(BitnetPreTrainedModel):
"""
Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`BitnetDecoderLayer`]
Args:
config: BitnetConfig
"""
def __init__(self, config: BitnetConfig):
super().__init__(config)
self.padding_idx = config.pad_token_id
self.vocab_size = config.vocab_size
self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
self.layers = nn.ModuleList(
[BitnetDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
)
self.norm = BitnetRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.gradient_checkpointing = False
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.embed_tokens
def set_input_embeddings(self, value):
self.embed_tokens = value
@add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING)
def forward(
self,
input_ids: torch.LongTensor = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[List[torch.FloatTensor]] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
) -> Union[Tuple, BaseModelOutputWithPast]:
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError(
"You cannot specify both input_ids and inputs_embeds at the same time, and must specify either one"
)
if self.gradient_checkpointing and self.training and use_cache:
logger.warning_once(
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
)
use_cache = False
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids)
past_seen_tokens = 0
if use_cache: # kept for BC (cache positions)
if not isinstance(past_key_values, StaticCache):
past_key_values = DynamicCache.from_legacy_cache(past_key_values)
past_seen_tokens = past_key_values.get_seq_length()
if cache_position is None:
if isinstance(past_key_values, StaticCache):
raise ValueError("cache_position is a required argument when using StaticCache.")
cache_position = torch.arange(
past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
)
if position_ids is None:
position_ids = cache_position.unsqueeze(0)
causal_mask = self._update_causal_mask(attention_mask, inputs_embeds, cache_position)
# embed positions
hidden_states = inputs_embeds
# decoder layers
all_hidden_states = () if output_hidden_states else None
all_self_attns = () if output_attentions else None
next_decoder_cache = None
for decoder_layer in self.layers:
if output_hidden_states:
all_hidden_states += (hidden_states,)
if self.gradient_checkpointing and self.training:
layer_outputs = self._gradient_checkpointing_func(
decoder_layer.__call__,
hidden_states,
causal_mask,
position_ids,
past_key_values,
output_attentions,
use_cache,
cache_position,
)
else:
layer_outputs = decoder_layer(
hidden_states,
attention_mask=causal_mask,
position_ids=position_ids,
past_key_value=past_key_values,
output_attentions=output_attentions,
use_cache=use_cache,
cache_position=cache_position,
)
hidden_states = layer_outputs[0]
if use_cache:
next_decoder_cache = layer_outputs[2 if output_attentions else 1]
if output_attentions:
all_self_attns += (layer_outputs[1],)
hidden_states = self.norm(hidden_states)
# add hidden states from the last decoder layer
if output_hidden_states:
all_hidden_states += (hidden_states,)
next_cache = None
if use_cache:
next_cache = (
next_decoder_cache.to_legacy_cache() if isinstance(next_decoder_cache, Cache) else next_decoder_cache
)
if not return_dict:
return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)
return BaseModelOutputWithPast(
last_hidden_state=hidden_states,
past_key_values=next_cache,
hidden_states=all_hidden_states,
attentions=all_self_attns,
)
# TODO: As of torch==2.2.0, the `attention_mask` passed to the model in `generate` is 2D and of dynamic length even when the static
# KV cache is used. This is an issue for torch.compile which then recaptures cudagraphs at each decode step due to the dynamic shapes.
# (`recording cudagraph tree for symint key 13`, etc.), which is VERY slow. A workaround is `@torch.compiler.disable`, but this prevents using
# `fullgraph=True`. See more context in https://github.com/huggingface/transformers/pull/29114
def _update_causal_mask(self, attention_mask, input_tensor, cache_position):
if self.config._attn_implementation == "flash_attention_2":
if attention_mask is not None and 0.0 in attention_mask:
return attention_mask
return None
dtype, device = input_tensor.dtype, input_tensor.device
min_dtype = torch.finfo(dtype).min
sequence_length = input_tensor.shape[1]
if hasattr(self.layers[0].self_attn, "past_key_value"): # static cache
target_length = self.config.max_position_embeddings
else: # dynamic cache
target_length = (
attention_mask.shape[-1] if isinstance(attention_mask, torch.Tensor) else cache_position[-1] + 1
)
causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device)
if sequence_length != 1:
causal_mask = torch.triu(causal_mask, diagonal=1)
causal_mask *= torch.arange(target_length, device=device) > cache_position.reshape(-1, 1)
causal_mask = causal_mask[None, None, :, :].expand(input_tensor.shape[0], 1, -1, -1)
if attention_mask is not None:
causal_mask = causal_mask.clone() # copy to contiguous memory for in-place edit
if attention_mask.dim() == 2:
mask_length = attention_mask.shape[-1]
padding_mask = causal_mask[..., :mask_length].eq(0.0) * attention_mask[:, None, None, :].eq(0.0)
causal_mask[..., :mask_length] = causal_mask[..., :mask_length].masked_fill(padding_mask, min_dtype)
elif attention_mask.dim() == 4:
# backwards compatibility: we allow passing a 4D attention mask shorter than the input length with
# cache. In that case, the 4D attention mask attends to the newest tokens only.
if attention_mask.shape[-2] < cache_position[0] + sequence_length:
offset = cache_position[0]
else:
offset = 0
mask_shape = attention_mask.shape
mask_slice = (attention_mask.eq(0.0)).to(dtype=dtype) * min_dtype
causal_mask[
: mask_shape[0], : mask_shape[1], offset : mask_shape[2] + offset, : mask_shape[3]
] = mask_slice
return causal_mask
class BitnetForCausalLM(BitnetPreTrainedModel):
_tied_weights_keys = ["lm_head.weight"]
def __init__(self, config):
super().__init__(config)
self.model = BitnetModel(config)
self.vocab_size = config.vocab_size
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.model.embed_tokens
def set_input_embeddings(self, value):
self.model.embed_tokens = value
def get_output_embeddings(self):
return self.lm_head
def set_output_embeddings(self, new_embeddings):
self.lm_head = new_embeddings
def set_decoder(self, decoder):
self.model = decoder
def get_decoder(self):
return self.model
@add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids: torch.LongTensor = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[List[torch.FloatTensor]] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
) -> Union[Tuple, CausalLMOutputWithPast]:
r"""
Args:
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Returns:
Example:
```python
>>> from transformers import LlamaTokenizer, LlamaForCausalLM
>>> model = LlamaForCausalLM.from_pretrained("meta-llama/Bitnet-2-7b-hf")
>>> tokenizer = LlamaTokenizer.from_pretrained("meta-llama/Bitnet-2-7b-hf")
>>> prompt = "Hey, are you conscious? Can you talk to me?"
>>> inputs = tokenizer(prompt, return_tensors="pt")
>>> # Generate
>>> generate_ids = model.generate(inputs.input_ids, max_length=30)
>>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
"Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
outputs = self.model(
input_ids=input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
cache_position=cache_position,
)
hidden_states = outputs[0]
logits = self.lm_head(hidden_states)
logits = logits.float()
loss = None
if labels is not None:
# Shift so that tokens < n predict n
shift_logits = logits[..., :-1, :].contiguous()
shift_labels = labels[..., 1:].contiguous()
# Flatten the tokens
loss_fct = CrossEntropyLoss()
shift_logits = shift_logits.view(-1, self.config.vocab_size)
shift_labels = shift_labels.view(-1)
# Enable model parallelism
shift_labels = shift_labels.to(shift_logits.device)
loss = loss_fct(shift_logits, shift_labels)
if not return_dict:
output = (logits,) + outputs[1:]
return (loss,) + output if loss is not None else output
return CausalLMOutputWithPast(
loss=loss,
logits=logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
def prepare_inputs_for_generation(
self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, cache_position=None, **kwargs
):
# With static cache, the `past_key_values` is None
# TODO joao: standardize interface for the different Cache classes and remove this if
has_static_cache = False
if past_key_values is None:
past_key_values = getattr(self.model.layers[0].self_attn, "past_key_value", None)
has_static_cache = past_key_values is not None
past_length = 0
if past_key_values is not None:
if isinstance(past_key_values, Cache):
past_length = cache_position[0] if cache_position is not None else past_key_values.get_seq_length()
max_cache_length = (
torch.tensor(past_key_values.get_max_length(), device=input_ids.device)
if past_key_values.get_max_length() is not None
else None
)
cache_length = past_length if max_cache_length is None else torch.min(max_cache_length, past_length)
# TODO joao: remove this `else` after `generate` prioritizes `Cache` objects
else:
cache_length = past_length = past_key_values[0][0].shape[2]
max_cache_length = None
# Keep only the unprocessed tokens:
# 1 - If the length of the attention_mask exceeds the length of input_ids, then we are in a setting where
# some of the inputs are exclusively passed as part of the cache (e.g. when passing input_embeds as
# input)
if attention_mask is not None and attention_mask.shape[1] > input_ids.shape[1]:
input_ids = input_ids[:, -(attention_mask.shape[1] - past_length) :]
# 2 - If the past_length is smaller than input_ids', then input_ids holds all input tokens. We can discard
# input_ids based on the past_length.
elif past_length < input_ids.shape[1]:
input_ids = input_ids[:, past_length:]
# 3 - Otherwise (past_length >= input_ids.shape[1]), let's assume input_ids only has unprocessed tokens.
# If we are about to go beyond the maximum cache length, we need to crop the input attention mask.
if (
max_cache_length is not None
and attention_mask is not None
and cache_length + input_ids.shape[1] > max_cache_length
):
attention_mask = attention_mask[:, -max_cache_length:]
position_ids = kwargs.get("position_ids", None)
if attention_mask is not None and position_ids is None:
# create position_ids on the fly for batch generation
position_ids = attention_mask.long().cumsum(-1) - 1
position_ids.masked_fill_(attention_mask == 0, 1)
if past_key_values:
position_ids = position_ids[:, -input_ids.shape[1] :]
# if `inputs_embeds` are passed, we only want to use them in the 1st generation step
if inputs_embeds is not None and past_key_values is None:
model_inputs = {"inputs_embeds": inputs_embeds}
else:
# The `contiguous()` here is necessary to have a static stride during decoding. torchdynamo otherwise
# recompiles graphs as the stride of the inputs is a guard. Ref: https://github.com/huggingface/transformers/pull/29114
# TODO: use `next_tokens` directly instead.
model_inputs = {"input_ids": input_ids.contiguous()}
input_length = position_ids.shape[-1] if position_ids is not None else input_ids.shape[-1]
if cache_position is None:
cache_position = torch.arange(past_length, past_length + input_length, device=input_ids.device)
else:
cache_position = cache_position[-input_length:]
if has_static_cache:
past_key_values = None
model_inputs.update(
{
"position_ids": position_ids,
"cache_position": cache_position,
"past_key_values": past_key_values,
"use_cache": kwargs.get("use_cache"),
"attention_mask": attention_mask,
}
)
return model_inputs
@staticmethod
def _reorder_cache(past_key_values, beam_idx):
reordered_past = ()
for layer_past in past_key_values:
reordered_past += (
tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
)
return reordered_past
def _post_process_weights(self):
for name, module in self.model.named_modules():
if hasattr(module, "post_process_weights"):
print("Post processing weights for module", name)
module.post_process_weights()
@add_start_docstrings(
"""
The LLaMa Model transformer with a sequence classification head on top (linear layer).
[`BitnetForSequenceClassification`] uses the last token in order to do the classification, as other causal models
(e.g. GPT-2) do.
Since it does classification on the last token, it needs to know the position of the last token. If a
`pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
each row of the batch).
""",
LLAMA_START_DOCSTRING,
)
class BitnetForSequenceClassification(BitnetPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.model = BitnetModel(config)
self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.model.embed_tokens
def set_input_embeddings(self, value):
self.model.embed_tokens = value
@add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING)
def forward(
self,
input_ids: torch.LongTensor = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[List[torch.FloatTensor]] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, SequenceClassifierOutputWithPast]:
r"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
transformer_outputs = self.model(
input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
hidden_states = transformer_outputs[0]
logits = self.score(hidden_states)
if input_ids is not None:
batch_size = input_ids.shape[0]
else:
batch_size = inputs_embeds.shape[0]
if self.config.pad_token_id is None and batch_size != 1:
raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
if self.config.pad_token_id is None:
sequence_lengths = -1
else:
if input_ids is not None:
# if no pad token found, use modulo instead of reverse indexing for ONNX compatibility
sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1
sequence_lengths = sequence_lengths % input_ids.shape[-1]
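# Illustrative example: with pad_token_id = 0 and input_ids = [[5, 6, 0, 0]], eq(pad).argmax(-1) is 2,
# so sequence_lengths = 1 and the logit at position 1 (the last non-pad token) is pooled below.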
sequence_lengths = sequence_lengths.to(logits.device)
else:
sequence_lengths = -1
pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths]
loss = None
if labels is not None:
labels = labels.to(logits.device)
if self.config.problem_type is None:
if self.num_labels == 1:
self.config.problem_type = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
self.config.problem_type = "single_label_classification"
else:
self.config.problem_type = "multi_label_classification"
if self.config.problem_type == "regression":
loss_fct = MSELoss()
if self.num_labels == 1:
loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
else:
loss = loss_fct(pooled_logits, labels)
elif self.config.problem_type == "single_label_classification":
loss_fct = CrossEntropyLoss()
loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
elif self.config.problem_type == "multi_label_classification":
loss_fct = BCEWithLogitsLoss()
loss = loss_fct(pooled_logits, labels)
if not return_dict:
output = (pooled_logits,) + transformer_outputs[1:]
return ((loss,) + output) if loss is not None else output
return SequenceClassifierOutputWithPast(
loss=loss,
logits=pooled_logits,
past_key_values=transformer_outputs.past_key_values,
hidden_states=transformer_outputs.hidden_states,
attentions=transformer_outputs.attentions,
)
@add_start_docstrings(
"""
The Bitnet Model transformer with a span classification head on top for extractive question-answering tasks like
SQuAD (a linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
""",
LLAMA_START_DOCSTRING,
)
class BitnetForQuestionAnswering(BitnetPreTrainedModel):
base_model_prefix = "transformer"
# Copied from transformers.models.bloom.modeling_bloom.BloomForQuestionAnswering.__init__ with Bloom->Bitnet
def __init__(self, config):
super().__init__(config)
self.transformer = BitnetModel(config)
self.qa_outputs = nn.Linear(config.hidden_size, 2)
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.transformer.embed_tokens
def set_input_embeddings(self, value):
self.transformer.embed_tokens = value
@add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING)
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[List[torch.FloatTensor]] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
start_positions: Optional[torch.LongTensor] = None,
end_positions: Optional[torch.LongTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, QuestionAnsweringModelOutput]:
r"""
start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
are not taken into account for computing the loss.
end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
are not taken into account for computing the loss.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.transformer(
input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1).contiguous()
end_logits = end_logits.squeeze(-1).contiguous()
total_loss = None
if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, splitting can add an extra dimension
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1).to(start_logits.device)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1).to(end_logits.device)
# sometimes the start/end positions are outside our model inputs, we ignore these terms
ignored_index = start_logits.size(1)
start_positions = start_positions.clamp(0, ignored_index)
end_positions = end_positions.clamp(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
if not return_dict:
output = (start_logits, end_logits) + outputs[2:]
return ((total_loss,) + output) if total_loss is not None else output
return QuestionAnsweringModelOutput(
loss=total_loss,
start_logits=start_logits,
end_logits=end_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
) | BitBLAS/integration/BitNet/modeling_bitnet.py/0 | {
"file_path": "BitBLAS/integration/BitNet/modeling_bitnet.py",
"repo_id": "BitBLAS",
"token_count": 27678
} | 146 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
"""Base infra"""
from .analysis import (
BlockInfo,
IterInfo,
collect_block_iter_vars_used_in_access_region,
collect_vars_used_in_prim_expr,
detect_dominant_read,
is_broadcast_epilogue,
normalize_prim_func,
)
from .common_schedules import get_block, get_output_blocks, try_inline, try_inline_contiguous_spatial
from .schedule_rule import ScheduleRule
from .transform import ApplyDefaultSchedule, ApplyFastTuning
from .utils import fast_tune, fast_tune_with_dynamic_range
from .roller import *
| BitBLAS/python/bitblas/base/__init__.py/0 | {
"file_path": "BitBLAS/python/bitblas/base/__init__.py",
"repo_id": "BitBLAS",
"token_count": 202
} | 147 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
from .tir import get_analyzer_by_tir # pylint: disable=unused-import
| BitBLAS/python/bitblas/base/roller/shape_inference/__init__.py/0 | {
"file_path": "BitBLAS/python/bitblas/base/roller/shape_inference/__init__.py",
"repo_id": "BitBLAS",
"token_count": 44
} | 148 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
from .lop3 import get_lop3_intrin_group # noqa: F401
| BitBLAS/python/bitblas/gpu/intrin/__init__.py/0 | {
"file_path": "BitBLAS/python/bitblas/gpu/intrin/__init__.py",
"repo_id": "BitBLAS",
"token_count": 41
} | 149 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
from bitblas.gpu.matmul_analysis import get_propagate_map
from typing import Literal
from tvm import te, IRModule, DataType
from tvm.tir import IndexMap
def select_implementation(
M: int,
N: int,
datatype: Literal["float16", "int8", "e4m3_float8", "e5m2_float8"] = "float16",
dequantize_bits: int = -1,
storage_dtype: Literal["float16", "int8", "uint8", "int32", "uint32"] = "float16",
propagate_kind: Literal["A", "B"] = "B",
transpose_matrix: bool = False,
transform_kind: int = 0,
target_instruction: Literal["nvidia-mma"] = "nvidia-mma",
):
if target_instruction != "nvidia-mma":
raise ValueError("Currently only support nvidia-mma instruction")
    # This is a trick to get the basic tile size for the current datatype
# as for nvidia tensorcore instruction, the basic tile size is 16x16/16x32 for float16/int8
l = r = 16 # noqa: E741
if datatype in ["int8", "e4m3_float8", "e5m2_float8"]:
l, r = 16, 32 # noqa: E741
intra_index_map, _ = get_propagate_map(
transpose_matrix, dtype=datatype, matrix_name=propagate_kind)
target_dtype = DataType(datatype)
scaling_factor = 1
if dequantize_bits > 0 and dequantize_bits < target_dtype.bits:
scaling_factor = ((target_dtype.bits // dequantize_bits) * DataType(storage_dtype).bits //
target_dtype.bits)
r = r // scaling_factor
initial_indices = intra_index_map.initial_indices
scaling_final_indices = intra_index_map.map_indices(initial_indices[:-1] +
[initial_indices[-1] * scaling_factor])
scaling_final_indices = scaling_final_indices[:-1] + [
scaling_final_indices[-1] // scaling_factor
]
intra_index_map = IndexMap(
initial_indices,
scaling_final_indices,
None,
)
inp = te.placeholder((M, N // scaling_factor), name="inp", dtype=storage_dtype)
args = [inp]
if transform_kind >= 1:
arg = args[-1]
inter_warp = te.compute(
(M // l, (N // scaling_factor) // r, l, r),
lambda i, j, ii, jj: arg[i * l + ii, j * r + jj],
name="inter_warp_permutate",
)
args.append(inter_warp)
if transform_kind >= 2:
arg = args[-1]
def fcompute(*args):
warp_i, warp_j = args[-2:]
spatial_args = args[:-2]
permutate_i, permutate_j = intra_index_map.map_indices([warp_i, warp_j])
new_index = (*spatial_args, permutate_i, permutate_j)
return arg[new_index]
intra_warp = te.compute(
(M // l, (N // scaling_factor) // r, l, r),
fcompute,
name="intra_warp_permutate",
)
args.append(intra_warp)
args = [args[0], args[-1]]
func = te.create_prim_func(args)
return IRModule.from_expr(func)
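# Minimal usage sketch (illustrative, not part of the original module): build the
# weight-permutation IRModule for a 256x256 float16 operand consumed by the
# nvidia-mma path. All argument values below are assumptions chosen for the demo.
if __name__ == "__main__":
    demo_mod = select_implementation(
        M=256,
        N=256,
        datatype="float16",
        propagate_kind="B",
        transpose_matrix=True,
        transform_kind=2,
    )
    print(demo_mod)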
| BitBLAS/python/bitblas/ops/impl/ladder_permutate_impl.py/0 | {
"file_path": "BitBLAS/python/bitblas/ops/impl/ladder_permutate_impl.py",
"repo_id": "BitBLAS",
"token_count": 1419
} | 150 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
from typing import Dict, Tuple
from tvm.ir import IRModule
from tvm.ir.transform import PassContext, module_pass
from tvm import tir
from tvm.tir.schedule import BlockRV
from mlc_llm.quantization import quantization_schemes, GroupQuantizationSpec
from bitblas.gpu.gemv import is_gemv
from bitblas.gpu.matmul_analysis import (
get_reduction_blocks,
get_index_map,
get_root_block,
get_dequantize_block,
)
from bitblas.base import (
normalize_prim_func,
try_inline_contiguous_spatial,
)
# Define a module pass to annotate dequantization information
@module_pass(opt_level=0, name="AnnotateDecodeInformation")
class AnnotateDecodeInformation:
def __init__(self, spec: str = "q4f16_0"):
# Validate and store the specified quantization scheme
if spec not in quantization_schemes:
raise ValueError(f"Quantization scheme {spec} not found")
self.quantize_scheme = quantization_schemes[spec]
def detect_matmul(self, func: tir.PrimFunc) -> bool:
"""Detect if the given function represents a matrix multiplication."""
sch = tir.Schedule(func)
root_block = get_root_block(sch)
blocks = sch.get_child_blocks(root_block)
# Identify reduction blocks to infer matmul operations
reduction_blocks = get_reduction_blocks(sch, blocks)
if not reduction_blocks:
return False
# Check for index map patterns typical of matmul operations
main_block = reduction_blocks[0]
main_block_stmt = sch.get(main_block)
index_maps = get_index_map(main_block_stmt)
_is_matmul = index_maps is not None
block_infos = normalize_prim_func(sch)
block_infos = try_inline_contiguous_spatial(sch, block_infos)
block_info = block_infos[0]
_is_gemv = True
if len(block_info.iters) not in [2, 3]:
# either [B, S, R] = [B, S, R] * [B, R]
# or [S, R] = [S, R] * [R]
_is_gemv = False
if _is_gemv:
_is_gemv = is_gemv(sch, block_info)
return _is_matmul or _is_gemv
def transform_module(self, mod: IRModule, _: PassContext) -> IRModule:
"""Annotate dequantize information for all applicable functions in the module."""
for g_var, func in mod.functions.items():
if not isinstance(func, tir.PrimFunc) or g_var.name_hint == "main":
continue
if not self.detect_matmul(func):
continue # Process only if matmul is detected
sch = tir.Schedule(func)
root_block = get_root_block(sch)
blocks = sch.get_child_blocks(root_block)
dequantize_block = get_dequantize_block(sch, blocks)
if dequantize_block is None:
continue # Skip if no dequantize block is found
# Prepare dequantize info annotation
dequantize_info = self.prepare_dequantize_info(sch, dequantize_block)
# Annotate function with dequantize information
mod[g_var] = func.with_attr("dequantize_info", dequantize_info)
return mod
def prepare_dequantize_info(self, sch: tir.Schedule, dequantize_block: BlockRV) -> Dict:
"""Generate dequantize information for a given block."""
block_stmt = sch.get(dequantize_block)
block_name = block_stmt.name_hint
dequantize_info = {block_name: {"decode_block": block_name, "fast_decoding": False}}
quantize_spec = self.quantize_scheme.linear_weight
if isinstance(quantize_spec, GroupQuantizationSpec):
dequantize_info[block_name].update({
"with_scaling": True,
"group_size": quantize_spec.group_size,
})
# Determine source format based on quantization mode
quantize_mod = quantize_spec.mode
bits, source_format = self.parse_quantize_mode(quantize_mod)
dequantize_info[block_name]["source_format"] = {
"bits": bits,
"format": source_format,
}
# Set storage and target data types
storage_dtype = self.get_storage_dtype(block_stmt, source_format)
dequantize_info[block_name]["storage_dtype"] = storage_dtype
dequantize_info[block_name]["target_format"] = quantize_spec.dtype
return dequantize_info
def parse_quantize_mode(self, quantize_mod: str) -> Tuple[int, str]:
"""Extract bits and format from quantization mode."""
if quantize_mod.startswith("int"):
return int(quantize_mod[3:]), "int"
elif quantize_mod.startswith("uint"):
return int(quantize_mod[4:]), "uint"
raise ValueError(f"Unsupported mode {quantize_mod}")
def get_storage_dtype(self, block_stmt: BlockRV, source_format: str) -> str:
"""Determine storage data type based on source format."""
return (block_stmt.reads[0].buffer.dtype
if "nf" not in source_format else block_stmt.reads[1].buffer.dtype)
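# Minimal usage sketch (illustrative, not part of the original module): since the
# class is registered as a module pass, it can be applied directly to an IRModule
# holding quantized matmul PrimFuncs. "quantized_mod" is a placeholder name here.
#   annotated_mod = AnnotateDecodeInformation("q4f16_0")(quantized_mod)
# Each matmul PrimFunc with a detected dequantize block then carries a
# "dequantize_info" attribute for downstream schedulers to consume.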
| BitBLAS/python/bitblas/relax/transform/annotate_decode_block.py/0 | {
"file_path": "BitBLAS/python/bitblas/relax/transform/annotate_decode_block.py",
"repo_id": "BitBLAS",
"token_count": 2169
} | 151 |
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.
#include <cuda_runtime.h>
#include <cuda_fp16.h>
// Pack two half values.
static inline __device__ __host__ unsigned
__pack_half2(const half x, const half y)
{
unsigned v0 = *((unsigned short *)&x);
unsigned v1 = *((unsigned short *)&y);
return (v1 << 16) | v0;
}
void general_compress(const int8_t *lowbit, int8_t *compressed, const int nbit, const int N, bool isSigned = false)
{
int zero_point = isSigned ? ((1 << (nbit - 1)) - 1) : 0;
const int nbit_per_byte = 8 / nbit;
for (int i = 0; i < N / nbit_per_byte; i++)
{
compressed[i] = 0;
for (int j = 0; j < nbit_per_byte; j++)
{
compressed[i] |= ((lowbit[nbit_per_byte * i + j] + zero_point) << (nbit * j));
}
}
}
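// Illustrative example (not part of the original header): with nbit = 4 and
// isSigned = false, nbit_per_byte = 2, so the inputs {1, 2, 3, 4} pack into two
// bytes: 0x21 (= 2 << 4 | 1) and 0x43 (= 4 << 4 | 3). In the signed case a
// zero_point of (1 << 3) - 1 = 7 is added to every element before packing.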
void general_interleave_fp16(int8_t *origin_arr, int8_t *interleaved, const int nbit, size_t size_in_bytes, bool verbose = false)
{
// For fp16 example
// i4s {e7,e6,e5,e4,e3,e2,e1,e0}
// |-8b-||-8b-||-8b-||-8b-|
// interleave {e7,e5,e3,e1,e6,e4,e2,e0}
/*
BOTTOM_MASK 0 0 0 f 0 0 0 f
i4s e7 e5 e3 e1 e6 e4 e2 e0
selectedVal 0000 0000 0000 e1 0000 0000 0000 e0 // selectedVal = i4s & BOTTOM_MASK
h[0] 0110 0100 0 e1 0110 0100 0 e0 // selectVal | 0x6400
*/
// i2s {e15,e14,e13,e12,e11,e10,e9,e8,e7,e6,e5,e4,e3,e2,e1,e0}
// interleave {e15,e13,e11,e9,e7,e5,e3,e1,e14,e12,e10,e8,e6,e4,e2,e0}
// i1s {e31,e30,e29,e28,e27,e26,e25,e24,e23,e22,e21,e20,e19,e18,e17,e16,e15,e14,e13,e12,e11,e10,e9,e8,e7,e6,e5,e4,e3,e2,e1,e0}
// interleave {e31,e29,e27,e25,e23,e21,e19,e17,e15,e13,e11,e9,e7,e5,e3,e1,e30,e28,e26,e24,e22,e20,e18,e16,e14,e12,e10,e8,e6,e4,e2,e0}
// Assuming size is the number of int32 elements in origin_arr
size_t size = size_in_bytes / sizeof(int32_t);
int32_t *int32_origin = (int32_t *)origin_arr;
int32_t *int32_interleaved = (int32_t *)interleaved;
int mask = (1 << nbit) - 1;
int num_groups = (32 / nbit) / 2;
for (int idx = 0; idx < size; ++idx)
{
int32_t current_value = int32_origin[idx];
int32_t new_value = 0;
for (int i = 0; i < num_groups; ++i)
{
int left_shift = nbit * i;
int right_shift = nbit * (num_groups - i - 1);
new_value |= (current_value & (mask << nbit * (2 * i))) >> left_shift;
new_value |= (current_value & (mask << nbit * (2 * i + 1))) << right_shift;
if (verbose)
{
printf("put %d to %d\n", (2 * i), (nbit * (2 * i) - left_shift) / nbit);
printf("put %d to %d\n", (2 * i + 1), (nbit * (2 * i + 1) + right_shift) / nbit);
}
}
if (nbit == 2)
{
int32_t _new_value_n16 = (new_value & 0xff0000ff);
_new_value_n16 |= ((new_value & 0x0000ff00) >> 8) << 16;
_new_value_n16 |= ((new_value & 0x00ff0000) >> 16) << 8;
int32_interleaved[idx] = _new_value_n16;
}
else if (nbit == 1)
{
int32_t _new_value_n16 = (new_value & 0xf000000f);
_new_value_n16 |= ((new_value & 0x000000f0) >> 4) << 8;
_new_value_n16 |= ((new_value & 0x00000f00) >> 8) << 16;
_new_value_n16 |= ((new_value & 0x0000f000) >> 12) << 24;
_new_value_n16 |= ((new_value & 0x000f0000) >> 16) << 4;
_new_value_n16 |= ((new_value & 0x00f00000) >> 20) << 12;
_new_value_n16 |= ((new_value & 0x0f000000) >> 24) << 20;
int32_interleaved[idx] = _new_value_n16;
}
else
int32_interleaved[idx] = new_value;
}
// Convert back to int8_t if needed
memcpy(interleaved, int32_interleaved, size * sizeof(int32_t));
}
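// Illustrative example (not part of the original header): with nbit = 4 a packed
// word 0x76543210 (elements e7..e0) is rearranged to 0x75316420, i.e. the odd
// elements move into the upper half-word and the even elements into the lower
// half-word, matching the {e7,e5,e3,e1,e6,e4,e2,e0} layout the fp16 decoders expect.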
/*
Kind 0: original
Kind 1: rescale
Kind 2: quantized
# documents for zeros_mode:
# original: target = (dequantize_weight - zero_point) * scale
# rescale: target = dequantize_weight * scale - zero_point
# quantized: target = (dequantize_weight - dequantize_zeros) * scale
# Notice: only support "original" and "rescale" now
zeros_mode: Literal["original", "rescale", "quantized"] = "original"
*/
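// Illustrative example (not part of the original header), assuming a 4-bit weight
// w = 9, scale = 0.5 and zero_point = 8:
//   original:  target = (9 - 8) * 0.5 = 0.5
//   rescale:   target = 9 * 0.5 - 8   = -3.5
//   quantized: target = (9 - dequantized_zeros) * 0.5, where the zeros are
//   themselves stored as a packed low-bit tensor and decoded before the subtraction.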
template <typename T1, typename T2, bool isSigned = false, bool withScaling = false, bool withZeros = false, int ZerosKind = 1, typename T3=T2, typename T4=T3>
__device__ void decode_i4b_to_f16(T1 *_i4s, T2 *B_local_decode, const int N = 8, const T3 *scale = nullptr, const T4 *zeros = nullptr)
{
uint *h = reinterpret_cast<uint *>(B_local_decode);
static constexpr uint immLut = (0xf0 & 0xcc) | 0xaa;
static constexpr uint BOTTOM_MASK = 0x000f000f;
static constexpr uint FP16_TOP_MAGIC_NUM = 0x64006400;
// Minus 7 to scale the value to signed
static constexpr uint MEDIAN_NUM = isSigned ? 0x64076407 : 0x64006400;
uint const i4s = *reinterpret_cast<uint *>(_i4s);
#pragma unroll
// decode 2 elems at one time.
for (int i = 0; i < (N / 2); i++)
{
asm volatile("lop3.b32 %0, %1, %2, %3, %4;\n"
: "=r"(h[i])
: "r"(i4s >> (4 * i)), "n"(BOTTOM_MASK), "n"(FP16_TOP_MAGIC_NUM), "n"(immLut));
asm volatile("sub.f16x2 %0, %1, %2;\n" : "=r"(h[i]) : "r"(h[i]), "r"(MEDIAN_NUM));
}
}
template <typename T1, typename T2>
__device__ void decode_i4s_to_f16(T1 *_i4s, T2 *B_local_decode, const int N = 8)
{
decode_i4b_to_f16<T1, T2, true>(_i4s, B_local_decode, N);
}
template <typename T1, typename T2>
__device__ void decode_i4u_to_f16(T1 *_i4u, T2 *B_local_decode, const int N = 8)
{
decode_i4b_to_f16<T1, T2, false>(_i4u, B_local_decode, N);
}
template <typename T1, typename T2, typename T3, bool isSigned = false>
__device__ void decode_i4b_to_f16_scale(T1 *_i4s, T2 *B_local_decode, const int N = 8, const T3 *scale = nullptr)
{
uint *h = reinterpret_cast<uint *>(B_local_decode);
static constexpr uint immLut = (0xf0 & 0xcc) | 0xaa;
static constexpr uint BOTTOM_MASK = 0x000f000f;
static constexpr uint FP16_TOP_MAGIC_NUM = 0x64006400;
// Minus 7 to scale the value to signed
static constexpr uint MEDIAN_NUM = isSigned ? 0x64076407 : 0x64006400;
uint const i4s = *reinterpret_cast<uint *>(_i4s);
T3 const scale_r = *scale;
uint const packed_scales = __pack_half2(scale_r, scale_r);
#pragma unroll
// decode 2 elems at one time.
for (int i = 0; i < (N / 2); i++)
{
asm volatile("lop3.b32 %0, %1, %2, %3, %4;\n"
: "=r"(h[i])
: "r"(i4s >> (4 * i)), "n"(BOTTOM_MASK), "n"(FP16_TOP_MAGIC_NUM), "n"(immLut));
asm volatile("sub.f16x2 %0, %1, %2;\n" : "=r"(h[i]) : "r"(h[i]), "r"(MEDIAN_NUM));
asm volatile("fma.rn.f16x2 %0, %1, %2, %3;\n" : "=r"(h[i]) : "r"(h[i]), "r"(packed_scales), "r"(0));
}
}
template <typename T1, typename T2, typename T3>
__device__ void decode_i4s_to_f16_scale(T1 *_i4s, T2 *B_local_decode, T3 *scale = nullptr, const int N = 8)
{
decode_i4b_to_f16_scale<T1, T2, T3, true>(_i4s, B_local_decode, N, scale);
}
template <typename T1, typename T2, typename T3>
__device__ void decode_i4u_to_f16_scale(T1 *_i4u, T2 *B_local_decode, T3 *scale = nullptr, const int N = 8)
{
decode_i4b_to_f16_scale<T1, T2, T3, false>(_i4u, B_local_decode, N, scale);
}
template <typename T1, typename T2, typename T3, typename T4, bool isSigned = false>
__device__ void decode_i4b_to_f16_zeros_original(T1 *_i4s, T2 *B_local_decode, const int N = 8, const T3 *scale = nullptr, const T4 *zeros = nullptr)
{
uint *h = reinterpret_cast<uint *>(B_local_decode);
static constexpr uint immLut = (0xf0 & 0xcc) | 0xaa;
static constexpr uint BOTTOM_MASK = 0x000f000f;
static constexpr uint FP16_TOP_MAGIC_NUM = 0x64006400;
// Minus 7 to scale the value to signed
static constexpr uint MEDIAN_NUM = isSigned ? 0x64076407 : 0x64006400;
uint const i4s = *reinterpret_cast<uint *>(_i4s);
T3 const scale_r = *scale;
uint const packed_scales = __pack_half2(scale_r, scale_r);
// input zeros maybe int32(qzeros) or half format
T4 const zero_r = *zeros;
uint const packed_zeros = __pack_half2(zero_r, zero_r);
#pragma unroll
// decode 2 elems at one time.
for (int i = 0; i < (N / 2); i++)
{
asm volatile("lop3.b32 %0, %1, %2, %3, %4;\n"
: "=r"(h[i])
: "r"(i4s >> (4 * i)), "n"(BOTTOM_MASK), "n"(FP16_TOP_MAGIC_NUM), "n"(immLut));
asm volatile("sub.f16x2 %0, %1, %2;\n" : "=r"(h[i]) : "r"(h[i]), "r"(MEDIAN_NUM));
asm volatile("sub.f16x2 %0, %1, %2;\n" : "=r"(h[i]) : "r"(h[i]), "r"(packed_zeros));
asm volatile("fma.rn.f16x2 %0, %1, %2, %3;\n" : "=r"(h[i]) : "r"(h[i]), "r"(packed_scales), "r"(0));
}
}
template <typename T1, typename T2, typename T3, typename T4>
__device__ void decode_i4u_to_f16_scale_zeros_original(T1 *_i4u, T2 *B_local_decode, T3 *scale = nullptr, T4 *zeros = nullptr, const int N = 8)
{
decode_i4b_to_f16_zeros_original<T1, T2, T3, T4, false>(_i4u, B_local_decode, N, scale, zeros);
}
template <typename T1, typename T2, typename T3, typename T4, bool isSigned = false>
__device__ void decode_i4b_to_f16_scale_zeros_rescale(T1 *_i4s, T2 *B_local_decode, const int N = 8, const T3 *scale = nullptr, const T4 *zeros = nullptr)
{
uint *h = reinterpret_cast<uint *>(B_local_decode);
static constexpr uint immLut = (0xf0 & 0xcc) | 0xaa;
static constexpr uint BOTTOM_MASK = 0x000f000f;
static constexpr uint FP16_TOP_MAGIC_NUM = 0x64006400;
// Minus 7 to scale the value to signed
static constexpr uint MEDIAN_NUM = isSigned ? 0x64076407 : 0x64006400;
uint const i4s = *reinterpret_cast<uint *>(_i4s);
T3 const scale_r = *scale;
uint const packed_scales = __pack_half2(scale_r, scale_r);
T4 const zero_r = *zeros;
uint const packed_zeros = 0x80008000 | __pack_half2(zero_r, zero_r);
#pragma unroll
// decode 2 elems at one time.
for (int i = 0; i < (N / 2); i++)
{
asm volatile("lop3.b32 %0, %1, %2, %3, %4;\n"
: "=r"(h[i])
: "r"(i4s >> (4 * i)), "n"(BOTTOM_MASK), "n"(FP16_TOP_MAGIC_NUM), "n"(immLut));
asm volatile("sub.f16x2 %0, %1, %2;\n" : "=r"(h[i]) : "r"(h[i]), "r"(MEDIAN_NUM));
asm volatile("fma.rn.f16x2 %0, %1, %2, %3;\n" : "=r"(h[i]) : "r"(h[i]), "r"(packed_scales), "r"(packed_zeros));
}
}
template <typename T1, typename T2, typename T3, typename T4>
__device__ void decode_i4u_to_f16_scale_zeros_rescale(T1 *_i4u, T2 *B_local_decode, T3 *scale = nullptr, T4 *zeros = nullptr, const int N = 8)
{
decode_i4b_to_f16_scale_zeros_rescale<T1, T2, T3, T4, false>(_i4u, B_local_decode, N, scale, zeros);
}
template <typename T1, typename T2, typename T3, typename T4, bool isSigned = false>
__device__ void decode_i4b_to_f16_scale_zeros_quantized(T1 *_i4s, T2 *B_local_decode, const int N = 8, const T3 *scale = nullptr, const T4 *zeros = nullptr)
{
uint *h = reinterpret_cast<uint *>(B_local_decode);
static constexpr uint immLut = (0xf0 & 0xcc) | 0xaa;
static constexpr uint BOTTOM_MASK = 0x000f000f;
static constexpr uint FP16_TOP_MAGIC_NUM = 0x64006400;
// Minus 7 to scale the value to signed
uint const i4s = *reinterpret_cast<uint *>(_i4s);
T3 const scale_r = *scale;
uint const packed_scales = __pack_half2(scale_r, scale_r);
// input zeros maybe int32(qzeros) or half format
T4 const zero_r = *zeros;
uint median_num = ((0xe400 | zero_r) << 16) | (0xe400 | zero_r);
#pragma unroll
// decode 2 elems at one time.
for (int i = 0; i < (N / 2); i++)
{
asm volatile("lop3.b32 %0, %1, %2, %3, %4;\n"
: "=r"(h[i])
: "r"(i4s >> (4 * i)), "n"(BOTTOM_MASK), "n"(FP16_TOP_MAGIC_NUM), "n"(immLut));
asm volatile("add.f16x2 %0, %1, %2;\n" : "=r"(h[i]) : "r"(h[i]), "r"(median_num));
asm volatile("fma.rn.f16x2 %0, %1, %2, %3;\n" : "=r"(h[i]) : "r"(h[i]), "r"(packed_scales), "r"(0));
}
}
template <typename storage_dtype, typename target_dtype, typename scale_dtype, typename zero_dtype>
__device__ void decode_i4u_to_f16_scale_zeros_quantized(storage_dtype *_i4u, target_dtype *B_local_decode, scale_dtype *scale = nullptr, zero_dtype *zeros = nullptr, const int N = 8)
{
decode_i4b_to_f16_scale_zeros_quantized<storage_dtype, target_dtype, scale_dtype, zero_dtype, false>(_i4u, B_local_decode, N, scale, zeros);
}
/*
Kind 0: original
Kind 1: rescale
Kind 2: quantized
# documents for zeros_mode:
# original: target = (dequantize_weight - zero_point) * scale
# rescale: target = dequantize_weight * scale - zero_point
# quantized: target = (dequantize_weight - dequantize_zeros) * scale
# Notice: only support "original" and "rescale" now
zeros_mode: Literal["original", "rescale", "quantized"] = "original"
*/
template <typename T1, typename T2, bool isSigned = false, bool withScaling = false, bool withZeros = false, int ZerosKind = 1>
__device__ void decode_i2b_to_f16(T1 *_i2s, T2 *B_local_decode, const int N = 8, half *scale = nullptr, half *zeros = nullptr)
{
uint *h = reinterpret_cast<uint *>(B_local_decode);
static constexpr uint immLut = (0xf0 & 0xcc) | 0xaa;
static constexpr uint BOTTOM_MASK = 0x00030003;
static constexpr uint FP16_TOP_MAGIC_NUM = 0x64006400;
static constexpr uint MEDIAN_NUM = isSigned ? 0x64016401 : 0x64006400;
int16_t const i2s_i16 = *reinterpret_cast<int16_t *>(_i2s);
// decode 2 elems at one time.
// interleave {e15,e13,e11,e9,e7,e5,e3,e1,e14,e12,e10,e8,e6,e4,e2,e0}
// only decode for {x,x,x,x,e7,e5,e3,e1,x,x,x,x,e6,e4,e2,e0}
    // otherwise the pointer of _i2s should be advanced to the next group first
int i2s = (i2s_i16 & 0x00ff);
i2s |= ((i2s_i16 & 0xff00) << 8);
#pragma unroll
for (int i = 0; i < (N / 2); i++)
{
asm volatile("lop3.b32 %0, %1, %2, %3, %4;\n"
: "=r"(h[i])
: "r"(i2s >> (2 * i)), "n"(BOTTOM_MASK), "n"(FP16_TOP_MAGIC_NUM), "n"(immLut));
asm volatile("sub.f16x2 %0, %1, %2;\n" : "=r"(h[i]) : "r"(h[i]), "r"(MEDIAN_NUM));
if constexpr (withZeros && ZerosKind == 0)
{
asm volatile("sub.f16x2 %0, %1, %2;\n" : "=r"(h[i]) : "r"(h[i]), "r"(__pack_half2(*zeros, *zeros)));
}
if constexpr (withScaling)
{
asm volatile("fma.rn.f16x2 %0, %1, %2, %3;\n" : "=r"(h[i]) : "r"(h[i]), "r"(__pack_half2(*scale, *scale)), "r"(0));
}
if constexpr (withZeros && ZerosKind == 1)
{
asm volatile("sub.f16x2 %0, %1, %2;\n" : "=r"(h[i]) : "r"(h[i]), "r"(__pack_half2(*zeros, *zeros)));
}
}
}
template <typename T1, typename T2>
__device__ void decode_i2s_to_f16(T1 *_i2s, T2 *B_local_decode, const int N = 8)
{
decode_i2b_to_f16<T1, T2, true>(_i2s, B_local_decode, N);
}
template <typename T1, typename T2>
__device__ void decode_i2u_to_f16(T1 *_i2u, T2 *B_local_decode, const int N = 8)
{
decode_i2b_to_f16<T1, T2, false>(_i2u, B_local_decode, N);
}
template <typename T1, typename T2>
__device__ void decode_i2s_to_f16_scale(T1 *_i2s, T2 *B_local_decode, half *scale = nullptr, const int N = 8)
{
decode_i2b_to_f16<T1, T2, true, true>(_i2s, B_local_decode, N, scale);
}
template <typename T1, typename T2>
__device__ void decode_i2u_to_f16_scale(T1 *_i2u, T2 *B_local_decode, half *scale = nullptr, const int N = 8)
{
decode_i2b_to_f16<T1, T2, false, true>(_i2u, B_local_decode, N, scale);
}
template <typename T1, typename T2>
__device__ void decode_i2u_to_f16_scale_zeros_original(T1 *_i2u, T2 *B_local_decode, half *scale = nullptr, half *zeros = nullptr, const int N = 8)
{
decode_i2b_to_f16<T1, T2, false, true, true, 0>(_i2u, B_local_decode, N, scale, zeros);
}
template <typename T1, typename T2>
__device__ void decode_i2u_to_f16_scale_zeros_rescale(T1 *_i2u, T2 *B_local_decode, half *scale = nullptr, half *zeros = nullptr, const int N = 8)
{
decode_i2b_to_f16<T1, T2, false, true, true, 1>(_i2u, B_local_decode, N, scale, zeros);
}
template <typename T1, typename T2, typename T3, typename T4, bool isSigned = false>
__device__ void decode_i2b_to_f16_scale_zeros_quantized(T1 *_i2s, T2 *B_local_decode, const int N = 8, T3 *scale = nullptr, T4 *zeros = nullptr)
{
uint *h = reinterpret_cast<uint *>(B_local_decode);
static constexpr uint immLut = (0xf0 & 0xcc) | 0xaa;
static constexpr uint BOTTOM_MASK = 0x00030003;
static constexpr uint FP16_TOP_MAGIC_NUM = 0x64006400;
static constexpr uint MEDIAN_NUM = isSigned ? 0x64016401 : 0x64006400;
int16_t const i2s_i16 = *reinterpret_cast<int16_t *>(_i2s);
T3 const scale_r = *scale;
uint const packed_scales = __pack_half2(scale_r, scale_r);
T4 const zero_r = *zeros;
uint median_num = ((0xe400 | zero_r) << 16) | (0xe400 | zero_r);
// decode 2 elems at one time.
// interleave {e15,e13,e11,e9,e7,e5,e3,e1,e14,e12,e10,e8,e6,e4,e2,e0}
// only decode for {x,x,x,x,e7,e5,e3,e1,x,x,x,x,e6,e4,e2,e0}
    // otherwise the pointer of _i2s should be advanced to the next group first
int i2s = (i2s_i16 & 0x00ff);
i2s |= ((i2s_i16 & 0xff00) << 8);
#pragma unroll
for (int i = 0; i < (N / 2); i++)
{
asm volatile("lop3.b32 %0, %1, %2, %3, %4;\n"
: "=r"(h[i])
: "r"(i2s >> (2 * i)), "n"(BOTTOM_MASK), "n"(FP16_TOP_MAGIC_NUM), "n"(immLut));
asm volatile("add.f16x2 %0, %1, %2;\n" : "=r"(h[i]) : "r"(h[i]), "r"(median_num));
asm volatile("fma.rn.f16x2 %0, %1, %2, %3;\n" : "=r"(h[i]) : "r"(h[i]), "r"(packed_scales), "r"(0));
}
}
template <typename T1, typename T2, typename T3, typename T4>
__device__ void decode_i2u_to_f16_scale_zeros_quantized(T1 *_i2u, T2 *B_local_decode, T3 *scale = nullptr, T4 *zeros = nullptr, const int N = 8)
{
decode_i2b_to_f16_scale_zeros_quantized<T1, T2, T3, T4, false>(_i2u, B_local_decode, N, scale, zeros);
}
/*
Kind 0: original
Kind 1: rescale
Kind 2: quantized
# documents for zeros_mode:
# original: target = (dequantize_weight - zero_point) * scale
# rescale: target = dequantize_weight * scale - zero_point
# quantized: target = (dequantize_weight - dequantize_zeros) * scale
# Notice: only support "original" and "rescale" now
zeros_mode: Literal["original", "rescale", "quantized"] = "original"
*/
template <typename T1, typename T2, bool isSigned = false, bool withScaling = false, bool withZeros = false, int ZerosKind = 1>
__device__ void decode_i1b_to_f16(T1 *_i1s, T2 *B_local_decode, const int N = 8, half *scale = nullptr, half *zeros = nullptr)
{
uint *h = reinterpret_cast<uint *>(B_local_decode);
static constexpr uint immLut = (0xf0 & 0xcc) | 0xaa;
static constexpr uint BOTTOM_MASK = 0x00010001;
static constexpr uint FP16_TOP_MAGIC_NUM = 0x64006400;
static constexpr uint MEDIAN_NUM = isSigned ? 0x64006400 : 0x64006400;
static constexpr uint TRANSFORM_SUBTRACT = 0xbc00bc00; // for signed int 2x - 1
// interleave {e31,e29,e27,e25,e23,e21,e19,e17,e15,e13,e11,e9,e7,e5,e3,e1,e30,e28,e26,e24,e22,e20,e18,e16,e14,e12,e10,e8,e6,e4,e2,e0}
    // only decode e7,e5,e3,e1 and e6,e4,e2,e0
int8_t const i1s_i16 = *reinterpret_cast<int8_t *>(_i1s);
int i1s = (i1s_i16 & 0x0f);
i1s |= ((i1s_i16 & 0xf0) << 12);
#pragma unroll
// decode 2 elems at one time.
for (int i = 0; i < (N / 2); i++)
{
asm volatile("lop3.b32 %0, %1, %2, %3, %4;\n"
: "=r"(h[i])
: "r"(i1s >> (1 * i)), "n"(BOTTOM_MASK), "n"(FP16_TOP_MAGIC_NUM), "n"(immLut));
asm volatile("sub.f16x2 %0, %1, %2;\n" : "=r"(h[i]) : "r"(h[i]), "r"(MEDIAN_NUM));
if constexpr (isSigned)
{
asm volatile("add.f16x2 %0, %1, %2;\n" : "=r"(h[i]) : "r"(h[i]), "r"(h[i]));
asm volatile("add.f16x2 %0, %1, %2;\n" : "=r"(h[i]) : "r"(h[i]), "r"(TRANSFORM_SUBTRACT));
}
if constexpr (withZeros && ZerosKind == 0)
{
asm volatile("sub.f16x2 %0, %1, %2;\n" : "=r"(h[i]) : "r"(h[i]), "r"(__pack_half2(*zeros, *zeros)));
}
if constexpr (withScaling)
{
asm volatile("fma.rn.f16x2 %0, %1, %2, %3;\n" : "=r"(h[i]) : "r"(h[i]), "r"(__pack_half2(*scale, *scale)), "r"(0));
}
if constexpr (withZeros && ZerosKind == 1)
{
asm volatile("sub.f16x2 %0, %1, %2;\n" : "=r"(h[i]) : "r"(h[i]), "r"(__pack_half2(*zeros, *zeros)));
}
}
}
template <typename T1, typename T2>
__device__ void decode_i1s_to_f16(T1 *_i1s, T2 *B_local_decode, const int N = 8)
{
decode_i1b_to_f16<T1, T2, true>(_i1s, B_local_decode, N);
}
template <typename T1, typename T2>
__device__ void decode_i1u_to_f16(T1 *_i1u, T2 *B_local_decode, const int N = 8)
{
decode_i1b_to_f16<T1, T2, false>(_i1u, B_local_decode, N);
}
template <typename T1, typename T2, typename T3, bool isSigned = false>
__device__ void decode_i1b_to_f16_scale(T1 *_i1s, T2 *B_local_decode, const int N = 8, T3 *scale = nullptr)
{
uint *h = reinterpret_cast<uint *>(B_local_decode);
static constexpr uint immLut = (0xf0 & 0xcc) | 0xaa;
static constexpr uint BOTTOM_MASK = 0x00010001;
static constexpr uint FP16_TOP_MAGIC_NUM = 0x64006400;
static constexpr uint MEDIAN_NUM = isSigned ? 0x64006400 : 0x64006400;
// interleave {e31,e29,e27,e25,e23,e21,e19,e17,e15,e13,e11,e9,e7,e5,e3,e1,e30,e28,e26,e24,e22,e20,e18,e16,e14,e12,e10,e8,e6,e4,e2,e0}
    // only decode e7,e5,e3,e1 and e6,e4,e2,e0
int8_t const i1s_i16 = *reinterpret_cast<int8_t *>(_i1s);
int i1s = (i1s_i16 & 0x0f);
i1s |= ((i1s_i16 & 0xf0) << 12);
T3 const scale_r = *scale;
uint const packed_scales = __pack_half2(scale_r, scale_r);
#pragma unroll
// decode 2 elems at one time.
for (int i = 0; i < (N / 2); i++)
{
asm volatile("lop3.b32 %0, %1, %2, %3, %4;\n"
: "=r"(h[i])
: "r"(i1s >> (1 * i)), "n"(BOTTOM_MASK), "n"(FP16_TOP_MAGIC_NUM), "n"(immLut));
asm volatile("sub.f16x2 %0, %1, %2;\n" : "=r"(h[i]) : "r"(h[i]), "r"(MEDIAN_NUM));
asm volatile("fma.rn.f16x2 %0, %1, %2, %3;\n" : "=r"(h[i]) : "r"(h[i]), "r"(packed_scales), "r"(0));
}
}
template <typename T1, typename T2, typename T3>
__device__ void decode_i1s_to_f16_scale(T1 *_i1s, T2 *B_local_decode, T3 *scale = nullptr, const int N = 8)
{
decode_i1b_to_f16_scale<T1, T2, T3, true>(_i1s, B_local_decode, N, scale);
}
template <typename T1, typename T2, typename T3>
__device__ void decode_i1u_to_f16_scale(T1 *_i1u, T2 *B_local_decode, T3 *scale = nullptr, const int N = 8)
{
decode_i1b_to_f16_scale<T1, T2, T3, false>(_i1u, B_local_decode, N, scale);
}
template <typename T1, typename T2, typename T3, typename T4, bool isSigned = false>
__device__ void decode_i1b_to_f16_zeros_original(T1 *_i1s, T2 *B_local_decode, const int N = 8, T3 *scale = nullptr, T4 *zeros = nullptr)
{
uint *h = reinterpret_cast<uint *>(B_local_decode);
static constexpr uint immLut = (0xf0 & 0xcc) | 0xaa;
static constexpr uint BOTTOM_MASK = 0x00010001;
static constexpr uint FP16_TOP_MAGIC_NUM = 0x64006400;
static constexpr uint MEDIAN_NUM = isSigned ? 0x64006400 : 0x64006400;
// interleave {e31,e29,e27,e25,e23,e21,e19,e17,e15,e13,e11,e9,e7,e5,e3,e1,e30,e28,e26,e24,e22,e20,e18,e16,e14,e12,e10,e8,e6,e4,e2,e0}
    // only decode e7,e5,e3,e1 and e6,e4,e2,e0
int8_t const i1s_i16 = *reinterpret_cast<int8_t *>(_i1s);
int i1s = (i1s_i16 & 0x0f);
i1s |= ((i1s_i16 & 0xf0) << 12);
T3 const scale_r = *scale;
uint const packed_scales = __pack_half2(scale_r, scale_r);
// input zeros maybe int32(qzeros) or half format
T4 const zero_r = *zeros;
uint const packed_zeros = __pack_half2(zero_r, zero_r);
#pragma unroll
// decode 2 elems at one time.
for (int i = 0; i < (N / 2); i++)
{
asm volatile("lop3.b32 %0, %1, %2, %3, %4;\n"
: "=r"(h[i])
: "r"(i1s >> (1 * i)), "n"(BOTTOM_MASK), "n"(FP16_TOP_MAGIC_NUM), "n"(immLut));
asm volatile("sub.f16x2 %0, %1, %2;\n" : "=r"(h[i]) : "r"(h[i]), "r"(MEDIAN_NUM));
asm volatile("sub.f16x2 %0, %1, %2;\n" : "=r"(h[i]) : "r"(h[i]), "r"(packed_zeros));
asm volatile("fma.rn.f16x2 %0, %1, %2, %3;\n" : "=r"(h[i]) : "r"(h[i]), "r"(packed_scales), "r"(0));
}
}
template <typename T1, typename T2, typename T3, typename T4>
__device__ void decode_i1u_to_f16_scale_zeros_original(T1 *_i1u, T2 *B_local_decode, T3 *scale = nullptr, T4 *zeros = nullptr, const int N = 8)
{
decode_i1b_to_f16_zeros_original<T1, T2, T3, T4, false>(_i1u, B_local_decode, N, scale, zeros);
}
template <typename T1, typename T2, typename T3, typename T4, bool isSigned = false>
__device__ void decode_i1b_to_f16_scale_zeros_rescale(T1 *_i1s, T2 *B_local_decode, const int N = 8, T3 *scale = nullptr, T4 *zeros = nullptr)
{
uint *h = reinterpret_cast<uint *>(B_local_decode);
static constexpr uint immLut = (0xf0 & 0xcc) | 0xaa;
static constexpr uint BOTTOM_MASK = 0x00010001;
static constexpr uint FP16_TOP_MAGIC_NUM = 0x64006400;
static constexpr uint MEDIAN_NUM = isSigned ? 0x64006400 : 0x64006400;
// interleave {e31,e29,e27,e25,e23,e21,e19,e17,e15,e13,e11,e9,e7,e5,e3,e1,e30,e28,e26,e24,e22,e20,e18,e16,e14,e12,e10,e8,e6,e4,e2,e0}
    // only decode e7,e5,e3,e1 and e6,e4,e2,e0
int8_t const i1s_i16 = *reinterpret_cast<int8_t *>(_i1s);
int i1s = (i1s_i16 & 0x0f);
i1s |= ((i1s_i16 & 0xf0) << 12);
T3 const scale_r = *scale;
uint const packed_scales = __pack_half2(scale_r, scale_r);
T4 const zero_r = *zeros;
uint const packed_zeros = 0x80008000 | __pack_half2(zero_r, zero_r);
#pragma unroll
// decode 2 elems at one time.
for (int i = 0; i < (N / 2); i++)
{
asm volatile("lop3.b32 %0, %1, %2, %3, %4;\n"
: "=r"(h[i])
: "r"(i1s >> (1 * i)), "n"(BOTTOM_MASK), "n"(FP16_TOP_MAGIC_NUM), "n"(immLut));
asm volatile("sub.f16x2 %0, %1, %2;\n" : "=r"(h[i]) : "r"(h[i]), "r"(MEDIAN_NUM));
asm volatile("fma.rn.f16x2 %0, %1, %2, %3;\n" : "=r"(h[i]) : "r"(h[i]), "r"(packed_scales), "r"(packed_zeros));
}
}
template <typename T1, typename T2, typename T3, typename T4>
__device__ void decode_i1u_to_f16_scale_zeros_rescale(T1 *_i4u, T2 *B_local_decode, T3 *scale = nullptr, T4 *zeros = nullptr, const int N = 8)
{
decode_i1b_to_f16_scale_zeros_rescale<T1, T2, T3, T4, false>(_i4u, B_local_decode, N, scale, zeros);
}
void general_interleave_int8(int8_t *origin_arr, int8_t *interleaved, const int nbit, size_t size_in_bytes, bool verbose = false)
{
    // For the int8 example
// i4s {e7,e6,e5,e4,e3,e2,e1,e0}
// |-8b-||-8b-||-8b-||-8b-|
// interleave {e7,e3,e6,e2,e5,e1,e4,e0}
/*
BOTTOM_MASK 0 0 0 f 0 0 0 f
i4s e7 e3 e6 e2 e5 e1 e4 e0
selectedVal 0000 e3 0000 e2 0000 e1 0000 e0 // selectedVal = i4s & BOTTOM_MASK
s[0] 0 e3 0 e2 0 e1 0 e0
*/
// |-----8b-------||-------8b----||----8b---||-----8b----|
// i2s {e15,e14,e13,e12,e11,e10,e9,e8,e7,e6,e5,e4,e3,e2,e1,e0}
// interleave {e15,e11,e7,e3,e14,e10,e6,e2,e13,e9,e5,e1,e12,e8,e4,e0}
// |-------------8b----------------||--------------8b--------------||------------8b--------------||--------8b-----------|
// i1s {e31,e30,e29,e28,e27,e26,e25,e24,e23,e22,e21,e20,e19,e18,e17,e16,e15,e14,e13,e12,e11,e10,e9,e8,e7,e6,e5,e4,e3,e2,e1,e0}
// interleave {e31,e27,e23,e19,e15,e11,e7,e3,e30,e26,e22,e18,e14,e10,e6,e2,e29,e25,e21,e17,e13,e9,e5,e1,e28,e24,e20,e16,e12,e8,e4,e0}
// Assuming size is the number of int32 elements in origin_arr
size_t size = size_in_bytes / sizeof(int32_t);
int32_t *int32_origin = (int32_t *)origin_arr;
int32_t *int32_interleaved = (int32_t *)interleaved;
constexpr int bits_stride = 8;
int elems_per_group = bits_stride / nbit;
int mask = (1 << nbit) - 1;
int num_groups = 32 / bits_stride;
for (int idx = 0; idx < size; ++idx)
{
int32_t current_value = int32_origin[idx];
int32_t new_value = 0;
for (int i = 0; i < num_groups; ++i)
{
for (int j = 0; j < elems_per_group; ++j)
{
int offset = i * elems_per_group + j;
int shift = (offset % num_groups) * bits_stride + (offset / num_groups) * nbit;
int group_value = (current_value >> (nbit * (i * elems_per_group + j))) & mask;
new_value |= group_value << shift;
if (verbose)
printf("put %d to %d\n", offset, shift);
}
}
if (nbit == 1)
{
int32_t _new_value_n16 = (new_value & 0xf0f00f0f);
_new_value_n16 |= ((new_value & 0x000000f0) >> 4) << 16;
_new_value_n16 |= ((new_value & 0x0000f000) >> 12) << 24;
_new_value_n16 |= ((new_value & 0x000f0000) >> 16) << 4;
_new_value_n16 |= ((new_value & 0x0f000000) >> 24) << 12;
int32_interleaved[idx] = _new_value_n16;
}
else
int32_interleaved[idx] = new_value;
}
// Convert back to int8_t if needed
memcpy(interleaved, int32_interleaved, size * sizeof(int32_t));
}
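// Illustrative example (not part of the original header): with nbit = 4 a packed
// word 0x76543210 (elements e7..e0) becomes 0x73625140, i.e. each 8-bit lane of
// the output receives one element, matching the {e7,e3,e6,e2,e5,e1,e4,e0} layout
// the int8 decoders expect.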
template <typename T1, typename T2, bool isSigned>
__device__ void decode_i4b_to_i8s(T1 *_i4b, T2 *_i8s, const int N = 16)
{
uint *i8s = reinterpret_cast<uint *>(_i8s);
uint *i4b = reinterpret_cast<uint *>(_i4b);
// First, we extract the i4s and construct an intermediate i8 number.
static constexpr uint immLut = (0xf0 & 0xcc) | 0xaa;
static constexpr uint BOTTOM_MASK = 0x0f0f0f0f; // 0xf -> 0b1111 select 0,4,8,12
static constexpr uint I4b_TO_I8s_MAGIC_NUM = 0x00000000; // 0
static constexpr uint MEDIAN_NUM = isSigned ? 0x07070707 : 0x00000000;
#pragma unroll
for (int i = 0; i < (N / 8); i++)
{
        // Extract the low 4 bits of each byte lane: (i4b >> (4 * i)) & 0x0f0f0f0f
asm volatile("lop3.b32 %0, %1, %2, %3, %4;\n"
: "=r"(i8s[i])
: "r"(i4b[0] >> (4 * i)), "n"(BOTTOM_MASK), "n"(I4b_TO_I8s_MAGIC_NUM), "n"(immLut));
asm volatile("lop3.b32 %0, %1, %2, %3, %4;\n"
: "=r"(i8s[i + 2])
: "r"(i4b[1] >> (4 * i)), "n"(BOTTOM_MASK), "n"(I4b_TO_I8s_MAGIC_NUM), "n"(immLut));
if constexpr (isSigned)
{
i8s[i] = __vsubss4(i8s[i], MEDIAN_NUM);
i8s[i + 2] = __vsubss4(i8s[i + 2], MEDIAN_NUM);
}
}
}
template <typename T1, typename T2>
__device__ void decode_i4s_to_i8s(T1 *_i4s, T2 *B_local_decode, const int N = 16)
{
decode_i4b_to_i8s<T1, T2, true>(_i4s, B_local_decode, N);
}
template <typename T1, typename T2>
__device__ void decode_i4u_to_i8s(T1 *_i4u, T2 *B_local_decode, const int N = 16)
{
decode_i4b_to_i8s<T1, T2, false>(_i4u, B_local_decode, N);
}
template <typename T1, typename T2, bool isSigned>
__device__ void decode_i2b_to_i8s(T1 *_i2b, T2 *_i8s, const int N = 16)
{
// convert 8 int2b_t to 8 int8b_t -> 2 int32
uint *i8s = reinterpret_cast<uint *>(_i8s);
// i2b = {e7,e6,e5,e4,e3,e2,e1,e0}
// also require interleave {e7,e3,e6,e2,e5,e1,e4,e0}
uint const i2b = *reinterpret_cast<uint *>(_i2b);
    // First, we extract the i2s and construct an intermediate int8 number.
    static constexpr uint immLut = (0xf0 & 0xcc) | 0xaa;     // 0b11101010
    static constexpr uint BOTTOM_MASK = 0x03030303;          // 0x3 -> 0b11 select 0,3
    static constexpr uint I8s_MAGIC_NUM = 0x00000000;        // 0
static constexpr uint MEDIAN_NUM = isSigned ? 0x01010101 : 0x00000000;
#pragma unroll
for (int i = 0; i < (N / 4); i++)
{
asm volatile("lop3.b32 %0, %1, %2, %3, %4;\n"
: "=r"(i8s[i])
: "r"(i2b >> (2 * i)), "n"(BOTTOM_MASK), "n"(I8s_MAGIC_NUM), "n"(immLut));
if constexpr (isSigned)
{
i8s[i] = __vsubss4(i8s[i], MEDIAN_NUM);
}
}
}
template <typename T1, typename T2>
__device__ void decode_i2s_to_i8s(T1 *_i2s, T2 *B_local_decode, const int N = 16)
{
decode_i2b_to_i8s<T1, T2, true>(_i2s, B_local_decode, N);
}
template <typename T1, typename T2>
__device__ void decode_i2u_to_i8s(T1 *_i2u, T2 *B_local_decode, const int N = 16)
{
decode_i2b_to_i8s<T1, T2, false>(_i2u, B_local_decode, N);
}
template <typename T1, typename T2, bool isSigned>
__device__ void decode_i1b_to_i8s(T1 *_i1b, T2 *_i8s, const int N = 16)
{
int i8s[4];
// vector load
*reinterpret_cast<int4 *>(i8s) = *reinterpret_cast<int4 *>(_i8s);
int16_t i1b_i16 = *reinterpret_cast<int16_t *>(_i1b);
// permutate: {e0,e4,e8,e12,e2,e6,e10,e14,e1,e5,e9,e13,e3,e7,e11,e15}
// into: {e0,e4,e8,e12,x,x,x,x,e1,e5,e9,x,x,x,x,e13,e2,e6,e10,e14,e1,e5,e9,e13,e3,e7,e11,e15,x,x,x,x}
int i1b = (i1b_i16 & 0x0f0f);
i1b |= ((i1b_i16 & 0xf0f0) << 12);
// i1b {0..,e15,e14,e13,e12,e11,e10,e9,e8,e7,e6,e5,e4,e3,e2,e1,e0}
// interleave {0..,e15,e13,e11,e9,e7,e5,e3,e1,e14,e12,e10,e8,e6,e4,e2,e0}
    // First, we extract the i1b and construct an intermediate int8 number.
static constexpr uint immLut = (0xf0 & 0xcc) | 0xaa; // 0b11101010
static constexpr uint BOTTOM_MASK = 0x01010101; // 0x1 -> 0b01 select 0,1
static constexpr uint I8s_MAGIC_NUM = 0x00000000;
static constexpr uint TRANSFORM_SUBTRACT = 0xffffffff; // for signed int 2x - 1
for (int i = 0; i < N / 4; i++)
{
asm volatile("lop3.b32 %0, %1, %2, %3, %4;\n"
: "=r"(i8s[i])
: "r"(i1b >> i), "n"(BOTTOM_MASK), "n"(I8s_MAGIC_NUM), "n"(immLut));
if constexpr (isSigned)
{
int _i8s = i8s[i];
int tmp = __vcmpleu4(_i8s, 0);
_i8s |= tmp;
i8s[i] = _i8s;
            // i8s[i] = __vadd4(__vadd4(i8s[i], i8s[i]), TRANSFORM_SUBTRACT);
}
}
// vector store
*reinterpret_cast<int4 *>(_i8s) = *reinterpret_cast<int4 *>(i8s);
}
template <typename T1, typename T2>
__device__ void decode_i1s_to_i8s(T1 *_i1s, T2 *B_local_decode, const int N = 16)
{
decode_i1b_to_i8s<T1, T2, true>(_i1s, B_local_decode, N);
}
template <typename T1, typename T2>
__device__ void decode_i1u_to_i8s(T1 *_i1u, T2 *B_local_decode, const int N = 16)
{
decode_i1b_to_i8s<T1, T2, false>(_i1u, B_local_decode, N);
}
| BitBLAS/testing/cpp/lop3_type_conversion/fast_decoding.hpp/0 | {
"file_path": "BitBLAS/testing/cpp/lop3_type_conversion/fast_decoding.hpp",
"repo_id": "BitBLAS",
"token_count": 18164
} | 152 |
from .vg_caption_datamodule import VisualGenomeCaptionDataModule
from .f30k_caption_karpathy_datamodule import F30KCaptionKarpathyDataModule
from .coco_caption_karpathy_datamodule import CocoCaptionKarpathyDataModule
from .conceptual_caption_datamodule import ConceptualCaptionDataModule
from .sbu_datamodule import SBUCaptionDataModule
from .vqav2_datamodule import VQAv2DataModule
from .nlvr2_datamodule import NLVR2DataModule
from .snli_datamodule import SNLIDataModule
_datamodules = {
"vg": VisualGenomeCaptionDataModule,
"f30k": F30KCaptionKarpathyDataModule,
"coco": CocoCaptionKarpathyDataModule,
"gcc": ConceptualCaptionDataModule,
"sbu": SBUCaptionDataModule,
"vqa": VQAv2DataModule,
"nlvr2": NLVR2DataModule,
"snli": SNLIDataModule,
}
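# Minimal usage sketch (illustrative, not part of the original module): the registry
# above is typically used to instantiate the datamodules requested in a config, e.g.
#   dms = [_datamodules[name](_config) for name in _config["datasets"]]
# where "_config" stands in for the experiment configuration dict.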
| BridgeTower/src/datamodules/__init__.py/0 | {
"file_path": "BridgeTower/src/datamodules/__init__.py",
"repo_id": "BridgeTower",
"token_count": 303
} | 153 |
from .base_dataset import BaseDataset
import sys
import random
class NLVR2Dataset(BaseDataset):
def __init__(self, *args, split="", **kwargs):
assert split in ["train", "val", "test"]
self.split = split
if split == "train":
names = ["nlvr2_train"]
elif split == "val":
names = ["nlvr2_dev", "nlvr2_test1"] # ViLT, METER
elif split == "test":
names = ["nlvr2_dev", "nlvr2_test1"] # ViLT, METER
super().__init__(
*args,
**kwargs,
names=names,
text_column_name="questions",
remove_duplicate=False,
)
def __getitem__(self, index):
result = None
while result is None:
try:
image_tensor_0 = self.get_image(index, image_key="image_0")["image"]
image_tensor_1 = self.get_image(index, image_key="image_1")["image"]
text = self.get_text(index)["text"]
result = True
except:
print(
f"error while read file idx {index} in {self.names[0]}",
file=sys.stderr,
)
index = random.randint(0, len(self.index_mapper) - 1)
index, question_index = self.index_mapper[index]
answers = self.table["answers"][index][question_index].as_py()
answers = answers == "True"
return {
"image_0": image_tensor_0,
"image_1": image_tensor_1,
"text": text,
"answers": answers,
"table_name": self.table_names[index],
}
| BridgeTower/src/datasets/nlvr2_dataset.py/0 | {
"file_path": "BridgeTower/src/datasets/nlvr2_dataset.py",
"repo_id": "BridgeTower",
"token_count": 872
} | 154 |
""" Model creation / weight loading / state_dict helpers
Hacked together by / Copyright 2020 Ross Wightman
"""
import logging
import os
import math
from collections import OrderedDict
from copy import deepcopy
from typing import Any, Callable, Optional, Tuple
import torch
import torch.nn as nn
from timm.models.features import FeatureListNet, FeatureDictNet, FeatureHookNet
from timm.models.hub import has_hf_hub, download_cached_file, load_state_dict_from_hf #, load_state_dict_from_url
from torch.utils.model_zoo import load_url as load_state_dict_from_url
from timm.models.layers import Conv2dSame, Linear
def swin_adapt_position_encoding(model, before=384, patch_size=32, after=384,
suffix='relative_position_bias_table'):
if after == before:
return model
grid_before = int(before/32)
grid_after = int(after/32) #after // patch_size
    before = (2*grid_before-1)
    after = (2*grid_after-1)
keys = [k for k in model if k.endswith(suffix)]
assert len(keys) > 0
for key in keys:
pos_embed = model[key]
pos_embed = pos_embed.transpose(0, 1).view(-1, before, before)
pos_embed = torch.nn.functional.interpolate(pos_embed.unsqueeze(0), size=(after, after), mode='bicubic')
pos_embed = pos_embed.squeeze(0).permute((1, 2, 0))
pos_embed = pos_embed.contiguous().view(-1, pos_embed.size(-1))
model[key] = pos_embed
keys = [k for k in model if k.endswith('attn_mask')]
for key in keys:
model.pop(key)
keys = [k for k in model if k.endswith('relative_position_index')]
for key in keys:
model.pop(key)
return model
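# Minimal usage sketch (illustrative, not part of the original file): resizing the
# relative position bias tables of a Swin checkpoint pretrained at 384x384 before
# fine-tuning at 576x576; the checkpoint path is a placeholder.
#   ckpt = torch.load("swin_checkpoint.pth", map_location="cpu")
#   ckpt["model"] = swin_adapt_position_encoding(ckpt["model"], before=384, after=576)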
_logger = logging.getLogger(__name__)
def load_state_dict(checkpoint_path, use_ema=False):
if checkpoint_path and os.path.isfile(checkpoint_path):
checkpoint = torch.load(checkpoint_path, map_location='cpu')
state_dict_key = 'state_dict'
if isinstance(checkpoint, dict):
if use_ema and 'state_dict_ema' in checkpoint:
state_dict_key = 'state_dict_ema'
if state_dict_key and state_dict_key in checkpoint:
new_state_dict = OrderedDict()
for k, v in checkpoint[state_dict_key].items():
# strip `module.` prefix
name = k[7:] if k.startswith('module') else k
new_state_dict[name] = v
state_dict = new_state_dict
else:
state_dict = checkpoint
_logger.info("Loaded {} from checkpoint '{}'".format(state_dict_key, checkpoint_path))
return state_dict
else:
_logger.error("No checkpoint found at '{}'".format(checkpoint_path))
raise FileNotFoundError()
def load_checkpoint(model, checkpoint_path, use_ema=False, strict=True):
if os.path.splitext(checkpoint_path)[-1].lower() in ('.npz', '.npy'):
# numpy checkpoint, try to load via model specific load_pretrained fn
if hasattr(model, 'load_pretrained'):
model.load_pretrained(checkpoint_path)
else:
raise NotImplementedError('Model cannot load numpy checkpoint')
return
state_dict = load_state_dict(checkpoint_path, use_ema)
model.load_state_dict(state_dict, strict=strict)
def resume_checkpoint(model, checkpoint_path, optimizer=None, loss_scaler=None, log_info=True):
resume_epoch = None
if os.path.isfile(checkpoint_path):
checkpoint = torch.load(checkpoint_path, map_location='cpu')
if isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
if log_info:
_logger.info('Restoring model state from checkpoint...')
new_state_dict = OrderedDict()
for k, v in checkpoint['state_dict'].items():
name = k[7:] if k.startswith('module') else k
new_state_dict[name] = v
model.load_state_dict(new_state_dict)
if optimizer is not None and 'optimizer' in checkpoint:
if log_info:
_logger.info('Restoring optimizer state from checkpoint...')
optimizer.load_state_dict(checkpoint['optimizer'])
if loss_scaler is not None and loss_scaler.state_dict_key in checkpoint:
if log_info:
_logger.info('Restoring AMP loss scaler state from checkpoint...')
loss_scaler.load_state_dict(checkpoint[loss_scaler.state_dict_key])
if 'epoch' in checkpoint:
resume_epoch = checkpoint['epoch']
if 'version' in checkpoint and checkpoint['version'] > 1:
resume_epoch += 1 # start at the next epoch, old checkpoints incremented before save
if log_info:
_logger.info("Loaded checkpoint '{}' (epoch {})".format(checkpoint_path, checkpoint['epoch']))
else:
model.load_state_dict(checkpoint)
if log_info:
_logger.info("Loaded checkpoint '{}'".format(checkpoint_path))
return resume_epoch
else:
_logger.error("No checkpoint found at '{}'".format(checkpoint_path))
raise FileNotFoundError()
def load_custom_pretrained(model, default_cfg=None, load_fn=None, progress=False, check_hash=False):
r"""Loads a custom (read non .pth) weight file
Downloads checkpoint file into cache-dir like torch.hub based loaders, but calls
    a passed-in custom load fn, or the `load_pretrained` model member fn.
If the object is already present in `model_dir`, it's deserialized and returned.
The default value of `model_dir` is ``<hub_dir>/checkpoints`` where
`hub_dir` is the directory returned by :func:`~torch.hub.get_dir`.
Args:
model: The instantiated model to load weights into
default_cfg (dict): Default pretrained model cfg
        load_fn: An external stand-alone fn that loads weights into the provided model, otherwise a fn named
            'load_pretrained' on the model will be called if it exists
progress (bool, optional): whether or not to display a progress bar to stderr. Default: False
check_hash(bool, optional): If True, the filename part of the URL should follow the naming convention
``filename-<sha256>.ext`` where ``<sha256>`` is the first eight or more
digits of the SHA256 hash of the contents of the file. The hash is used to
ensure unique names and to verify the contents of the file. Default: False
"""
default_cfg = default_cfg or getattr(model, 'default_cfg', None) or {}
pretrained_url = default_cfg.get('url', None)
if not pretrained_url:
_logger.warning("No pretrained weights exist for this model. Using random initialization.")
return
cached_file = download_cached_file(default_cfg['url'], check_hash=check_hash, progress=progress)
if load_fn is not None:
load_fn(model, cached_file)
elif hasattr(model, 'load_pretrained'):
model.load_pretrained(cached_file)
else:
_logger.warning("Valid function to load pretrained weights is not available, using random initialization.")
def adapt_input_conv(in_chans, conv_weight):
conv_type = conv_weight.dtype
conv_weight = conv_weight.float() # Some weights are in torch.half, ensure it's float for sum on CPU
O, I, J, K = conv_weight.shape
if in_chans == 1:
if I > 3:
assert conv_weight.shape[1] % 3 == 0
# For models with space2depth stems
conv_weight = conv_weight.reshape(O, I // 3, 3, J, K)
conv_weight = conv_weight.sum(dim=2, keepdim=False)
else:
conv_weight = conv_weight.sum(dim=1, keepdim=True)
elif in_chans != 3:
if I != 3:
raise NotImplementedError('Weight format not supported by conversion.')
else:
# NOTE this strategy should be better than random init, but there could be other combinations of
# the original RGB input layer weights that'd work better for specific cases.
repeat = int(math.ceil(in_chans / 3))
conv_weight = conv_weight.repeat(1, repeat, 1, 1)[:, :in_chans, :, :]
conv_weight *= (3 / float(in_chans))
conv_weight = conv_weight.to(conv_type)
return conv_weight
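# Illustrative sketch (not part of the original file): adapting a pretrained
# 3-channel stem conv to single-channel input sums the RGB filters.
#   w = torch.randn(64, 3, 7, 7)      # pretrained conv1 weight
#   w1 = adapt_input_conv(1, w)       # -> shape (64, 1, 7, 7)
#   assert torch.allclose(w1.squeeze(1), w.sum(dim=1))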
def load_pretrained(model, img_size, default_cfg=None, num_classes=1000, in_chans=3, filter_fn=None, strict=True, progress=False, resolution_before=384):
""" Load pretrained checkpoint
Args:
model (nn.Module) : PyTorch model module
default_cfg (Optional[Dict]): default configuration for pretrained weights / target dataset
num_classes (int): num_classes for model
in_chans (int): in_chans for model
filter_fn (Optional[Callable]): state_dict filter fn for load (takes state_dict, model as args)
strict (bool): strict load of checkpoint
progress (bool): enable progress bar for weight download
"""
default_cfg = default_cfg or getattr(model, 'default_cfg', None) or {}
pretrained_url = default_cfg.get('url', None)
hf_hub_id = default_cfg.get('hf_hub', None)
if not pretrained_url and not hf_hub_id:
_logger.warning("No pretrained weights exist for this model. Using random initialization.")
return
if hf_hub_id and has_hf_hub(necessary=not pretrained_url):
_logger.info(f'Loading pretrained weights from Hugging Face hub ({hf_hub_id})')
state_dict = load_state_dict_from_hf(hf_hub_id)
else:
_logger.info(f'Loading pretrained weights from url ({pretrained_url})')
state_dict = load_state_dict_from_url(pretrained_url, progress=progress, map_location='cpu')
swin_adapt_position_encoding(state_dict['model'], before=resolution_before, after=img_size)
if filter_fn is not None:
        # for backwards compat with filter fns that take one arg, try one arg first, then two
try:
state_dict = filter_fn(state_dict)
except TypeError:
state_dict = filter_fn(state_dict, model)
input_convs = default_cfg.get('first_conv', None)
if input_convs is not None and in_chans != 3:
if isinstance(input_convs, str):
input_convs = (input_convs,)
for input_conv_name in input_convs:
weight_name = input_conv_name + '.weight'
try:
state_dict[weight_name] = adapt_input_conv(in_chans, state_dict[weight_name])
_logger.info(
f'Converted input conv {input_conv_name} pretrained weights from 3 to {in_chans} channel(s)')
except NotImplementedError as e:
del state_dict[weight_name]
strict = False
_logger.warning(
f'Unable to convert pretrained {input_conv_name} weights, using random init for this layer.')
classifiers = default_cfg.get('classifier', None)
label_offset = default_cfg.get('label_offset', 0)
if classifiers is not None:
if isinstance(classifiers, str):
classifiers = (classifiers,)
if num_classes != default_cfg['num_classes']:
for classifier_name in classifiers:
# completely discard fully connected if model num_classes doesn't match pretrained weights
del state_dict[classifier_name + '.weight']
del state_dict[classifier_name + '.bias']
strict = False
elif label_offset > 0:
for classifier_name in classifiers:
# special case for pretrained weights with an extra background class in pretrained weights
classifier_weight = state_dict[classifier_name + '.weight']
state_dict[classifier_name + '.weight'] = classifier_weight[label_offset:]
classifier_bias = state_dict[classifier_name + '.bias']
state_dict[classifier_name + '.bias'] = classifier_bias[label_offset:]
model.load_state_dict(state_dict, strict=strict)
def extract_layer(model, layer):
layer = layer.split('.')
module = model
if hasattr(model, 'module') and layer[0] != 'module':
module = model.module
if not hasattr(model, 'module') and layer[0] == 'module':
layer = layer[1:]
for l in layer:
if hasattr(module, l):
if not l.isdigit():
module = getattr(module, l)
else:
module = module[int(l)]
else:
return module
return module
def set_layer(model, layer, val):
layer = layer.split('.')
module = model
if hasattr(model, 'module') and layer[0] != 'module':
module = model.module
lst_index = 0
module2 = module
for l in layer:
if hasattr(module2, l):
if not l.isdigit():
module2 = getattr(module2, l)
else:
module2 = module2[int(l)]
lst_index += 1
lst_index -= 1
for l in layer[:lst_index]:
if not l.isdigit():
module = getattr(module, l)
else:
module = module[int(l)]
l = layer[lst_index]
setattr(module, l, val)
def adapt_model_from_string(parent_module, model_string):
separator = '***'
state_dict = {}
lst_shape = model_string.split(separator)
for k in lst_shape:
k = k.split(':')
key = k[0]
shape = k[1][1:-1].split(',')
if shape[0] != '':
state_dict[key] = [int(i) for i in shape]
new_module = deepcopy(parent_module)
for n, m in parent_module.named_modules():
old_module = extract_layer(parent_module, n)
if isinstance(old_module, nn.Conv2d) or isinstance(old_module, Conv2dSame):
if isinstance(old_module, Conv2dSame):
conv = Conv2dSame
else:
conv = nn.Conv2d
s = state_dict[n + '.weight']
in_channels = s[1]
out_channels = s[0]
g = 1
if old_module.groups > 1:
in_channels = out_channels
g = in_channels
new_conv = conv(
in_channels=in_channels, out_channels=out_channels, kernel_size=old_module.kernel_size,
bias=old_module.bias is not None, padding=old_module.padding, dilation=old_module.dilation,
groups=g, stride=old_module.stride)
set_layer(new_module, n, new_conv)
if isinstance(old_module, nn.BatchNorm2d):
new_bn = nn.BatchNorm2d(
num_features=state_dict[n + '.weight'][0], eps=old_module.eps, momentum=old_module.momentum,
affine=old_module.affine, track_running_stats=True)
set_layer(new_module, n, new_bn)
if isinstance(old_module, nn.Linear):
# FIXME extra checks to ensure this is actually the FC classifier layer and not a diff Linear layer?
num_features = state_dict[n + '.weight'][1]
new_fc = Linear(
in_features=num_features, out_features=old_module.out_features, bias=old_module.bias is not None)
set_layer(new_module, n, new_fc)
if hasattr(new_module, 'num_features'):
new_module.num_features = num_features
new_module.eval()
parent_module.eval()
return new_module
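# The model_string consumed above is a '***'-separated list of "name:[shape]" entries,
# e.g. (hypothetical layer names) "conv_stem.weight:[24, 3, 3, 3]***bn1.weight:[24]".
# Each entry shrinks the matching conv/bn/linear layer to the listed channel counts, which is
# how pruned model variants are rebuilt from the text files in ./pruned/ (see below).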
def adapt_model_from_file(parent_module, model_variant):
adapt_file = os.path.join(os.path.dirname(__file__), 'pruned', model_variant + '.txt')
with open(adapt_file, 'r') as f:
return adapt_model_from_string(parent_module, f.read().strip())
def default_cfg_for_features(default_cfg):
default_cfg = deepcopy(default_cfg)
# remove default pretrained cfg fields that don't have much relevance for feature backbone
to_remove = ('num_classes', 'crop_pct', 'classifier', 'global_pool') # add default final pool size?
for tr in to_remove:
default_cfg.pop(tr, None)
return default_cfg
def overlay_external_default_cfg(default_cfg, kwargs):
""" Overlay 'external_default_cfg' in kwargs on top of default_cfg arg.
"""
external_default_cfg = kwargs.pop('external_default_cfg', None)
if external_default_cfg:
default_cfg.pop('url', None) # url should come from external cfg
default_cfg.pop('hf_hub', None) # hf hub id should come from external cfg
default_cfg.update(external_default_cfg)
def set_default_kwargs(kwargs, names, default_cfg):
for n in names:
# for legacy reasons, model __init__args uses img_size + in_chans as separate args while
# default_cfg has one input_size=(C, H ,W) entry
if n == 'img_size':
input_size = default_cfg.get('input_size', None)
if input_size is not None:
assert len(input_size) == 3
kwargs.setdefault(n, input_size[-2:])
elif n == 'in_chans':
input_size = default_cfg.get('input_size', None)
if input_size is not None:
assert len(input_size) == 3
kwargs.setdefault(n, input_size[0])
else:
default_val = default_cfg.get(n, None)
if default_val is not None:
kwargs.setdefault(n, default_cfg[n])
def filter_kwargs(kwargs, names):
if not kwargs or not names:
return
for n in names:
kwargs.pop(n, None)
def update_default_cfg_and_kwargs(default_cfg, kwargs, kwargs_filter):
""" Update the default_cfg and kwargs before passing to model
FIXME this sequence of overlay default_cfg, set default kwargs, filter kwargs
could/should be replaced by an improved configuration mechanism
Args:
default_cfg: input default_cfg (updated in-place)
kwargs: keyword args passed to model build fn (updated in-place)
kwargs_filter: keyword arg keys that must be removed before model __init__
"""
# Overlay default cfg values from `external_default_cfg` if it exists in kwargs
overlay_external_default_cfg(default_cfg, kwargs)
# Set model __init__ args that can be determined by default_cfg (if not already passed as kwargs)
default_kwarg_names = ('num_classes', 'global_pool', 'in_chans')
if default_cfg.get('fixed_input_size', False):
# if fixed_input_size exists and is True, model takes an img_size arg that fixes its input size
default_kwarg_names += ('img_size',)
set_default_kwargs(kwargs, names=default_kwarg_names, default_cfg=default_cfg)
# Filter keyword args for task specific model variants (some 'features only' models, etc.)
filter_kwargs(kwargs, names=kwargs_filter)
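# Example of the resulting kwarg resolution (values are illustrative, not from a real config):
# with default_cfg = {'num_classes': 1000, 'input_size': (3, 224, 224), 'fixed_input_size': True}
# and kwargs = {'num_classes': 10}, the call leaves num_classes=10 untouched, fills in
# img_size=(224, 224) and in_chans=3 from input_size, then drops any keys named in kwargs_filter.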
def swin_build_model_with_cfg(
model_cls: Callable,
variant: str,
pretrained: bool,
default_cfg: dict,
model_cfg: Optional[Any] = None,
feature_cfg: Optional[dict] = None,
pretrained_strict: bool = True,
pretrained_filter_fn: Optional[Callable] = None,
pretrained_custom_load: bool = False,
kwargs_filter: Optional[Tuple[str]] = None,
**kwargs):
""" Build model with specified default_cfg and optional model_cfg
This helper fn aids in the construction of a model including:
    * handling default_cfg and associated pretrained weight loading
* passing through optional model_cfg for models with config based arch spec
* features_only model adaptation
* pruning config / model adaptation
Args:
model_cls (nn.Module): model class
variant (str): model variant name
pretrained (bool): load pretrained weights
default_cfg (dict): model's default pretrained/task config
model_cfg (Optional[Dict]): model's architecture config
        feature_cfg (Optional[Dict]): feature extraction adapter config
pretrained_strict (bool): load pretrained weights strictly
pretrained_filter_fn (Optional[Callable]): filter callable for pretrained weights
pretrained_custom_load (bool): use custom load fn, to load numpy or other non PyTorch weights
kwargs_filter (Optional[Tuple]): kwargs to filter before passing to model
**kwargs: model args passed through to model __init__
"""
pruned = kwargs.pop('pruned', False)
features = False
feature_cfg = feature_cfg or {}
default_cfg = deepcopy(default_cfg) if default_cfg else {}
update_default_cfg_and_kwargs(default_cfg, kwargs, kwargs_filter)
default_cfg.setdefault('architecture', variant)
# Setup for feature extraction wrapper done at end of this fn
if kwargs.pop('features_only', False):
features = True
feature_cfg.setdefault('out_indices', (0, 1, 2, 3, 4))
if 'out_indices' in kwargs:
feature_cfg['out_indices'] = kwargs.pop('out_indices')
# Build the model
model = model_cls(**kwargs) if model_cfg is None else model_cls(cfg=model_cfg, **kwargs)
model.default_cfg = default_cfg
if pruned:
model = adapt_model_from_file(model, variant)
# For classification models, check class attr, then kwargs, then default to 1k, otherwise 0 for feats
num_classes_pretrained = 0 if features else getattr(model, 'num_classes', kwargs.get('num_classes', 1000))
if pretrained:
if pretrained_custom_load:
load_custom_pretrained(model)
else:
load_pretrained(
model,
num_classes=num_classes_pretrained,
in_chans=kwargs.get('in_chans', 3),
filter_fn=pretrained_filter_fn,
img_size=kwargs['img_size'],
strict=pretrained_strict,
resolution_before=kwargs['config']['resolution_before'])
# Wrap the model in a feature extraction module if enabled
if features:
feature_cls = FeatureListNet
if 'feature_cls' in feature_cfg:
feature_cls = feature_cfg.pop('feature_cls')
if isinstance(feature_cls, str):
feature_cls = feature_cls.lower()
if 'hook' in feature_cls:
feature_cls = FeatureHookNet
else:
assert False, f'Unknown feature class {feature_cls}'
model = feature_cls(model, **feature_cfg)
model.default_cfg = default_cfg_for_features(default_cfg) # add back default_cfg
return model
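# Typical call site (a sketch; SwinTransformer and default_cfgs are assumptions, not defined in this file):
#
#   model = swin_build_model_with_cfg(
#       SwinTransformer, 'swin_base_patch4_window7_224', pretrained=True,
#       default_cfg=default_cfgs['swin_base_patch4_window7_224'],
#       img_size=224, config=config, **model_kwargs)
#
# Note that this variant expects `img_size` and `config['resolution_before']` in kwargs when
# pretrained=True, since load_pretrained() above uses them to adapt the position encoding.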
def model_parameters(model, exclude_head=False):
if exclude_head:
# FIXME this a bit of a quick and dirty hack to skip classifier head params based on ordering
return [p for p in model.parameters()][:-2]
else:
return model.parameters()
def named_apply(fn: Callable, module: nn.Module, name='', depth_first=True, include_root=False) -> nn.Module:
if not depth_first and include_root:
fn(module=module, name=name)
for child_name, child_module in module.named_children():
child_name = '.'.join((name, child_name)) if name else child_name
named_apply(fn=fn, module=child_module, name=child_name, depth_first=depth_first, include_root=True)
if depth_first and include_root:
fn(module=module, name=name)
return module
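# Example (hypothetical init function): apply a weight init to every submodule, leaf-first:
#
#   def _init_fn(module, name=''):
#       if isinstance(module, nn.Linear):
#           nn.init.trunc_normal_(module.weight, std=0.02)
#
#   named_apply(_init_fn, model)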
def named_modules(module: nn.Module, name='', depth_first=True, include_root=False):
if not depth_first and include_root:
yield name, module
for child_name, child_module in module.named_children():
child_name = '.'.join((name, child_name)) if name else child_name
yield from named_modules(
module=child_module, name=child_name, depth_first=depth_first, include_root=True)
if depth_first and include_root:
yield name, module
| BridgeTower/src/modules/swin_helpers.py/0 | {
"file_path": "BridgeTower/src/modules/swin_helpers.py",
"repo_id": "BridgeTower",
"token_count": 9842
} | 155 |
import json
import pandas as pd
import pyarrow as pa
import os
from tqdm import tqdm
from collections import defaultdict
label2id = {'contradiction': 0, 'neutral': 1, 'entailment': 2}
def process(root, imgid, ann):
with open(f"{root}/flickr30k-images/{imgid}.jpg", "rb") as fp:
img = fp.read()
sentences = ann['sentences']
labels = ann['labels']
return [img, sentences, labels]
def make_arrow(root, dataset_root):
train_data = list(
map(json.loads, open(f"{root}/snli_ve/snli_ve_train.jsonl").readlines())
)
test_data = list(
map(json.loads, open(f"{root}/snli_ve/snli_ve_test.jsonl").readlines())
)
dev_data = list(
map(json.loads, open(f"{root}/snli_ve/snli_ve_dev.jsonl").readlines())
)
splits = [
"train",
"dev",
"test",
]
annotations = dict()
annotations['train'] = train_data
annotations['dev'] = dev_data
annotations['test'] = test_data
annots = dict()
for split in splits:
annots[split] = {}
for line in annotations[split]:
imgid = line['Flickr30K_ID']
            if imgid not in annots[split]:
annots[split][imgid] = {}
annots[split][imgid]['sentences'] = []
annots[split][imgid]['labels'] = []
annots[split][imgid]['sentences'].append( [line['sentence1'], line['sentence2']] )
annots[split][imgid]['labels'].append( label2id[line['gold_label']] )
for split in splits:
bs = [process(root, imgid, annots[split][imgid]) for imgid in tqdm(annots[split])]
dataframe = pd.DataFrame(
bs, columns=["image", "sentences", "labels"]
)
table = pa.Table.from_pandas(dataframe)
os.makedirs(dataset_root, exist_ok=True)
with pa.OSFile(f"{dataset_root}/snli_{split}.arrow", "wb") as sink:
with pa.RecordBatchFileWriter(sink, table.schema) as writer:
writer.write_table(table)
make_arrow('~/BT/dataset/mscoco_flickr30k_vqav2_snli_ve', '~/BT/dataset/fine-tune') | BridgeTower/src/utils/write_snli.py/0 | {
"file_path": "BridgeTower/src/utils/write_snli.py",
"repo_id": "BridgeTower",
"token_count": 999
} | 156 |
FROM nvidia/cuda:11.1-base-ubuntu20.04
RUN apt update && DEBIAN_FRONTEND=noninteractive apt install git bzip2 wget unzip python3-pip python3-dev cmake libgl1-mesa-dev python-is-python3 libgtk2.0-dev -yq
ADD . /app
WORKDIR /app
RUN cd Face_Enhancement/models/networks/ &&\
git clone https://github.com/vacancy/Synchronized-BatchNorm-PyTorch &&\
cp -rf Synchronized-BatchNorm-PyTorch/sync_batchnorm . &&\
cd ../../../
RUN cd Global/detection_models &&\
git clone https://github.com/vacancy/Synchronized-BatchNorm-PyTorch &&\
cp -rf Synchronized-BatchNorm-PyTorch/sync_batchnorm . &&\
cd ../../
RUN cd Face_Detection/ &&\
wget http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2 &&\
bzip2 -d shape_predictor_68_face_landmarks.dat.bz2 &&\
cd ../
RUN cd Face_Enhancement/ &&\
wget https://facevc.blob.core.windows.net/zhanbo/old_photo/pretrain/Face_Enhancement/checkpoints.zip &&\
unzip checkpoints.zip &&\
cd ../ &&\
cd Global/ &&\
wget https://facevc.blob.core.windows.net/zhanbo/old_photo/pretrain/Global/checkpoints.zip &&\
unzip checkpoints.zip &&\
rm -f checkpoints.zip &&\
cd ../
RUN pip3 install numpy
RUN pip3 install dlib
RUN pip3 install -r requirements.txt
RUN git clone https://github.com/NVlabs/SPADE.git
RUN cd SPADE/ && pip3 install -r requirements.txt
RUN cd ..
CMD ["python3", "run.py"]
| Bringing-Old-Photos-Back-to-Life/Dockerfile/0 | {
"file_path": "Bringing-Old-Photos-Back-to-Life/Dockerfile",
"repo_id": "Bringing-Old-Photos-Back-to-Life",
"token_count": 540
} | 157 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import torch
import torch.nn as nn
import torch.nn.functional as F
from models.networks.base_network import BaseNetwork
from models.networks.normalization import get_nonspade_norm_layer
from models.networks.architecture import ResnetBlock as ResnetBlock
from models.networks.architecture import SPADEResnetBlock as SPADEResnetBlock
from models.networks.architecture import SPADEResnetBlock_non_spade as SPADEResnetBlock_non_spade
class SPADEGenerator(BaseNetwork):
@staticmethod
def modify_commandline_options(parser, is_train):
parser.set_defaults(norm_G="spectralspadesyncbatch3x3")
parser.add_argument(
"--num_upsampling_layers",
choices=("normal", "more", "most"),
default="normal",
help="If 'more', adds upsampling layer between the two middle resnet blocks. If 'most', also add one more upsampling + resnet layer at the end of the generator",
)
return parser
def __init__(self, opt):
super().__init__()
self.opt = opt
nf = opt.ngf
self.sw, self.sh = self.compute_latent_vector_size(opt)
print("The size of the latent vector size is [%d,%d]" % (self.sw, self.sh))
if opt.use_vae:
# In case of VAE, we will sample from random z vector
self.fc = nn.Linear(opt.z_dim, 16 * nf * self.sw * self.sh)
else:
# Otherwise, we make the network deterministic by starting with
# downsampled segmentation map instead of random z
if self.opt.no_parsing_map:
self.fc = nn.Conv2d(3, 16 * nf, 3, padding=1)
else:
self.fc = nn.Conv2d(self.opt.semantic_nc, 16 * nf, 3, padding=1)
if self.opt.injection_layer == "all" or self.opt.injection_layer == "1":
self.head_0 = SPADEResnetBlock(16 * nf, 16 * nf, opt)
else:
self.head_0 = SPADEResnetBlock_non_spade(16 * nf, 16 * nf, opt)
if self.opt.injection_layer == "all" or self.opt.injection_layer == "2":
self.G_middle_0 = SPADEResnetBlock(16 * nf, 16 * nf, opt)
self.G_middle_1 = SPADEResnetBlock(16 * nf, 16 * nf, opt)
else:
self.G_middle_0 = SPADEResnetBlock_non_spade(16 * nf, 16 * nf, opt)
self.G_middle_1 = SPADEResnetBlock_non_spade(16 * nf, 16 * nf, opt)
if self.opt.injection_layer == "all" or self.opt.injection_layer == "3":
self.up_0 = SPADEResnetBlock(16 * nf, 8 * nf, opt)
else:
self.up_0 = SPADEResnetBlock_non_spade(16 * nf, 8 * nf, opt)
if self.opt.injection_layer == "all" or self.opt.injection_layer == "4":
self.up_1 = SPADEResnetBlock(8 * nf, 4 * nf, opt)
else:
self.up_1 = SPADEResnetBlock_non_spade(8 * nf, 4 * nf, opt)
if self.opt.injection_layer == "all" or self.opt.injection_layer == "5":
self.up_2 = SPADEResnetBlock(4 * nf, 2 * nf, opt)
else:
self.up_2 = SPADEResnetBlock_non_spade(4 * nf, 2 * nf, opt)
if self.opt.injection_layer == "all" or self.opt.injection_layer == "6":
self.up_3 = SPADEResnetBlock(2 * nf, 1 * nf, opt)
else:
self.up_3 = SPADEResnetBlock_non_spade(2 * nf, 1 * nf, opt)
final_nc = nf
if opt.num_upsampling_layers == "most":
self.up_4 = SPADEResnetBlock(1 * nf, nf // 2, opt)
final_nc = nf // 2
self.conv_img = nn.Conv2d(final_nc, 3, 3, padding=1)
self.up = nn.Upsample(scale_factor=2)
def compute_latent_vector_size(self, opt):
if opt.num_upsampling_layers == "normal":
num_up_layers = 5
elif opt.num_upsampling_layers == "more":
num_up_layers = 6
elif opt.num_upsampling_layers == "most":
num_up_layers = 7
else:
raise ValueError("opt.num_upsampling_layers [%s] not recognized" % opt.num_upsampling_layers)
sw = opt.load_size // (2 ** num_up_layers)
sh = round(sw / opt.aspect_ratio)
return sw, sh
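    # Worked example: with load_size=256, num_upsampling_layers='normal' (5 up layers) and
    # aspect_ratio=1.0, sw = 256 // 2**5 = 8 and sh = round(8 / 1.0) = 8, so generation starts
    # from an 8x8 latent grid that is progressively upsampled back to the full resolution.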
def forward(self, input, degraded_image, z=None):
seg = input
if self.opt.use_vae:
# we sample z from unit normal and reshape the tensor
if z is None:
z = torch.randn(input.size(0), self.opt.z_dim, dtype=torch.float32, device=input.get_device())
x = self.fc(z)
x = x.view(-1, 16 * self.opt.ngf, self.sh, self.sw)
else:
# we downsample segmap and run convolution
if self.opt.no_parsing_map:
x = F.interpolate(degraded_image, size=(self.sh, self.sw), mode="bilinear")
else:
x = F.interpolate(seg, size=(self.sh, self.sw), mode="nearest")
x = self.fc(x)
x = self.head_0(x, seg, degraded_image)
x = self.up(x)
x = self.G_middle_0(x, seg, degraded_image)
if self.opt.num_upsampling_layers == "more" or self.opt.num_upsampling_layers == "most":
x = self.up(x)
x = self.G_middle_1(x, seg, degraded_image)
x = self.up(x)
x = self.up_0(x, seg, degraded_image)
x = self.up(x)
x = self.up_1(x, seg, degraded_image)
x = self.up(x)
x = self.up_2(x, seg, degraded_image)
x = self.up(x)
x = self.up_3(x, seg, degraded_image)
if self.opt.num_upsampling_layers == "most":
x = self.up(x)
x = self.up_4(x, seg, degraded_image)
x = self.conv_img(F.leaky_relu(x, 2e-1))
        x = torch.tanh(x)
return x
class Pix2PixHDGenerator(BaseNetwork):
@staticmethod
def modify_commandline_options(parser, is_train):
parser.add_argument(
"--resnet_n_downsample", type=int, default=4, help="number of downsampling layers in netG"
)
parser.add_argument(
"--resnet_n_blocks",
type=int,
default=9,
help="number of residual blocks in the global generator network",
)
parser.add_argument(
"--resnet_kernel_size", type=int, default=3, help="kernel size of the resnet block"
)
parser.add_argument(
"--resnet_initial_kernel_size", type=int, default=7, help="kernel size of the first convolution"
)
# parser.set_defaults(norm_G='instance')
return parser
def __init__(self, opt):
super().__init__()
input_nc = 3
# print("xxxxx")
# print(opt.norm_G)
norm_layer = get_nonspade_norm_layer(opt, opt.norm_G)
activation = nn.ReLU(False)
model = []
# initial conv
model += [
nn.ReflectionPad2d(opt.resnet_initial_kernel_size // 2),
norm_layer(nn.Conv2d(input_nc, opt.ngf, kernel_size=opt.resnet_initial_kernel_size, padding=0)),
activation,
]
# downsample
mult = 1
for i in range(opt.resnet_n_downsample):
model += [
norm_layer(nn.Conv2d(opt.ngf * mult, opt.ngf * mult * 2, kernel_size=3, stride=2, padding=1)),
activation,
]
mult *= 2
# resnet blocks
for i in range(opt.resnet_n_blocks):
model += [
ResnetBlock(
opt.ngf * mult,
norm_layer=norm_layer,
activation=activation,
kernel_size=opt.resnet_kernel_size,
)
]
# upsample
for i in range(opt.resnet_n_downsample):
nc_in = int(opt.ngf * mult)
nc_out = int((opt.ngf * mult) / 2)
model += [
norm_layer(
nn.ConvTranspose2d(nc_in, nc_out, kernel_size=3, stride=2, padding=1, output_padding=1)
),
activation,
]
mult = mult // 2
# final output conv
model += [
nn.ReflectionPad2d(3),
nn.Conv2d(nc_out, opt.output_nc, kernel_size=7, padding=0),
nn.Tanh(),
]
self.model = nn.Sequential(*model)
def forward(self, input, degraded_image, z=None):
return self.model(degraded_image)
| Bringing-Old-Photos-Back-to-Life/Face_Enhancement/models/networks/generator.py/0 | {
"file_path": "Bringing-Old-Photos-Back-to-Life/Face_Enhancement/models/networks/generator.py",
"repo_id": "Bringing-Old-Photos-Back-to-Life",
"token_count": 4241
} | 158 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
class BaseDataLoader():
def __init__(self):
pass
def initialize(self, opt):
self.opt = opt
pass
    def load_data(self):
return None
| Bringing-Old-Photos-Back-to-Life/Global/data/base_data_loader.py/0 | {
"file_path": "Bringing-Old-Photos-Back-to-Life/Global/data/base_data_loader.py",
"repo_id": "Bringing-Old-Photos-Back-to-Life",
"token_count": 119
} | 159 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import torch
import torch.nn as nn
import functools
from torch.autograd import Variable
import numpy as np
from torch.nn.utils import spectral_norm
# from util.util import SwitchNorm2d
import torch.nn.functional as F
###############################################################################
# Functions
###############################################################################
def weights_init(m):
classname = m.__class__.__name__
if classname.find("Conv") != -1:
m.weight.data.normal_(0.0, 0.02)
elif classname.find("BatchNorm2d") != -1:
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
def get_norm_layer(norm_type="instance"):
if norm_type == "batch":
norm_layer = functools.partial(nn.BatchNorm2d, affine=True)
elif norm_type == "instance":
norm_layer = functools.partial(nn.InstanceNorm2d, affine=False)
elif norm_type == "spectral":
norm_layer = spectral_norm()
elif norm_type == "SwitchNorm":
norm_layer = SwitchNorm2d
else:
raise NotImplementedError("normalization layer [%s] is not found" % norm_type)
return norm_layer
def print_network(net):
if isinstance(net, list):
net = net[0]
num_params = 0
for param in net.parameters():
num_params += param.numel()
print(net)
print("Total number of parameters: %d" % num_params)
def define_G(input_nc, output_nc, ngf, netG, k_size=3, n_downsample_global=3, n_blocks_global=9, n_local_enhancers=1,
n_blocks_local=3, norm='instance', gpu_ids=[], opt=None):
norm_layer = get_norm_layer(norm_type=norm)
if netG == 'global':
# if opt.self_gen:
if opt.use_v2:
netG = GlobalGenerator_DCDCv2(input_nc, output_nc, ngf, k_size, n_downsample_global, norm_layer, opt=opt)
else:
netG = GlobalGenerator_v2(input_nc, output_nc, ngf, k_size, n_downsample_global, n_blocks_global, norm_layer, opt=opt)
else:
raise('generator not implemented!')
print(netG)
if len(gpu_ids) > 0:
assert(torch.cuda.is_available())
netG.cuda(gpu_ids[0])
netG.apply(weights_init)
return netG
def define_D(input_nc, ndf, n_layers_D, opt, norm='instance', use_sigmoid=False, num_D=1, getIntermFeat=False, gpu_ids=[]):
norm_layer = get_norm_layer(norm_type=norm)
netD = MultiscaleDiscriminator(input_nc, opt, ndf, n_layers_D, norm_layer, use_sigmoid, num_D, getIntermFeat)
print(netD)
if len(gpu_ids) > 0:
assert(torch.cuda.is_available())
netD.cuda(gpu_ids[0])
netD.apply(weights_init)
return netD
class GlobalGenerator_DCDCv2(nn.Module):
def __init__(
self,
input_nc,
output_nc,
ngf=64,
k_size=3,
n_downsampling=8,
norm_layer=nn.BatchNorm2d,
padding_type="reflect",
opt=None,
):
super(GlobalGenerator_DCDCv2, self).__init__()
activation = nn.ReLU(True)
model = [
nn.ReflectionPad2d(3),
nn.Conv2d(input_nc, min(ngf, opt.mc), kernel_size=7, padding=0),
norm_layer(ngf),
activation,
]
### downsample
for i in range(opt.start_r):
mult = 2 ** i
model += [
nn.Conv2d(
min(ngf * mult, opt.mc),
min(ngf * mult * 2, opt.mc),
kernel_size=k_size,
stride=2,
padding=1,
),
norm_layer(min(ngf * mult * 2, opt.mc)),
activation,
]
for i in range(opt.start_r, n_downsampling - 1):
mult = 2 ** i
model += [
nn.Conv2d(
min(ngf * mult, opt.mc),
min(ngf * mult * 2, opt.mc),
kernel_size=k_size,
stride=2,
padding=1,
),
norm_layer(min(ngf * mult * 2, opt.mc)),
activation,
]
model += [
ResnetBlock(
min(ngf * mult * 2, opt.mc),
padding_type=padding_type,
activation=activation,
norm_layer=norm_layer,
opt=opt,
)
]
model += [
ResnetBlock(
min(ngf * mult * 2, opt.mc),
padding_type=padding_type,
activation=activation,
norm_layer=norm_layer,
opt=opt,
)
]
mult = 2 ** (n_downsampling - 1)
if opt.spatio_size == 32:
model += [
nn.Conv2d(
min(ngf * mult, opt.mc),
min(ngf * mult * 2, opt.mc),
kernel_size=k_size,
stride=2,
padding=1,
),
norm_layer(min(ngf * mult * 2, opt.mc)),
activation,
]
if opt.spatio_size == 64:
model += [
ResnetBlock(
min(ngf * mult * 2, opt.mc),
padding_type=padding_type,
activation=activation,
norm_layer=norm_layer,
opt=opt,
)
]
model += [
ResnetBlock(
min(ngf * mult * 2, opt.mc),
padding_type=padding_type,
activation=activation,
norm_layer=norm_layer,
opt=opt,
)
]
# model += [nn.Conv2d(min(ngf * mult * 2, opt.mc), min(ngf, opt.mc), 1, 1)]
if opt.feat_dim > 0:
model += [nn.Conv2d(min(ngf * mult * 2, opt.mc), opt.feat_dim, 1, 1)]
self.encoder = nn.Sequential(*model)
# decode
model = []
if opt.feat_dim > 0:
model += [nn.Conv2d(opt.feat_dim, min(ngf * mult * 2, opt.mc), 1, 1)]
# model += [nn.Conv2d(min(ngf, opt.mc), min(ngf * mult * 2, opt.mc), 1, 1)]
o_pad = 0 if k_size == 4 else 1
mult = 2 ** n_downsampling
model += [
ResnetBlock(
min(ngf * mult, opt.mc),
padding_type=padding_type,
activation=activation,
norm_layer=norm_layer,
opt=opt,
)
]
if opt.spatio_size == 32:
model += [
nn.ConvTranspose2d(
min(ngf * mult, opt.mc),
min(int(ngf * mult / 2), opt.mc),
kernel_size=k_size,
stride=2,
padding=1,
output_padding=o_pad,
),
norm_layer(min(int(ngf * mult / 2), opt.mc)),
activation,
]
if opt.spatio_size == 64:
model += [
ResnetBlock(
min(ngf * mult, opt.mc),
padding_type=padding_type,
activation=activation,
norm_layer=norm_layer,
opt=opt,
)
]
for i in range(1, n_downsampling - opt.start_r):
mult = 2 ** (n_downsampling - i)
model += [
ResnetBlock(
min(ngf * mult, opt.mc),
padding_type=padding_type,
activation=activation,
norm_layer=norm_layer,
opt=opt,
)
]
model += [
ResnetBlock(
min(ngf * mult, opt.mc),
padding_type=padding_type,
activation=activation,
norm_layer=norm_layer,
opt=opt,
)
]
model += [
nn.ConvTranspose2d(
min(ngf * mult, opt.mc),
min(int(ngf * mult / 2), opt.mc),
kernel_size=k_size,
stride=2,
padding=1,
output_padding=o_pad,
),
norm_layer(min(int(ngf * mult / 2), opt.mc)),
activation,
]
for i in range(n_downsampling - opt.start_r, n_downsampling):
mult = 2 ** (n_downsampling - i)
model += [
nn.ConvTranspose2d(
min(ngf * mult, opt.mc),
min(int(ngf * mult / 2), opt.mc),
kernel_size=k_size,
stride=2,
padding=1,
output_padding=o_pad,
),
norm_layer(min(int(ngf * mult / 2), opt.mc)),
activation,
]
if opt.use_segmentation_model:
model += [nn.ReflectionPad2d(3), nn.Conv2d(min(ngf, opt.mc), output_nc, kernel_size=7, padding=0)]
else:
model += [
nn.ReflectionPad2d(3),
nn.Conv2d(min(ngf, opt.mc), output_nc, kernel_size=7, padding=0),
nn.Tanh(),
]
self.decoder = nn.Sequential(*model)
def forward(self, input, flow="enc_dec"):
if flow == "enc":
return self.encoder(input)
elif flow == "dec":
return self.decoder(input)
elif flow == "enc_dec":
x = self.encoder(input)
x = self.decoder(x)
return x
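    # Sketch of how the two halves can be used separately (variable names are illustrative):
    #   z_a = netG_A(real_old_photo, flow="enc")   # encode an image into the latent space
    #   fake_clean = netG_B(z_mapped, flow="dec")  # decode a mapped latent back to an image
    # flow="enc_dec" simply chains the encoder and decoder.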
# Define a resnet block
class ResnetBlock(nn.Module):
def __init__(
self, dim, padding_type, norm_layer, opt, activation=nn.ReLU(True), use_dropout=False, dilation=1
):
super(ResnetBlock, self).__init__()
self.opt = opt
self.dilation = dilation
self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, activation, use_dropout)
def build_conv_block(self, dim, padding_type, norm_layer, activation, use_dropout):
conv_block = []
p = 0
if padding_type == "reflect":
conv_block += [nn.ReflectionPad2d(self.dilation)]
elif padding_type == "replicate":
conv_block += [nn.ReplicationPad2d(self.dilation)]
elif padding_type == "zero":
p = self.dilation
else:
raise NotImplementedError("padding [%s] is not implemented" % padding_type)
conv_block += [
nn.Conv2d(dim, dim, kernel_size=3, padding=p, dilation=self.dilation),
norm_layer(dim),
activation,
]
if use_dropout:
conv_block += [nn.Dropout(0.5)]
p = 0
if padding_type == "reflect":
conv_block += [nn.ReflectionPad2d(1)]
elif padding_type == "replicate":
conv_block += [nn.ReplicationPad2d(1)]
elif padding_type == "zero":
p = 1
else:
raise NotImplementedError("padding [%s] is not implemented" % padding_type)
conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, dilation=1), norm_layer(dim)]
return nn.Sequential(*conv_block)
def forward(self, x):
out = x + self.conv_block(x)
return out
class Encoder(nn.Module):
def __init__(self, input_nc, output_nc, ngf=32, n_downsampling=4, norm_layer=nn.BatchNorm2d):
super(Encoder, self).__init__()
self.output_nc = output_nc
model = [
nn.ReflectionPad2d(3),
nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0),
norm_layer(ngf),
nn.ReLU(True),
]
### downsample
for i in range(n_downsampling):
mult = 2 ** i
model += [
nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1),
norm_layer(ngf * mult * 2),
nn.ReLU(True),
]
### upsample
for i in range(n_downsampling):
mult = 2 ** (n_downsampling - i)
model += [
nn.ConvTranspose2d(
ngf * mult, int(ngf * mult / 2), kernel_size=3, stride=2, padding=1, output_padding=1
),
norm_layer(int(ngf * mult / 2)),
nn.ReLU(True),
]
model += [nn.ReflectionPad2d(3), nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0), nn.Tanh()]
self.model = nn.Sequential(*model)
def forward(self, input, inst):
outputs = self.model(input)
# instance-wise average pooling
outputs_mean = outputs.clone()
inst_list = np.unique(inst.cpu().numpy().astype(int))
for i in inst_list:
for b in range(input.size()[0]):
indices = (inst[b : b + 1] == int(i)).nonzero() # n x 4
for j in range(self.output_nc):
output_ins = outputs[indices[:, 0] + b, indices[:, 1] + j, indices[:, 2], indices[:, 3]]
mean_feat = torch.mean(output_ins).expand_as(output_ins)
outputs_mean[
indices[:, 0] + b, indices[:, 1] + j, indices[:, 2], indices[:, 3]
] = mean_feat
return outputs_mean
def SN(module, mode=True):
if mode:
return torch.nn.utils.spectral_norm(module)
return module
class NonLocalBlock2D_with_mask_Res(nn.Module):
def __init__(
self,
in_channels,
inter_channels,
mode="add",
re_norm=False,
temperature=1.0,
use_self=False,
cosin=False,
):
super(NonLocalBlock2D_with_mask_Res, self).__init__()
self.cosin = cosin
self.renorm = re_norm
self.in_channels = in_channels
self.inter_channels = inter_channels
self.g = nn.Conv2d(
in_channels=self.in_channels, out_channels=self.inter_channels, kernel_size=1, stride=1, padding=0
)
self.W = nn.Conv2d(
in_channels=self.inter_channels, out_channels=self.in_channels, kernel_size=1, stride=1, padding=0
)
# for pytorch 0.3.1
# nn.init.constant(self.W.weight, 0)
# nn.init.constant(self.W.bias, 0)
# for pytorch 0.4.0
nn.init.constant_(self.W.weight, 0)
nn.init.constant_(self.W.bias, 0)
self.theta = nn.Conv2d(
in_channels=self.in_channels, out_channels=self.inter_channels, kernel_size=1, stride=1, padding=0
)
self.phi = nn.Conv2d(
in_channels=self.in_channels, out_channels=self.inter_channels, kernel_size=1, stride=1, padding=0
)
self.mode = mode
self.temperature = temperature
self.use_self = use_self
norm_layer = get_norm_layer(norm_type="instance")
activation = nn.ReLU(True)
model = []
for i in range(3):
model += [
ResnetBlock(
inter_channels,
padding_type="reflect",
activation=activation,
norm_layer=norm_layer,
opt=None,
)
]
self.res_block = nn.Sequential(*model)
def forward(self, x, mask): ## The shape of mask is Batch*1*H*W
batch_size = x.size(0)
g_x = self.g(x).view(batch_size, self.inter_channels, -1)
g_x = g_x.permute(0, 2, 1)
theta_x = self.theta(x).view(batch_size, self.inter_channels, -1)
theta_x = theta_x.permute(0, 2, 1)
phi_x = self.phi(x).view(batch_size, self.inter_channels, -1)
if self.cosin:
theta_x = F.normalize(theta_x, dim=2)
phi_x = F.normalize(phi_x, dim=1)
f = torch.matmul(theta_x, phi_x)
f /= self.temperature
f_div_C = F.softmax(f, dim=2)
tmp = 1 - mask
mask = F.interpolate(mask, (x.size(2), x.size(3)), mode="bilinear")
mask[mask > 0] = 1.0
mask = 1 - mask
tmp = F.interpolate(tmp, (x.size(2), x.size(3)))
mask *= tmp
mask_expand = mask.view(batch_size, 1, -1)
mask_expand = mask_expand.repeat(1, x.size(2) * x.size(3), 1)
# mask = 1 - mask
# mask=F.interpolate(mask,(x.size(2),x.size(3)))
# mask_expand=mask.view(batch_size,1,-1)
# mask_expand=mask_expand.repeat(1,x.size(2)*x.size(3),1)
if self.use_self:
mask_expand[:, range(x.size(2) * x.size(3)), range(x.size(2) * x.size(3))] = 1.0
# print(mask_expand.shape)
# print(f_div_C.shape)
f_div_C = mask_expand * f_div_C
if self.renorm:
f_div_C = F.normalize(f_div_C, p=1, dim=2)
###########################
y = torch.matmul(f_div_C, g_x)
y = y.permute(0, 2, 1).contiguous()
y = y.view(batch_size, self.inter_channels, *x.size()[2:])
W_y = self.W(y)
W_y = self.res_block(W_y)
if self.mode == "combine":
full_mask = mask.repeat(1, self.inter_channels, 1, 1)
z = full_mask * x + (1 - full_mask) * W_y
return z
class MultiscaleDiscriminator(nn.Module):
def __init__(self, input_nc, opt, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d,
use_sigmoid=False, num_D=3, getIntermFeat=False):
super(MultiscaleDiscriminator, self).__init__()
self.num_D = num_D
self.n_layers = n_layers
self.getIntermFeat = getIntermFeat
for i in range(num_D):
netD = NLayerDiscriminator(input_nc, opt, ndf, n_layers, norm_layer, use_sigmoid, getIntermFeat)
if getIntermFeat:
for j in range(n_layers+2):
setattr(self, 'scale'+str(i)+'_layer'+str(j), getattr(netD, 'model'+str(j)))
else:
setattr(self, 'layer'+str(i), netD.model)
self.downsample = nn.AvgPool2d(3, stride=2, padding=[1, 1], count_include_pad=False)
def singleD_forward(self, model, input):
if self.getIntermFeat:
result = [input]
for i in range(len(model)):
result.append(model[i](result[-1]))
return result[1:]
else:
return [model(input)]
def forward(self, input):
num_D = self.num_D
result = []
input_downsampled = input
for i in range(num_D):
if self.getIntermFeat:
model = [getattr(self, 'scale'+str(num_D-1-i)+'_layer'+str(j)) for j in range(self.n_layers+2)]
else:
model = getattr(self, 'layer'+str(num_D-1-i))
result.append(self.singleD_forward(model, input_downsampled))
if i != (num_D-1):
input_downsampled = self.downsample(input_downsampled)
return result
# Defines the PatchGAN discriminator with the specified arguments.
class NLayerDiscriminator(nn.Module):
def __init__(self, input_nc, opt, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d, use_sigmoid=False, getIntermFeat=False):
super(NLayerDiscriminator, self).__init__()
self.getIntermFeat = getIntermFeat
self.n_layers = n_layers
kw = 4
padw = int(np.ceil((kw-1.0)/2))
sequence = [[SN(nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw),opt.use_SN), nn.LeakyReLU(0.2, True)]]
nf = ndf
for n in range(1, n_layers):
nf_prev = nf
nf = min(nf * 2, 512)
sequence += [[
SN(nn.Conv2d(nf_prev, nf, kernel_size=kw, stride=2, padding=padw),opt.use_SN),
norm_layer(nf), nn.LeakyReLU(0.2, True)
]]
nf_prev = nf
nf = min(nf * 2, 512)
sequence += [[
SN(nn.Conv2d(nf_prev, nf, kernel_size=kw, stride=1, padding=padw),opt.use_SN),
norm_layer(nf),
nn.LeakyReLU(0.2, True)
]]
sequence += [[SN(nn.Conv2d(nf, 1, kernel_size=kw, stride=1, padding=padw),opt.use_SN)]]
if use_sigmoid:
sequence += [[nn.Sigmoid()]]
if getIntermFeat:
for n in range(len(sequence)):
setattr(self, 'model'+str(n), nn.Sequential(*sequence[n]))
else:
sequence_stream = []
for n in range(len(sequence)):
sequence_stream += sequence[n]
self.model = nn.Sequential(*sequence_stream)
def forward(self, input):
if self.getIntermFeat:
res = [input]
for n in range(self.n_layers+2):
model = getattr(self, 'model'+str(n))
res.append(model(res[-1]))
return res[1:]
else:
return self.model(input)
class Patch_Attention_4(nn.Module): ## While combine the feature map, use conv and mask
def __init__(self, in_channels, inter_channels, patch_size):
super(Patch_Attention_4, self).__init__()
self.patch_size=patch_size
# self.g = nn.Conv2d(
# in_channels=self.in_channels, out_channels=self.inter_channels, kernel_size=1, stride=1, padding=0
# )
# self.W = nn.Conv2d(
# in_channels=self.inter_channels, out_channels=self.in_channels, kernel_size=1, stride=1, padding=0
# )
# # for pytorch 0.3.1
# # nn.init.constant(self.W.weight, 0)
# # nn.init.constant(self.W.bias, 0)
# # for pytorch 0.4.0
# nn.init.constant_(self.W.weight, 0)
# nn.init.constant_(self.W.bias, 0)
# self.theta = nn.Conv2d(
# in_channels=self.in_channels, out_channels=self.inter_channels, kernel_size=1, stride=1, padding=0
# )
# self.phi = nn.Conv2d(
# in_channels=self.in_channels, out_channels=self.inter_channels, kernel_size=1, stride=1, padding=0
# )
self.F_Combine=nn.Conv2d(in_channels=1025,out_channels=512,kernel_size=3,stride=1,padding=1,bias=True)
norm_layer = get_norm_layer(norm_type="instance")
activation = nn.ReLU(True)
model = []
for i in range(1):
model += [
ResnetBlock(
inter_channels,
padding_type="reflect",
activation=activation,
norm_layer=norm_layer,
opt=None,
)
]
self.res_block = nn.Sequential(*model)
def Hard_Compose(self, input, dim, index):
# batch index select
# input: [B,C,HW]
# dim: scalar > 0
# index: [B, HW]
views = [input.size(0)] + [1 if i!=dim else -1 for i in range(1, len(input.size()))]
expanse = list(input.size())
expanse[0] = -1
expanse[dim] = -1
index = index.view(views).expand(expanse)
return torch.gather(input, dim, index)
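    # Hard_Compose above is a batched index-select: with input of shape [B, C, N] (N patches),
    # dim=2 and index of shape [B, N], it returns output with output[b, :, i] = input[b, :, index[b, i]],
    # i.e. a per-sample hard selection of patches.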
def forward(self, z, mask): ## The shape of mask is Batch*1*H*W
x=self.res_block(z)
b,c,h,w=x.shape
## mask resize + dilation
# tmp = 1 - mask
mask = F.interpolate(mask, (x.size(2), x.size(3)), mode="bilinear")
mask[mask > 0] = 1.0
# mask = 1 - mask
# tmp = F.interpolate(tmp, (x.size(2), x.size(3)))
# mask *= tmp
# mask=1-mask
## 1: mask position 0: non-mask
mask_unfold=F.unfold(mask, kernel_size=(self.patch_size,self.patch_size), padding=0, stride=self.patch_size)
non_mask_region=(torch.mean(mask_unfold,dim=1,keepdim=True)>0.6).float()
all_patch_num=h*w/self.patch_size/self.patch_size
non_mask_region=non_mask_region.repeat(1,int(all_patch_num),1)
x_unfold=F.unfold(x, kernel_size=(self.patch_size,self.patch_size), padding=0, stride=self.patch_size)
y_unfold=x_unfold.permute(0,2,1)
x_unfold_normalized=F.normalize(x_unfold,dim=1)
y_unfold_normalized=F.normalize(y_unfold,dim=2)
correlation_matrix=torch.bmm(y_unfold_normalized,x_unfold_normalized)
correlation_matrix=correlation_matrix.masked_fill(non_mask_region==1.,-1e9)
correlation_matrix=F.softmax(correlation_matrix,dim=2)
# print(correlation_matrix)
R, max_arg=torch.max(correlation_matrix,dim=2)
composed_unfold=self.Hard_Compose(x_unfold, 2, max_arg)
composed_fold=F.fold(composed_unfold,output_size=(h,w),kernel_size=(self.patch_size,self.patch_size),padding=0,stride=self.patch_size)
concat_1=torch.cat((z,composed_fold,mask),dim=1)
concat_1=self.F_Combine(concat_1)
return concat_1
def inference_forward(self,z,mask): ## Reduce the extra memory cost
x=self.res_block(z)
b,c,h,w=x.shape
## mask resize + dilation
# tmp = 1 - mask
mask = F.interpolate(mask, (x.size(2), x.size(3)), mode="bilinear")
mask[mask > 0] = 1.0
# mask = 1 - mask
# tmp = F.interpolate(tmp, (x.size(2), x.size(3)))
# mask *= tmp
# mask=1-mask
## 1: mask position 0: non-mask
mask_unfold=F.unfold(mask, kernel_size=(self.patch_size,self.patch_size), padding=0, stride=self.patch_size)
non_mask_region=(torch.mean(mask_unfold,dim=1,keepdim=True)>0.6).float()[0,0,:] # 1*1*all_patch_num
all_patch_num=h*w/self.patch_size/self.patch_size
mask_index=torch.nonzero(non_mask_region,as_tuple=True)[0]
if len(mask_index)==0: ## No mask patch is selected, no attention is needed
composed_fold=x
else:
unmask_index=torch.nonzero(non_mask_region!=1,as_tuple=True)[0]
x_unfold=F.unfold(x, kernel_size=(self.patch_size,self.patch_size), padding=0, stride=self.patch_size)
Query_Patch=torch.index_select(x_unfold,2,mask_index)
Key_Patch=torch.index_select(x_unfold,2,unmask_index)
Query_Patch=Query_Patch.permute(0,2,1)
Query_Patch_normalized=F.normalize(Query_Patch,dim=2)
Key_Patch_normalized=F.normalize(Key_Patch,dim=1)
correlation_matrix=torch.bmm(Query_Patch_normalized,Key_Patch_normalized)
correlation_matrix=F.softmax(correlation_matrix,dim=2)
R, max_arg=torch.max(correlation_matrix,dim=2)
composed_unfold=self.Hard_Compose(Key_Patch, 2, max_arg)
x_unfold[:,:,mask_index]=composed_unfold
composed_fold=F.fold(x_unfold,output_size=(h,w),kernel_size=(self.patch_size,self.patch_size),padding=0,stride=self.patch_size)
concat_1=torch.cat((z,composed_fold,mask),dim=1)
concat_1=self.F_Combine(concat_1)
return concat_1
##############################################################################
# Losses
##############################################################################
class GANLoss(nn.Module):
def __init__(self, use_lsgan=True, target_real_label=1.0, target_fake_label=0.0,
tensor=torch.FloatTensor):
super(GANLoss, self).__init__()
self.real_label = target_real_label
self.fake_label = target_fake_label
self.real_label_var = None
self.fake_label_var = None
self.Tensor = tensor
if use_lsgan:
self.loss = nn.MSELoss()
else:
self.loss = nn.BCELoss()
def get_target_tensor(self, input, target_is_real):
target_tensor = None
if target_is_real:
create_label = ((self.real_label_var is None) or
(self.real_label_var.numel() != input.numel()))
if create_label:
real_tensor = self.Tensor(input.size()).fill_(self.real_label)
self.real_label_var = Variable(real_tensor, requires_grad=False)
target_tensor = self.real_label_var
else:
create_label = ((self.fake_label_var is None) or
(self.fake_label_var.numel() != input.numel()))
if create_label:
fake_tensor = self.Tensor(input.size()).fill_(self.fake_label)
self.fake_label_var = Variable(fake_tensor, requires_grad=False)
target_tensor = self.fake_label_var
return target_tensor
def __call__(self, input, target_is_real):
if isinstance(input[0], list):
loss = 0
for input_i in input:
pred = input_i[-1]
target_tensor = self.get_target_tensor(pred, target_is_real)
loss += self.loss(pred, target_tensor)
return loss
else:
target_tensor = self.get_target_tensor(input[-1], target_is_real)
return self.loss(input[-1], target_tensor)
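# Usage sketch for GANLoss (the discriminator outputs are the nested lists produced by MultiscaleDiscriminator):
#   criterionGAN = GANLoss(use_lsgan=True, tensor=torch.cuda.FloatTensor)
#   loss_G = criterionGAN(netD(fake_image), True)    # generator wants fakes scored as real
#   loss_D = 0.5 * (criterionGAN(netD(real_image), True) + criterionGAN(netD(fake_image.detach()), False))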
####################################### VGG Loss
from torchvision import models
class VGG19_torch(torch.nn.Module):
def __init__(self, requires_grad=False):
super(VGG19_torch, self).__init__()
vgg_pretrained_features = models.vgg19(pretrained=True).features
self.slice1 = torch.nn.Sequential()
self.slice2 = torch.nn.Sequential()
self.slice3 = torch.nn.Sequential()
self.slice4 = torch.nn.Sequential()
self.slice5 = torch.nn.Sequential()
for x in range(2):
self.slice1.add_module(str(x), vgg_pretrained_features[x])
for x in range(2, 7):
self.slice2.add_module(str(x), vgg_pretrained_features[x])
for x in range(7, 12):
self.slice3.add_module(str(x), vgg_pretrained_features[x])
for x in range(12, 21):
self.slice4.add_module(str(x), vgg_pretrained_features[x])
for x in range(21, 30):
self.slice5.add_module(str(x), vgg_pretrained_features[x])
if not requires_grad:
for param in self.parameters():
param.requires_grad = False
def forward(self, X):
h_relu1 = self.slice1(X)
h_relu2 = self.slice2(h_relu1)
h_relu3 = self.slice3(h_relu2)
h_relu4 = self.slice4(h_relu3)
h_relu5 = self.slice5(h_relu4)
out = [h_relu1, h_relu2, h_relu3, h_relu4, h_relu5]
return out
class VGGLoss_torch(nn.Module):
def __init__(self, gpu_ids):
super(VGGLoss_torch, self).__init__()
self.vgg = VGG19_torch().cuda()
self.criterion = nn.L1Loss()
self.weights = [1.0/32, 1.0/16, 1.0/8, 1.0/4, 1.0]
def forward(self, x, y):
x_vgg, y_vgg = self.vgg(x), self.vgg(y)
loss = 0
for i in range(len(x_vgg)):
loss += self.weights[i] * self.criterion(x_vgg[i], y_vgg[i].detach())
return loss | Bringing-Old-Photos-Back-to-Life/Global/models/networks.py/0 | {
"file_path": "Bringing-Old-Photos-Back-to-Life/Global/models/networks.py",
"repo_id": "Bringing-Old-Photos-Back-to-Life",
"token_count": 16443
} | 160 |
# Old Photo Restoration (Official PyTorch Implementation)
<img src='imgs/0001.jpg'/>
### [Project Page](http://raywzy.com/Old_Photo/) | [Paper (CVPR version)](https://arxiv.org/abs/2004.09484) | [Paper (Journal version)](https://arxiv.org/pdf/2009.07047v1.pdf) | [Pretrained Model](https://hkustconnect-my.sharepoint.com/:f:/g/personal/bzhangai_connect_ust_hk/Em0KnYOeSSxFtp4g_dhWdf0BdeT3tY12jIYJ6qvSf300cA?e=nXkJH2) | [Colab Demo](https://colab.research.google.com/drive/1NEm6AsybIiC5TwTU_4DqDkQO0nFRB-uA?usp=sharing) | [Replicate Demo & Docker Image](https://replicate.ai/zhangmozhe/bringing-old-photos-back-to-life) :fire:
**Bringing Old Photos Back to Life, CVPR2020 (Oral)**
**Old Photo Restoration via Deep Latent Space Translation, TPAMI 2022**
[Ziyu Wan](http://raywzy.com/)<sup>1</sup>,
[Bo Zhang](https://www.microsoft.com/en-us/research/people/zhanbo/)<sup>2</sup>,
[Dongdong Chen](http://www.dongdongchen.bid/)<sup>3</sup>,
[Pan Zhang](https://panzhang0212.github.io/)<sup>4</sup>,
[Dong Chen](https://www.microsoft.com/en-us/research/people/doch/)<sup>2</sup>,
[Jing Liao](https://liaojing.github.io/html/)<sup>1</sup>,
[Fang Wen](https://www.microsoft.com/en-us/research/people/fangwen/)<sup>2</sup> <br>
<sup>1</sup>City University of Hong Kong, <sup>2</sup>Microsoft Research Asia, <sup>3</sup>Microsoft Cloud AI, <sup>4</sup>USTC
<!-- ## Notes of this project
The code originates from our research project and the aim is to demonstrate the research idea, so we have not optimized it from a product perspective. And we will spend time to address some common issues, such as out of memory issue, limited resolution, but will not involve too much in engineering problems, such as speedup of the inference, fastapi deployment and so on. **We welcome volunteers to contribute to this project to make it more usable for practical application.** -->
## :sparkles: News
**2022.3.31**: Our new work regarding old film restoration will be published in CVPR 2022. For more details, please refer to the [project website](http://raywzy.com/Old_Film/) and [github repo](https://github.com/raywzy/Bringing-Old-Films-Back-to-Life).
The framework now supports the restoration of high-resolution input.
<img src='imgs/HR_result.png'>
Training code is available and welcome to have a try and learn the training details.
You can now play with our [Colab](https://colab.research.google.com/drive/1NEm6AsybIiC5TwTU_4DqDkQO0nFRB-uA?usp=sharing) and try it on your photos.
## Requirement
The code is tested on Ubuntu with Nvidia GPUs and CUDA installed. Python>=3.6 is required to run the code.
## Installation
Clone the Synchronized-BatchNorm-PyTorch repository into both the face enhancement and global restoration modules:
```
cd Face_Enhancement/models/networks/
git clone https://github.com/vacancy/Synchronized-BatchNorm-PyTorch
cp -rf Synchronized-BatchNorm-PyTorch/sync_batchnorm .
cd ../../../
```
```
cd Global/detection_models
git clone https://github.com/vacancy/Synchronized-BatchNorm-PyTorch
cp -rf Synchronized-BatchNorm-PyTorch/sync_batchnorm .
cd ../../
```
Download the landmark detection pretrained model
```
cd Face_Detection/
wget http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2
bzip2 -d shape_predictor_68_face_landmarks.dat.bz2
cd ../
```
Download the pretrained model, put the file `Face_Enhancement/checkpoints.zip` under `./Face_Enhancement`, and put the file `Global/checkpoints.zip` under `./Global`. Then unzip them respectively.
```
cd Face_Enhancement/
wget https://github.com/microsoft/Bringing-Old-Photos-Back-to-Life/releases/download/v1.0/face_checkpoints.zip
unzip face_checkpoints.zip
cd ../
cd Global/
wget https://github.com/microsoft/Bringing-Old-Photos-Back-to-Life/releases/download/v1.0/global_checkpoints.zip
unzip global_checkpoints.zip
cd ../
```
Install dependencies:
```bash
pip install -r requirements.txt
```
## :rocket: How to use?
**Note**: GPU can be set to a single id (e.g. 0) or a comma-separated list (e.g. 0,1,2 or 0,2); use -1 for CPU
### 1) Full Pipeline
You can easily restore old photos with one simple command after installing the dependencies and downloading the pretrained models.
For images without scratches:
```
python run.py --input_folder [test_image_folder_path] \
--output_folder [output_path] \
--GPU 0
```
For scratched images:
```
python run.py --input_folder [test_image_folder_path] \
--output_folder [output_path] \
--GPU 0 \
--with_scratch
```
**For high-resolution images with scratches**:
```
python run.py --input_folder [test_image_folder_path] \
--output_folder [output_path] \
--GPU 0 \
--with_scratch \
--HR
```
Note: Please try to use the absolute path. The final results will be saved in `./output_path/final_output/`. You could also check the produced results of different steps in `output_path`.
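For example, assuming your scratched photos live in `/home/user/old_photos` (a placeholder path), the full pipeline can be invoked as:
```
python run.py --input_folder /home/user/old_photos \
              --output_folder /home/user/restored \
              --GPU 0 \
              --with_scratch
```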
### 2) Scratch Detection
Currently we don't plan to release the labeled scratched-old-photo dataset directly. If you want paired data, you can run our pretrained model on your own collected images to obtain the scratch labels.
```
cd Global/
python detection.py --test_path [test_image_folder_path] \
--output_dir [output_path] \
--input_size [resize_256|full_size|scale_256]
```
<img src='imgs/scratch_detection.png'>
### 3) Global Restoration
A triplet domain translation network is proposed to solve both structured degradation and unstructured degradation of old photos.
<p align="center">
<img src='imgs/pipeline.PNG' width="50%" height="50%"/>
</p>
```
cd Global/
python test.py --Scratch_and_Quality_restore \
--test_input [test_image_folder_path] \
--test_mask [corresponding mask] \
--outputs_dir [output_path]
python test.py --Quality_restore \
--test_input [test_image_folder_path] \
--outputs_dir [output_path]
```
<img src='imgs/global.png'>
### 4) Face Enhancement
We use a progressive generator to refine the face regions of old photos. More details can be found in our journal submission and the `./Face_Enhancement` folder.
<p align="center">
<img src='imgs/face_pipeline.jpg' width="60%" height="60%"/>
</p>
<img src='imgs/face.png'>
> *NOTE*:
> This repo is mainly for research purpose and we have not yet optimized the running performance.
>
> Since the model is pretrained on 256×256 images, it may not work ideally for arbitrary resolutions.
### 5) GUI
A user-friendly GUI that takes an image as input from the user and shows the result in a separate window.
#### How it works:
1. Run GUI.py file.
2. Click browse and select your image from test_images/old_w_scratch folder to remove scratches.
3. Click Modify Photo button.
4. Wait for a while and see results on GUI window.
5. Exit window by clicking Exit Window and get your result image in output folder.
<img src='imgs/gui.PNG'>
## How to train?
### 1) Create Training File
Put the folders of the VOC dataset and the collected old photos (e.g., Real_L_old and Real_RGB_old) into one shared folder, then:
```
cd Global/data/
python Create_Bigfile.py
```
Note: Remember to modify the code based on your own environment.
### 2) Train the VAEs of domain A and domain B respectively
```
cd ..
python train_domain_A.py --use_v2_degradation --continue_train --training_dataset domain_A --name domainA_SR_old_photos --label_nc 0 --loadSize 256 --fineSize 256 --dataroot [your_data_folder] --no_instance --resize_or_crop crop_only --batchSize 100 --no_html --gpu_ids 0,1,2,3 --self_gen --nThreads 4 --n_downsample_global 3 --k_size 4 --use_v2 --mc 64 --start_r 1 --kl 1 --no_cgan --outputs_dir [your_output_folder] --checkpoints_dir [your_ckpt_folder]
python train_domain_B.py --continue_train --training_dataset domain_B --name domainB_old_photos --label_nc 0 --loadSize 256 --fineSize 256 --dataroot [your_data_folder] --no_instance --resize_or_crop crop_only --batchSize 120 --no_html --gpu_ids 0,1,2,3 --self_gen --nThreads 4 --n_downsample_global 3 --k_size 4 --use_v2 --mc 64 --start_r 1 --kl 1 --no_cgan --outputs_dir [your_output_folder] --checkpoints_dir [your_ckpt_folder]
```
Note: For the --name option, please ensure your experiment name contains "domainA" or "domainB", which is used to select the corresponding dataset.
### 3) Train the mapping network between domains
Train the mapping without scratches:
```
python train_mapping.py --use_v2_degradation --training_dataset mapping --use_vae_which_epoch 200 --continue_train --name mapping_quality --label_nc 0 --loadSize 256 --fineSize 256 --dataroot [your_data_folder] --no_instance --resize_or_crop crop_only --batchSize 80 --no_html --gpu_ids 0,1,2,3 --nThreads 8 --load_pretrainA [ckpt_of_domainA_SR_old_photos] --load_pretrainB [ckpt_of_domainB_old_photos] --l2_feat 60 --n_downsample_global 3 --mc 64 --k_size 4 --start_r 1 --mapping_n_block 6 --map_mc 512 --use_l1_feat --niter 150 --niter_decay 100 --outputs_dir [your_output_folder] --checkpoints_dir [your_ckpt_folder]
```
Training the mapping with scratches:
```
python train_mapping.py --no_TTUR --NL_res --random_hole --use_SN --correlation_renormalize --training_dataset mapping --NL_use_mask --NL_fusion_method combine --non_local Setting_42 --use_v2_degradation --use_vae_which_epoch 200 --continue_train --name mapping_scratch --label_nc 0 --loadSize 256 --fineSize 256 --dataroot [your_data_folder] --no_instance --resize_or_crop crop_only --batchSize 36 --no_html --gpu_ids 0,1,2,3 --nThreads 8 --load_pretrainA [ckpt_of_domainA_SR_old_photos] --load_pretrainB [ckpt_of_domainB_old_photos] --l2_feat 60 --n_downsample_global 3 --mc 64 --k_size 4 --start_r 1 --mapping_n_block 6 --map_mc 512 --use_l1_feat --niter 150 --niter_decay 100 --outputs_dir [your_output_folder] --checkpoints_dir [your_ckpt_folder] --irregular_mask [absolute_path_of_mask_file]
```
Training the mapping with scratches (Multi-Scale Patch Attention for HR input):
```
python train_mapping.py --no_TTUR --NL_res --random_hole --use_SN --correlation_renormalize --training_dataset mapping --NL_use_mask --NL_fusion_method combine --non_local Setting_42 --use_v2_degradation --use_vae_which_epoch 200 --continue_train --name mapping_Patch_Attention --label_nc 0 --loadSize 256 --fineSize 256 --dataroot [your_data_folder] --no_instance --resize_or_crop crop_only --batchSize 36 --no_html --gpu_ids 0,1,2,3 --nThreads 8 --load_pretrainA [ckpt_of_domainA_SR_old_photos] --load_pretrainB [ckpt_of_domainB_old_photos] --l2_feat 60 --n_downsample_global 3 --mc 64 --k_size 4 --start_r 1 --mapping_n_block 6 --map_mc 512 --use_l1_feat --niter 150 --niter_decay 100 --outputs_dir [your_output_folder] --checkpoints_dir [your_ckpt_folder] --irregular_mask [absolute_path_of_mask_file] --mapping_exp 1
```
## Citation
If you find our work useful for your research, please consider citing the following papers :)
```bibtex
@inproceedings{wan2020bringing,
title={Bringing Old Photos Back to Life},
author={Wan, Ziyu and Zhang, Bo and Chen, Dongdong and Zhang, Pan and Chen, Dong and Liao, Jing and Wen, Fang},
booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition},
pages={2747--2757},
year={2020}
}
```
```bibtex
@article{wan2020old,
title={Old Photo Restoration via Deep Latent Space Translation},
author={Wan, Ziyu and Zhang, Bo and Chen, Dongdong and Zhang, Pan and Chen, Dong and Liao, Jing and Wen, Fang},
journal={arXiv preprint arXiv:2009.07047},
year={2020}
}
```
If you are also interested in the legacy photo/video colorization, please refer to [this work](https://github.com/zhangmozhe/video-colorization).
## Maintenance
This project is currently maintained by Ziyu Wan and is for academic research use only. If you have any questions, feel free to contact raywzy@gmail.com.
## License
The codes and the pretrained model in this repository are under the MIT license as specified by the LICENSE file. We use our labeled dataset to train the scratch detection model.
This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.
| Bringing-Old-Photos-Back-to-Life/README.md/0 | {
"file_path": "Bringing-Old-Photos-Back-to-Life/README.md",
"repo_id": "Bringing-Old-Photos-Back-to-Life",
"token_count": 4200
} | 161 |
apiVersion: v1
kind: Pod
metadata:
name: photo-back2life
spec:
containers:
- name: photos-back2life
image: <YOUR IMAGE>
volumeMounts:
- mountPath: /in
name: in-folder
- mountPath: /out
name: out-folder
command:
- python
- /app/run.py
args:
- --input_folder
- /in
- --output_folder
- /out
- --GPU
- '0'
- --with_scratch
resources:
limits:
memory: 4Gi
cpu: 0
nvidia.com/gpu: 1
volumes:
- name: in-folder
hostPath:
path: /srv/in
type: Directory
- name: out-folder
hostPath:
path: /srv/out
type: Directory
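# Example usage (host paths and image name above are placeholders):
#   kubectl apply -f kubernetes-pod.yml
#   kubectl logs -f photo-back2life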
| Bringing-Old-Photos-Back-to-Life/kubernetes-pod.yml/0 | {
"file_path": "Bringing-Old-Photos-Back-to-Life/kubernetes-pod.yml",
"repo_id": "Bringing-Old-Photos-Back-to-Life",
"token_count": 393
} | 162 |
import torch
import torch.nn as nn
from torch.nn import functional as nnf
from enum import Enum
from transformers import GPT2LMHeadModel
from typing import Tuple, Optional
def get_clapcap(name: str):
if name == "ClapCaption":
return ClapCaptionModel
else:
raise Exception('The ClapCap model {} is incorrect or not supported'.format(name))
class MappingType(Enum):
MLP = 'mlp'
Transformer = 'transformer'
class MLP(nn.Module):
def __init__(self, sizes: Tuple[int, ...], bias=True, act=nn.Tanh):
super(MLP, self).__init__()
layers = []
for i in range(len(sizes) - 1):
layers.append(nn.Linear(sizes[i], sizes[i + 1], bias=bias))
if i < len(sizes) - 2:
layers.append(act())
self.model = nn.Sequential(*layers)
def forward(self, x: torch.Tensor) -> torch.Tensor:
return self.model(x)
class MlpTransformer(nn.Module):
def __init__(self, in_dim, h_dim, out_d: Optional[int] = None, act=nnf.relu, dropout=0.):
super().__init__()
out_d = out_d if out_d is not None else in_dim
self.fc1 = nn.Linear(in_dim, h_dim)
self.act = act
self.fc2 = nn.Linear(h_dim, out_d)
self.dropout = nn.Dropout(dropout)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.dropout(x)
x = self.fc2(x)
x = self.dropout(x)
return x
class MultiHeadAttention(nn.Module):
def __init__(self, dim_self, dim_ref, num_heads, bias=True, dropout=0.):
super().__init__()
self.num_heads = num_heads
head_dim = dim_self // num_heads
self.scale = head_dim ** -0.5
self.to_queries = nn.Linear(dim_self, dim_self, bias=bias)
self.to_keys_values = nn.Linear(dim_ref, dim_self * 2, bias=bias)
self.project = nn.Linear(dim_self, dim_self)
self.dropout = nn.Dropout(dropout)
def forward(self, x, y=None, mask=None):
y = y if y is not None else x
b, n, c = x.shape
_, m, d = y.shape
# b n h dh
queries = self.to_queries(x).reshape(b, n, self.num_heads, c // self.num_heads)
# b m 2 h dh
keys_values = self.to_keys_values(y).reshape(b, m, 2, self.num_heads, c // self.num_heads)
keys, values = keys_values[:, :, 0], keys_values[:, :, 1]
attention = torch.einsum('bnhd,bmhd->bnmh', queries, keys) * self.scale
if mask is not None:
if mask.dim() == 2:
mask = mask.unsqueeze(1)
attention = attention.masked_fill(mask.unsqueeze(3), float("-inf"))
attention = attention.softmax(dim=2)
out = torch.einsum('bnmh,bmhd->bnhd', attention, values).reshape(b, n, c)
out = self.project(out)
return out, attention
class TransformerLayer(nn.Module):
def forward_with_attention(self, x, y=None, mask=None):
x_, attention = self.attn(self.norm1(x), y, mask)
x = x + x_
x = x + self.mlp(self.norm2(x))
return x, attention
def forward(self, x, y=None, mask=None):
x = x + self.attn(self.norm1(x), y, mask)[0]
x = x + self.mlp(self.norm2(x))
return x
def __init__(self, dim_self, dim_ref, num_heads, mlp_ratio=4., bias=False, dropout=0., act=nnf.relu,
norm_layer: nn.Module = nn.LayerNorm):
super().__init__()
self.norm1 = norm_layer(dim_self)
self.attn = MultiHeadAttention(dim_self, dim_ref, num_heads, bias=bias, dropout=dropout)
self.norm2 = norm_layer(dim_self)
self.mlp = MlpTransformer(dim_self, int(dim_self * mlp_ratio), act=act, dropout=dropout)
class Transformer(nn.Module):
def __init__(self, dim_self: int, num_heads: int, num_layers: int, dim_ref: Optional[int] = None,
mlp_ratio: float = 2., act=nnf.relu, norm_layer: nn.Module = nn.LayerNorm, enc_dec: bool = False):
super(Transformer, self).__init__()
dim_ref = dim_ref if dim_ref is not None else dim_self
self.enc_dec = enc_dec
if enc_dec:
num_layers = num_layers * 2
layers = []
for i in range(num_layers):
if i % 2 == 0 and enc_dec: # cross
layers.append(TransformerLayer(dim_self, dim_ref, num_heads, mlp_ratio, act=act, norm_layer=norm_layer))
elif enc_dec: # self
layers.append(TransformerLayer(dim_self, dim_self, num_heads, mlp_ratio, act=act, norm_layer=norm_layer))
else: # self or cross
layers.append(TransformerLayer(dim_self, dim_ref, num_heads, mlp_ratio, act=act, norm_layer=norm_layer))
self.layers = nn.ModuleList(layers)
def forward_with_attention(self, x, y=None, mask=None):
attentions = []
for layer in self.layers:
x, att = layer.forward_with_attention(x, y, mask)
attentions.append(att)
return x, attentions
def forward(self, x, y=None, mask=None):
for i, layer in enumerate(self.layers):
if i % 2 == 0 and self.enc_dec: # cross
x = layer(x, y)
elif self.enc_dec: # self
x = layer(x, x, mask)
else: # self or cross
x = layer(x, y, mask)
return x
class TransformerMapper(nn.Module):
def __init__(self, dim_clip: int, dim_embedding: int, prefix_length: int, clip_length: int, num_layers: int = 8):
super(TransformerMapper, self).__init__()
self.clip_length = clip_length
self.transformer = Transformer(dim_embedding, 8, num_layers)
self.linear = nn.Linear(dim_clip, clip_length * dim_embedding)
self.prefix_const = nn.Parameter(torch.randn(prefix_length, dim_embedding), requires_grad=True)
def forward(self, x):
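        # expand the input embedding into clip_length tokens, append the learned
        # prefix constants, run the transformer, and return only the positions
        # corresponding to those constants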
x = self.linear(x).view(x.shape[0], self.clip_length, -1)
prefix = self.prefix_const.unsqueeze(0).expand(x.shape[0], *self.prefix_const.shape)
prefix = torch.cat((x, prefix), dim=1)
out = self.transformer(prefix)[:, self.clip_length:]
return out
class ClapCaptionModel(nn.Module):
def __init__(self, clap, text_decoder: str, prefix_length: int, clip_length: Optional[int] = None, prefix_size: int = 512,
                 num_layers: int = 8, normalize_prefix: bool = True, mapping_type: str = None,
freeze_audio_encoder_weights: bool = True, freeze_gpt_weights: bool = True):
super(ClapCaptionModel, self).__init__()
self.clap = clap.audio_encoder
self.prefix_length = prefix_length
self.normalize_prefix = normalize_prefix
self.gpt = GPT2LMHeadModel.from_pretrained(text_decoder)
self.gpt_embedding_size = self.gpt.transformer.wte.weight.shape[1]
if mapping_type == 'mlp':
self.clap_project = MLP((prefix_size, (self.gpt_embedding_size * prefix_length) // 2,
self.gpt_embedding_size * prefix_length))
else:
self.clap_project = TransformerMapper(prefix_size, self.gpt_embedding_size, prefix_length,
clip_length, num_layers)
# Freeze all CLAP parameters
if freeze_audio_encoder_weights:
for p in self.clap.parameters():
p.requires_grad = False
if freeze_gpt_weights:
for p in self.gpt.parameters():
p.requires_grad = False
def get_dummy_token(self, batch_size: int, device: torch.device) -> torch.Tensor:
return torch.zeros(batch_size, self.prefix_length, dtype=torch.int64, device=device)
def forward(self, audios: torch.Tensor, tokens: torch.Tensor, mask: Optional[torch.Tensor] = None,
labels: Optional[torch.Tensor] = None):
# get audio embeddings
prefix, _ = self.clap(audios)
# normalize prefix (audio embedding)
if self.normalize_prefix:
prefix = prefix / prefix.norm(2, -1).reshape(-1,1)
embedding_text = self.gpt.transformer.wte(tokens['input_ids'])
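        # project the audio embedding into prefix_length pseudo-token embeddings
        # and prepend them to the caption token embeddings before the GPT pass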
prefix_projections = self.clap_project(prefix).view(-1, self.prefix_length, self.gpt_embedding_size)
embedding_cat = torch.cat((prefix_projections, embedding_text), dim=1)
if labels is not None:
dummy_token = self.get_dummy_token(tokens['input_ids'].shape[0], tokens['input_ids'].device)
            labels = torch.cat((dummy_token, tokens['input_ids']), dim=1)
out = self.gpt(inputs_embeds=embedding_cat, labels=labels, attention_mask=mask)
return out | CLAP/msclap/models/mapper.py/0 | {
"file_path": "CLAP/msclap/models/mapper.py",
"repo_id": "CLAP",
"token_count": 4039
} | 163 |
## Hydra
[Hydra](https://github.com/facebookresearch/hydra) is an open-source Python
framework that simplifies the development of research and other complex
applications. The key feature is the ability to dynamically create a
hierarchical configuration by composition and override it through config files
and the command line. The name Hydra comes from its ability to run multiple
similar jobs - much like a Hydra with multiple heads.
## Motivation
Until recently, all components in fairseq were configured through a shared
`args` namespace that was created at application startup. Components declared
their own `add_args` method to update the argparse parser, hoping that the names
would not clash with arguments from other components. While this model works for
smaller applications, as fairseq grew and became integrated into other
applications, this became problematic. In order to determine how to configure
each component, one needed to a) examine what args were added by this component,
and b) read the code to figure out what shared arguments it is using that were
added in other places. Reproducing models involved sharing commands that often
contained dozens of command line switches.
The model described above is still supported by fairseq for backward
compatibility, but will be deprecated some time in the future.
New components in fairseq should now create a dataclass that encapsulates all
parameters required to configure this component. The dataclass is registered
along with the component, and fairseq takes care of constructing and providing
this configuration object to the component's constructor. Note that sharing
parameters can optionally still work, but one has to explicitly point to the
"source of truth" (see inheritance example below). These changes make components
in fairseq more independent and re-usable by other applications: all that is
needed to create a component is to initialize its dataclass and overwrite some
of the defaults.
While configuring fairseq through command line (using either the legacy argparse
based or the new Hydra based entry points) is still fully supported, you can now
take advantage of configuring fairseq completely or piece-by-piece through
hierarchical YAML configuration files. These files can also be shipped as
examples that others can use to run an identically configured job.
Additionally, Hydra has a rich and growing [library of
plugins](https://github.com/facebookresearch/hydra/tree/master/plugins) that
provide functionality such as hyperparameter sweeping (including using bayesian
optimization through the [Ax](https://github.com/facebook/Ax) library), job
launching across various platforms, and more.
## Creating or migrating components
In general, each new (or updated) component should provide a companion
[dataclass](https://www.python.org/dev/peps/pep-0557/). These dataclass are
typically located in the same file as the component and are passed as arguments
to the `register_*()` functions. Top-level configs that should be present in
every fairseq application are placed in the
[global](fairseq/dataclass/configs.py) config file and added to the
`FairseqConfig` object.
Each dataclass is a plain-old-data object, similar to a `NamedTuple`. These
classes are decorated with a `@dataclass` decorator, and typically inherit from
`FairseqDataclass` (which adds some functionality for backward compatibility).
Each field must have a type, and generally has metadata (such as a help string)
and a default value. Only primitive types or other config objects are allowed as
data types for each field.
#### Example:
```python
from dataclasses import dataclass, field
from fairseq.dataclass import FairseqDataclass
@dataclass
class InteractiveConfig(FairseqDataclass):
buffer_size: int = field(
default=0,
metadata={
"help": "read this many sentences into a buffer before processing them"
},
)
input: str = field(
default="-",
metadata={"help": "file to read from; use - for stdin"},
)
```
### Inheriting values
Some components require sharing a value. For example, a learning rate scheduler
and an optimizer may both need to know the initial learning rate value. One can
declare a field that, by default, will inherit its value from another config
node in the same hierarchy:
```python
@dataclass
class FairseqAdamConfig(FairseqDataclass):
...
lr: List[float] = II("optimization.lr")
...
```
`II("optimization.lr")` is syntactic sugar for `"${optimization.lr}"`, which is
the value one can use in a YAML config file or through command line to achieve
the same effect. Note that this assumes that there is an "optimization" config
object in the root config and it has a field called "lr".
### Tasks and Models
Creating Tasks and Models works the same as before, except that legacy
implementations now inherit from `LegacyFairseq*` base classes, while new
components inherit from `FairseqTask` and `FairseqModel` and provide a dataclass
to the `register_*()` functions.
#### Task example:
```python
@dataclass
class LanguageModelingConfig(FairseqDataclass):
data: Optional[str] = field(
default=None, metadata={"help": "path to data directory"}
)
...
@register_task("language_modeling", dataclass=LanguageModelingConfig)
class LanguageModelingTask(FairseqTask):
...
@classmethod
def setup_task(cls, cfg: LanguageModelingConfig):
...
```
#### Model example:
```python
@dataclass
class TransformerLanguageModelConfig(FairseqDataclass):
activation_fn: ChoiceEnum(utils.get_available_activation_fns()) = field(
default="relu", metadata={"help": "activation function to use"}
)
dropout: float = field(default=0.1, metadata={"help": "dropout probability"})
...
@register_model("transformer_lm", dataclass=TransformerLanguageModelConfig)
class TransformerLanguageModel(FairseqLanguageModel):
...
@classmethod
def build_model(cls, cfg: TransformerLanguageModelConfig, task: FairseqTask):
...
```
### Other components
Other components work as before, but they now take their configuration dataclass
as the only constructor argument:
```python
@dataclass
class MosesTokenizerConfig(FairseqDataclass):
source_lang: str = field(default="en", metadata={"help": "source language"})
...
@register_tokenizer("moses", dataclass=MosesTokenizerConfig)
class MosesTokenizer(object):
def __init__(self, cfg: MosesTokenizerConfig):
...
```
Note that if you are adding a new registry for a new set of components, you need
to add it to the `FairseqConfig` object in `fairseq/dataclass/configs.py`:
```python
@dataclass
class FairseqConfig(object):
...
my_new_registry: Any = None
```
## Training with `fairseq-hydra-train`
To fully take advantage of configuration flexibility offered by Hydra, you may
want to train new models using the `fairseq-hydra-train` entry point. Legacy CLI
tools such as `fairseq-train` will remain supported for the foreseeable future
but will be deprecated eventually.
On startup, Hydra will create a configuration object that contains a hierarchy
of all the necessary dataclasses populated with their default values in the
code. The default values are overwritten by values found in YAML files in
`fairseq/config` directory (which currently sets minimal defaults) and then
further overwritten by values provided through command line arguments.
Some of the most common use cases are shown below:
### 1. Override default values through command line:
```shell script
$ fairseq-hydra-train \
distributed_training.distributed_world_size=1 \
dataset.batch_size=2 \
task.data=data-bin \
model=transformer_lm/transformer_lm_gpt \
task=language_modeling \
optimization.max_update=5000
```
Note that along with explicitly providing values for parameters such as
`dataset.batch_size`, this also tells Hydra to overlay configuration found in
`fairseq/config/model/transformer_lm/transformer_lm_gpt.yaml` over the default
values in the dataclass. If you want to train a model without specifying a
particular architecture you can simply specify `model=transformer_lm`. This only
works for migrated tasks and models.
### 2. Replace bundled configs with an external config:
```shell script
$ fairseq-hydra-train \
--config-dir /path/to/external/configs \
--config-name wiki103
```
where `/path/to/external/configs/wiki103.yaml` contains:
```yaml
# @package _group_
model:
_name: transformer_lm
distributed_training:
distributed_world_size: 1
dataset:
batch_size: 2
task:
_name: language_modeling
data: /path/to/data
add_bos_token: false
max_target_positions: 1024
optimization:
max_update: 50000
lr: [ 0.25 ]
criterion: cross_entropy
optimizer: adam
lr_scheduler:
_name: cosine
```
Note that here bundled configs from `fairseq/config` directory are not used,
however the defaults from each dataclass will still be used (unless overwritten
by your external config).
Additionally you can choose to break up your configs by creating a directory
structure in the same location as your main config file, with the names of the
top-level fields (such as "model", "dataset", etc), and placing config files
with meaningful names that would populate that specific section of your
top-level config file (for example, you might have
`model/small_transformer_lm.yaml`, `model/big_transformer_lm.yaml`, etc). You
can then specify the correct configuration via command line, defaults in the
main config, or even launch all of them as a sweep (see Hydra documentation on
how to do this).
### 3. Add an external config directory to Hydra search path:
This allows combining default configuration (including using any bundled config
files), while specifying your own config files for some parts of the
configuration.
```shell script
$ fairseq-hydra-train \
distributed_training.distributed_world_size=1 \
dataset.batch_size=2 \
task.data=/path/to/data/ \
model=transformer_lm/2_layers \
task=language_modeling \
optimization.max_update=5000 \
--config-dir /path/to/external/configs
```
where `/path/to/external/configs` has the following structure:
```
.
+-- model
| +-- transformer_lm
| | +-- 2_layers.yaml
```
and `2_layers.yaml` contains a copy of `transformer_lm_gpt.yaml` but with
`decoder_layers` set to 2. You can add other configs to configure other
components as well.
| COCO-LM/fairseq/docs/hydra_integration.md/0 | {
"file_path": "COCO-LM/fairseq/docs/hydra_integration.md",
"repo_id": "COCO-LM",
"token_count": 2942
} | 164 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from torch.optim import Adagrad
from fairseq.optim import LegacyFairseqOptimizer, register_optimizer
@register_optimizer("adagrad_with_grad_clip")
class FairseqAdagradWithGradClip(LegacyFairseqOptimizer):
def __init__(self, args, params):
super().__init__(args)
self._optimizer = AdagradWithGradClip(params, **self.optimizer_config)
@staticmethod
def add_args(parser):
"""Add optimizer-specific arguments to the parser."""
# fmt: off
parser.add_argument('--weight-decay', '--wd', default=0.0, type=float, metavar='WD',
help='weight decay')
parser.add_argument('--adagrad-clip', default=0.0, type=float, metavar='D',
help='internal grad clip')
# fmt: on
@property
def optimizer_config(self):
"""
Return a kwarg dictionary that will be used to override optimizer
args stored in checkpoints. This allows us to load a checkpoint and
resume training using a different set of optimizer args, e.g., with a
different learning rate.
"""
return {
"lr": self.args.lr[0],
"weight_decay": self.args.weight_decay,
"grad_clip": self.args.adagrad_clip,
}
@property
def supports_flat_params(self):
return False
def _clip_grad(clr, grad, group_grad_clip):
if group_grad_clip > 0:
norm = grad.norm(2).item()
if norm > group_grad_clip:
clr *= group_grad_clip / (norm + 1e-10)
return clr
class AdagradWithGradClip(Adagrad):
"""Adagrad algorithm with custom gradient clipping"""
def __init__(
self,
params,
lr=1e-2,
lr_decay=0,
weight_decay=0,
initial_accumulator_value=0,
grad_clip=0,
):
Adagrad.__init__(
self,
params,
lr=lr,
lr_decay=lr_decay,
weight_decay=weight_decay,
initial_accumulator_value=initial_accumulator_value,
)
self.defaults["grad_clip"] = grad_clip
self.param_groups[0].setdefault("grad_clip", grad_clip)
def step(self, closure=None):
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group["params"]:
if p.grad is None:
continue
grad = p.grad.data
state = self.state[p]
state["step"] += 1
if group["weight_decay"] != 0:
if p.grad.data.is_sparse:
raise RuntimeError(
"weight_decay option is "
"not compatible with sparse "
"gradients"
)
grad = grad.add(group["weight_decay"], p.data)
clr = group["lr"] / (1 + (state["step"] - 1) * group["lr_decay"])
# clip
clr = _clip_grad(clr=clr, grad=grad, group_grad_clip=group["grad_clip"])
if grad.is_sparse:
# the update is non-linear so indices must be unique
grad = grad.coalesce()
grad_indices = grad._indices()
grad_values = grad._values()
size = grad.size()
def make_sparse(values):
constructor = grad.new
if grad_indices.dim() == 0 or values.dim() == 0:
return constructor().resize_as_(grad)
return constructor(grad_indices, values, size)
state["sum"].add_(make_sparse(grad_values.pow(2)))
std = state["sum"]._sparse_mask(grad)
std_values = std._values().sqrt_().add_(1e-10)
p.data.add_(-clr, make_sparse(grad_values / std_values))
else:
state["sum"].addcmul_(1, grad, grad)
std = state["sum"].sqrt().add_(1e-10)
p.data.addcdiv_(-clr, grad, std)
return loss
| COCO-LM/fairseq/examples/adaptive_span/adagrad_with_grad_clip.py/0 | {
"file_path": "COCO-LM/fairseq/examples/adaptive_span/adagrad_with_grad_clip.py",
"repo_id": "COCO-LM",
"token_count": 2256
} | 165 |
# Neural Machine Translation with Byte-Level Subwords
https://arxiv.org/abs/1909.03341
We provide an implementation of byte-level byte-pair encoding (BBPE), taking IWSLT 2017 Fr-En translation as
example.
## Data
Get data and generate fairseq binary dataset:
```bash
bash ./get_data.sh
```
## Model Training
Train Transformer model with Bi-GRU embedding contextualization (implemented in `gru_transformer.py`):
```bash
# VOCAB=bytes
# VOCAB=chars
VOCAB=bbpe2048
# VOCAB=bpe2048
# VOCAB=bbpe4096
# VOCAB=bpe4096
# VOCAB=bpe16384
```
```bash
fairseq-train "data/bin_${VOCAB}" --task translation --user-dir examples/byte_level_bpe/gru_transformer \
--arch gru_transformer --encoder-layers 2 --decoder-layers 2 --dropout 0.3 --share-all-embeddings \
--optimizer adam --adam-betas '(0.9, 0.98)' \
--lr 5e-4 --lr-scheduler inverse_sqrt --warmup-updates 4000 \
--criterion label_smoothed_cross_entropy --label-smoothing 0.1 \
--log-format 'simple' --log-interval 100 --save-dir "checkpoints/${VOCAB}" \
--batch-size 100 --max-update 100000 --update-freq 2
```
## Generation
`fairseq-generate` requires bytes (BBPE) decoder to convert byte-level representation back to characters:
```bash
# BPE="--bpe bytes"
# BPE="--bpe characters"
BPE="--bpe byte_bpe --sentencepiece-model-path data/spm_bbpe2048.model"
# BPE="--bpe sentencepiece --sentencepiece-model data/spm_bpe2048.model"
# BPE="--bpe byte_bpe --sentencepiece-model-path data/spm_bbpe4096.model"
# BPE="--bpe sentencepiece --sentencepiece-model data/spm_bpe4096.model"
# BPE="--bpe sentencepiece --sentencepiece-model data/spm_bpe16384.model"
```
```bash
fairseq-generate "data/bin_${VOCAB}" --task translation --user-dir examples/byte_level_bpe/gru_transformer \
--source-lang fr --gen-subset test --sacrebleu --path "checkpoints/${VOCAB}/checkpoint_last.pt" \
--tokenizer moses --moses-target-lang en ${BPE}
```
When using `fairseq-interactive`, bytes (BBPE) encoder/decoder is required to tokenize input data and detokenize model predictions:
```bash
fairseq-interactive "data/bin_${VOCAB}" --task translation --user-dir examples/byte_level_bpe/gru_transformer \
--path "checkpoints/${VOCAB}/checkpoint_last.pt" --input data/test.fr --tokenizer moses --moses-source-lang fr \
--moses-target-lang en ${BPE} --buffer-size 1000 --max-tokens 10000
```
## Results
| Vocabulary | Model | BLEU |
|:-------------:|:-------------:|:-------------:|
| Joint BPE 16k ([Kudo, 2018](https://arxiv.org/abs/1804.10959)) | 512d LSTM 2+2 | 33.81 |
| Joint BPE 16k | Transformer base 2+2 (w/ GRU) | 36.64 (36.72) |
| Joint BPE 4k | Transformer base 2+2 (w/ GRU) | 35.49 (36.10) |
| Joint BBPE 4k | Transformer base 2+2 (w/ GRU) | 35.61 (35.82) |
| Joint BPE 2k | Transformer base 2+2 (w/ GRU) | 34.87 (36.13) |
| Joint BBPE 2k | Transformer base 2+2 (w/ GRU) | 34.98 (35.43) |
| Characters | Transformer base 2+2 (w/ GRU) | 31.78 (33.30) |
| Bytes | Transformer base 2+2 (w/ GRU) | 31.57 (33.62) |
## Citation
```
@misc{wang2019neural,
title={Neural Machine Translation with Byte-Level Subwords},
author={Changhan Wang and Kyunghyun Cho and Jiatao Gu},
year={2019},
eprint={1909.03341},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
```
## Contact
Changhan Wang ([changhan@fb.com](mailto:changhan@fb.com)),
Kyunghyun Cho ([kyunghyuncho@fb.com](mailto:kyunghyuncho@fb.com)),
Jiatao Gu ([jgu@fb.com](mailto:jgu@fb.com))
| COCO-LM/fairseq/examples/byte_level_bpe/README.md/0 | {
"file_path": "COCO-LM/fairseq/examples/byte_level_bpe/README.md",
"repo_id": "COCO-LM",
"token_count": 1325
} | 166 |
#!/bin/bash
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
source_lang=kk_KZ
target_lang=en_XX
MODEL=criss_checkpoints/criss.3rd.pt
SPM=criss_checkpoints/sentence.bpe.model
SPLIT=test
LANG_DICT=criss_checkpoints/lang_dict.txt
ENCODER_ANALYSIS=sentence_retrieval/encoder_analysis.py
SAVE_ENCODER=save_encoder.py
ENCODER_SAVE_ROOT=sentence_embeddings/$MODEL
DATA_DIR=data_tmp
INPUT_DIR=$DATA_DIR/${source_lang}-${target_lang}-tatoeba
ENCODER_SAVE_DIR=${ENCODER_SAVE_ROOT}/${source_lang}-${target_lang}
mkdir -p $ENCODER_SAVE_DIR/${target_lang}
mkdir -p $ENCODER_SAVE_DIR/${source_lang}
# Save encoder outputs for source sentences
python $SAVE_ENCODER \
${INPUT_DIR} \
--path ${MODEL} \
--task translation_multi_simple_epoch \
--lang-dict ${LANG_DICT} \
--gen-subset ${SPLIT} \
--bpe 'sentencepiece' \
--lang-pairs ${source_lang}-${target_lang} \
-s ${source_lang} -t ${target_lang} \
--sentencepiece-model ${SPM} \
--remove-bpe 'sentencepiece' \
--beam 1 \
--lang-tok-style mbart \
--encoder-save-dir ${ENCODER_SAVE_DIR}/${source_lang}
# Save encoder outputs for target sentences
python $SAVE_ENCODER \
${INPUT_DIR} \
--path ${MODEL} \
--lang-dict ${LANG_DICT} \
--task translation_multi_simple_epoch \
--gen-subset ${SPLIT} \
--bpe 'sentencepiece' \
--lang-pairs ${target_lang}-${source_lang} \
-t ${source_lang} -s ${target_lang} \
--sentencepiece-model ${SPM} \
--remove-bpe 'sentencepiece' \
--beam 1 \
--lang-tok-style mbart \
--encoder-save-dir ${ENCODER_SAVE_DIR}/${target_lang}
# Analyze sentence retrieval accuracy
python $ENCODER_ANALYSIS --langs "${source_lang},${target_lang}" ${ENCODER_SAVE_DIR}
| COCO-LM/fairseq/examples/criss/sentence_retrieval/sentence_retrieval_tatoeba.sh/0 | {
"file_path": "COCO-LM/fairseq/examples/criss/sentence_retrieval/sentence_retrieval_tatoeba.sh",
"repo_id": "COCO-LM",
"token_count": 729
} | 167 |
# LASER Language-Agnostic SEntence Representations
LASER is a library to calculate and use multilingual sentence embeddings.
You can find more information about LASER and how to use it on the official [LASER repository](https://github.com/facebookresearch/LASER).
This folder contains source code for training LASER embeddings.
## Prepare data and configuration file
Binarize your data with fairseq, as described [here](https://fairseq.readthedocs.io/en/latest/getting_started.html#data-pre-processing).
Create a json config file with this format:
```
{
"src_vocab": "/path/to/spm.src.cvocab",
"tgt_vocab": "/path/to/spm.tgt.cvocab",
"train": [
{
"type": "translation",
"id": 0,
"src": "/path/to/srclang1-tgtlang0/train.srclang1",
"tgt": "/path/to/srclang1-tgtlang0/train.tgtlang0"
},
{
"type": "translation",
"id": 1,
"src": "/path/to/srclang1-tgtlang1/train.srclang1",
"tgt": "/path/to/srclang1-tgtlang1/train.tgtlang1"
},
{
"type": "translation",
"id": 0,
"src": "/path/to/srclang2-tgtlang0/train.srclang2",
"tgt": "/path/to/srclang2-tgtlang0/train.tgtlang0"
},
{
"type": "translation",
"id": 1,
"src": "/path/to/srclang2-tgtlang1/train.srclang2",
"tgt": "/path/to/srclang2-tgtlang1/train.tgtlang1"
},
...
],
"valid": [
{
"type": "translation",
"id": 0,
"src": "/unused",
"tgt": "/unused"
}
]
}
```
where paths are paths to binarized indexed fairseq dataset files.
`id` represents the target language id.
## Training Command Line Example
```
fairseq-train \
/path/to/configfile_described_above.json \
--user-dir examples/laser/laser_src \
--log-interval 100 --log-format simple \
--task laser --arch laser_lstm \
--save-dir . \
--optimizer adam \
--lr 0.001 \
--lr-scheduler inverse_sqrt \
--clip-norm 5 \
--warmup-updates 90000 \
--update-freq 2 \
--dropout 0.0 \
--encoder-dropout-out 0.1 \
--max-tokens 2000 \
--max-epoch 50 \
--encoder-bidirectional \
--encoder-layers 5 \
--encoder-hidden-size 512 \
--decoder-layers 1 \
--decoder-hidden-size 2048 \
--encoder-embed-dim 320 \
--decoder-embed-dim 320 \
--decoder-lang-embed-dim 32 \
--warmup-init-lr 0.001 \
--disable-validation
```
## Applications
We showcase several applications of multilingual sentence embeddings
with code to reproduce our results (in the directory "tasks").
* [**Cross-lingual document classification**](https://github.com/facebookresearch/LASER/tree/master/tasks/mldoc) using the
[*MLDoc*](https://github.com/facebookresearch/MLDoc) corpus [2,6]
* [**WikiMatrix**](https://github.com/facebookresearch/LASER/tree/master/tasks/WikiMatrix)
Mining 135M Parallel Sentences in 1620 Language Pairs from Wikipedia [7]
* [**Bitext mining**](https://github.com/facebookresearch/LASER/tree/master/tasks/bucc) using the
[*BUCC*](https://comparable.limsi.fr/bucc2018/bucc2018-task.html) corpus [3,5]
* [**Cross-lingual NLI**](https://github.com/facebookresearch/LASER/tree/master/tasks/xnli)
using the [*XNLI*](https://www.nyu.edu/projects/bowman/xnli/) corpus [4,5,6]
* [**Multilingual similarity search**](https://github.com/facebookresearch/LASER/tree/master/tasks/similarity) [1,6]
* [**Sentence embedding of text files**](https://github.com/facebookresearch/LASER/tree/master/tasks/embed)
example how to calculate sentence embeddings for arbitrary text files in any of the supported language.
**For all tasks, we use exactly the same multilingual encoder, without any task specific optimization or fine-tuning.**
## References
[1] Holger Schwenk and Matthijs Douze,
[*Learning Joint Multilingual Sentence Representations with Neural Machine Translation*](https://aclanthology.info/papers/W17-2619/w17-2619),
ACL workshop on Representation Learning for NLP, 2017
[2] Holger Schwenk and Xian Li,
[*A Corpus for Multilingual Document Classification in Eight Languages*](http://www.lrec-conf.org/proceedings/lrec2018/pdf/658.pdf),
LREC, pages 3548-3551, 2018.
[3] Holger Schwenk,
[*Filtering and Mining Parallel Data in a Joint Multilingual Space*](http://aclweb.org/anthology/P18-2037)
ACL, July 2018
[4] Alexis Conneau, Guillaume Lample, Ruty Rinott, Adina Williams, Samuel R. Bowman, Holger Schwenk and Veselin Stoyanov,
[*XNLI: Cross-lingual Sentence Understanding through Inference*](https://aclweb.org/anthology/D18-1269),
EMNLP, 2018.
[5] Mikel Artetxe and Holger Schwenk,
[*Margin-based Parallel Corpus Mining with Multilingual Sentence Embeddings*](https://arxiv.org/abs/1811.01136)
arXiv, Nov 3 2018.
[6] Mikel Artetxe and Holger Schwenk,
[*Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond*](https://arxiv.org/abs/1812.10464)
arXiv, Dec 26 2018.
[7] Holger Schwenk, Vishrav Chaudhary, Shuo Sun, Hongyu Gong and Paco Guzman,
[*WikiMatrix: Mining 135M Parallel Sentences in 1620 Language Pairs from Wikipedia*](https://arxiv.org/abs/1907.05791)
arXiv, July 11 2019.
[8] Holger Schwenk, Guillaume Wenzek, Sergey Edunov, Edouard Grave and Armand Joulin
[*CCMatrix: Mining Billions of High-Quality Parallel Sentences on the WEB*](https://arxiv.org/abs/1911.04944)
| COCO-LM/fairseq/examples/laser/README.md/0 | {
"file_path": "COCO-LM/fairseq/examples/laser/README.md",
"repo_id": "COCO-LM",
"token_count": 1998
} | 168 |
# Reducing Transformer Depth on Demand with Structured Dropout (Fan et al., 2019)
This page contains information for how to train models with LayerDrop, based on this [paper](https://arxiv.org/abs/1909.11556).
## Citation:
If you found this technique useful, please cite our paper:
```bibtex
@article{fan2019reducing,
title={Reducing Transformer Depth on Demand with Structured Dropout},
author={Fan, Angela and Grave, Edouard and Joulin, Armand},
journal={arXiv preprint arXiv:1909.11556},
year={2019}
}
```
## Pre-trained models
Model | Description | Download
---|---|---
`layerdrop_wmt_en_de_12_6` | Transformer + LayerDrop 0.2 trained on WMT16 en-de with 12 encoder and 6 decoder layers | [layerdrop_wmt_en_de_12_6.tar.gz](https://dl.fbaipublicfiles.com/fairseq/models/layerdrop_wmt_en_de_12_6.tar.gz)
`roberta_layerdrop.base` | RoBERTa Base + LayerDrop 0.2 | [roberta_layerdrop.base.tar.gz](https://dl.fbaipublicfiles.com/fairseq/models/roberta_layerdrop.base.qnli.tar.gz)
`roberta_layerdrop.large` | RoBERTa Large + LayerDrop 0.2 | [roberta_layerdrop.large.tar.gz](https://dl.fbaipublicfiles.com/fairseq/models/roberta_layerdrop.large.tar.gz)
`roberta_layerdrop.large.mnli` | `roberta_layerdrop.large` finetuned on [MNLI](http://www.nyu.edu/projects/bowman/multinli) | [roberta_layerdrop.large.mnli.tar.gz](https://dl.fbaipublicfiles.com/fairseq/models/roberta_layerdrop.large.mnli.tar.gz)
`roberta_layerdrop.large.qnli` | `roberta_layerdrop.large` finetuned on [QNLI](https://arxiv.org/abs/1804.07461) | [roberta_layerdrop.large.qnli.tar.gz](https://dl.fbaipublicfiles.com/fairseq/models/roberta_layerdrop.large.qnli.tar.gz)
Evaluate performance of these pre-trained models:
```bash
# Example for Machine Translation
fairseq-generate /path/to/bped/wmt/data --path nmt_checkpoint.pt \
--beam 8 --lenpen 0.4 \
--batch-size 64 \
--remove-bpe \
--gen-subset test > wmt16_gen.txt
bash scripts/compound_split_bleu.sh wmt16_gen.txt
# prints BLEU4 = 30.17
```
```python
# Example for RoBERTa + LayerDrop finetuned on MNLI:
from fairseq.models.roberta import RobertaModel
roberta_layerdrop = RobertaModel.from_pretrained(
'/path/to/MNLI/model',
checkpoint_file='mnli_checkpoint.pt',
data_name_or_path='/path/to/MNLI/data/MNLI-bin'
)
label_map = {0: 'contradiction', 2: 'neutral', 1: 'entailment'}
ncorrect, nsamples = 0, 0
roberta_layerdrop.cuda()
roberta_layerdrop.eval()
with open('/path/to/MNLI/data/dev_matched.tsv') as fin:
fin.readline()
for index, line in enumerate(fin):
tokens = line.strip().split('\t')
sent1, sent2, target = tokens[8], tokens[9], tokens[-1]
tokens = roberta_layerdrop.encode(sent1, sent2)
prediction = roberta_layerdrop.predict('sentence_classification_head', tokens).argmax().item()
prediction_label = label_map[prediction]
ncorrect += int(prediction_label == target)
nsamples += 1
print('| Accuracy: ', float(ncorrect)/float(nsamples))
# prints | Accuracy: 0.9026999490575649
# Example for RoBERTa + LayerDrop finetuned on QNLI:
roberta = RobertaModel.from_pretrained(
'/path/to/QNLI/model',
checkpoint_file='qnli_checkpoint.pt',
data_name_or_path='/path/to/QNLI/data/QNLI-bin'
)
label_fn = lambda label: roberta.task.label_dictionary.string(
[label + roberta.task.target_dictionary.nspecial]
)
ncorrect, nsamples = 0, 0
roberta.cuda()
roberta.eval()
with open('/path/to/QNLI/data/dev.tsv') as fin:
fin.readline()
for index, line in enumerate(fin):
tokens = line.strip().split('\t')
sent1, sent2, target = tokens[1], tokens[2], tokens[3]
tokens = roberta.encode(sent1, sent2)
prediction = roberta.predict('sentence_classification_head', tokens).argmax().item()
prediction_label = label_fn(prediction)
ncorrect += int(prediction_label == target)
nsamples += 1
print('| Accuracy: ', float(ncorrect)/float(nsamples))
# prints | Accuracy: 0.9480139117700896
```
## Example usage
To train a model with LayerDrop, add the following flags. We recommend 0.2, a value that worked well in our experiments. For decoder-only Language Models, you need only the decoder flag. For RoBERTa, which is encoder-only, you need only the encoder flag. The encoder and decoder LayerDrop values can be set differently.
```
--encoder-layerdrop 0.2 --decoder-layerdrop 0.2
```
To prune a model that has been trained with LayerDrop, add the following flags followed by a comma separated list of which layers you would like to keep.
```
--encoder-layers-to-keep 0,2,4,6,8,10,12,14 --decoder-layers-to-keep 0,2,4,6,8,10,12,14
```
Setting these flags should print a message such as:
```
| Pruning model to specified layer configuration
```
You should also see a smaller number of parameters in the model, for example the 16-Layer Transformer Language Model prints:
```
num. model params: 246933504
```
while a model pruned to 8 Layers prints:
```
num. model params: 146163712
```
If you would like to pick up training with a model that has been pruned, simply adding these flags is sufficient. If you would like to use a script that only does evaluation (no training), you may need to pass an override command. A specific example would be for language modeling:
```bash
fairseq-eval-lm /path/to/wikitext-103 \
--path /path/to/model/checkpoint.pt \
--model-overrides "{'decoder_layers_to_keep':'0,2,4,6,8,10,12,14'}"
```
This model override command overrides the training parameters and updates the model arguments so that the pruned model is run instead of the full model.
## Reproduce Paper Results
Looking to reproduce the results in the paper?
1. For Translation on WMT16 en-de, we followed this setting [here](https://github.com/pytorch/fairseq/blob/master/examples/scaling_nmt/README.md)
2. To train RoBERTa, we followed this setting [here](https://github.com/pytorch/fairseq/tree/master/examples/roberta)
3. To train Language Models on Wikitext-103, we followed this setting [here](https://github.com/pytorch/fairseq/tree/master/examples/language_model)
## Tips
1. If you would like to train large models with better performance, LayerDrop should be set to a smaller value such as 0.1 or 0.2. Too much LayerDrop will mean the model has too much regularization, so may not reach the best performance. Since LayerDrop adds regularization, you may achieve the best performance by slightly reducing the amount of standard dropout (for example, reduce by 0.1).
2. If you would like to train large models to be pruned and made smaller, LayerDrop should be set to a larger value such as 0.5 if you want to prune very aggressively (such as removing half the network or more). If you would like to prune fewer layers away, LayerDrop can be set to a smaller value such as 0.2. Our experiments were conducted with low values of LayerDrop (such as 0.1 and 0.2), for reference.
3. When pruning layers at inference time, it is best to spread out the layers remaining so they are evenly spaced throughout the network. For example, if you want to remove 50% of the network, keeping every other layer is good (see the sketch below).
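To make the last tip concrete, here is a tiny, hypothetical helper (not part of fairseq) that spreads the kept layers evenly and formats them for `--encoder-layers-to-keep` / `--decoder-layers-to-keep`:

```python
def evenly_spaced_layers_to_keep(num_layers: int, num_to_keep: int) -> str:
    # spread the kept layer indices evenly over the original stack and format
    # them as the comma-separated list expected by the pruning flags
    stride = num_layers / num_to_keep
    kept = sorted({int(i * stride) for i in range(num_to_keep)})
    return ",".join(str(i) for i in kept)

print(evenly_spaced_layers_to_keep(16, 8))  # prints: 0,2,4,6,8,10,12,14
```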
## FAQ
1. How did the sharing layers experiment work? In an appendix (https://openreview.net/pdf?id=SylO2yStDr) we added an experiment on Wikitext-103 language modeling that combined LayerDrop with Weight Sharing. We shared chunks of 2 layers such that every other layer had shared weights. For example, if our network has layers 1 through 6, then layer 1 and 2 are shared, layer 3 and 4 are shared, and layer 5 and 6 are shared.
2. LayerDrop hasn't been helping in my setting? During training time, LayerDrop can help regularize your network. This is most important if your network is already overfitting - if your network is underfitting, it is possible LayerDrop is adding too much regularization. We recommend using smaller values (such as 0.1 or 0.2) and also decreasing the quantity of standard dropout (for example, reduce by 0.1).
3. Can you train a model without LayerDrop and finetune with LayerDrop (e.g. for BERT)? In our experiments, we did not see great performance. Models such as RoBERTa have trained for a long time in the pre-training setting, so only finetuning with LayerDrop for a few epochs on a downstream task such as MNLI does not achieve the robustness required for successful pruning.
## Having an issue or have a question?
Please open an issue in this repository with the details of your question. Thanks!
| COCO-LM/fairseq/examples/layerdrop/README.md/0 | {
"file_path": "COCO-LM/fairseq/examples/layerdrop/README.md",
"repo_id": "COCO-LM",
"token_count": 2747
} | 169 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import argparse
import pandas as pd
import sys
WORKDIR_ROOT = os.environ.get('WORKDIR_ROOT', None)
if WORKDIR_ROOT is None or not WORKDIR_ROOT.strip():
    print('please specify your working directory root in OS environment variable WORKDIR_ROOT. Exiting...')
sys.exit(-1)
def load_langs(path):
with open(path) as fr:
langs = [l.strip() for l in fr]
return langs
def load_sentences(raw_data, split, direction):
src, tgt = direction.split('-')
src_path = f"{raw_data}/{split}.{direction}.{src}"
tgt_path = f"{raw_data}/{split}.{direction}.{tgt}"
if os.path.exists(src_path) and os.path.exists(tgt_path):
return [(src, open(src_path).read().splitlines()), (tgt, open(tgt_path).read().splitlines())]
else:
return []
def swap_direction(d):
src, tgt = d.split('-')
return f'{tgt}-{src}'
def get_all_test_data(raw_data, directions, split='test'):
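    # build a mapping from every test sentence to the set of languages it
    # occurs in, covering both directions of each language pair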
test_data = [
x
for dd in directions
for d in [dd, swap_direction(dd)]
for x in load_sentences(raw_data, split, d)
]
# all_test_data = {s for _, d in test_data for s in d}
all_test_data = {}
for lang, d in test_data:
for s in d:
s = s.strip()
lgs = all_test_data.get(s, set())
lgs.add(lang)
all_test_data[s] = lgs
return all_test_data, test_data
def check_train_sentences(src_path, tgt_path, direction, all_test_data, mess_up_train={}):
# src, tgt = direction.split('-')
print(f'check training data for {direction} in {src_path} and {tgt_path}')
size = 0
overlapped_size_counted_dup = 0
if not os.path.exists(tgt_path) or not os.path.exists(src_path):
return mess_up_train, size, overlapped_size_counted_dup
with open(src_path) as f, open(tgt_path) as g:
for src_line, tgt_line in zip(f, g):
s = src_line.strip()
t = tgt_line.strip()
size += 1
if s in all_test_data:
langs = mess_up_train.get(s, set())
langs.add(direction)
mess_up_train[s] = langs
overlapped_size_counted_dup += 1
if t in all_test_data:
langs = mess_up_train.get(t, set())
langs.add(direction)
mess_up_train[t] = langs
overlapped_size_counted_dup += 1
print(f'{direction}: size={size}, overlapped={overlapped_size_counted_dup}')
return mess_up_train, size, overlapped_size_counted_dup
def check_train_all(raw_data, directions, all_test_data):
mess_up_train = {}
data_sizes = {}
# raw_data = '~chau/data-bin/MineBART/multilingual_mined_100M/en_XX/et_EE-en_XX/all.{en_XX, et_EE}'
    print(f'checking training data against # {len(all_test_data)} sentences')
print(f'example test data: ', [s for i, s in enumerate(all_test_data.keys()) if i < 10])
for direction in directions:
src, tgt = direction.split('-')
path = f'{raw_data}/en_XX/{direction}/all'
src_path = f'{path}.{src}'
tgt_path = f'{path}.{tgt}'
print(f'checking {src_path} {tgt_path}')
_, size, overlapped_size_counted_dup = check_train_sentences(src_path, tgt_path, direction, all_test_data, mess_up_train)
data_sizes[direction] = (size, overlapped_size_counted_dup)
return mess_up_train, data_sizes
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--folder", type=str, required=True,
help="the data folder ")
parser.add_argument("--test-data", type=str, required=True,
help="the test data folder ")
parser.add_argument('--directions', type=str, default=None, required=False)
args = parser.parse_args()
directions = args.directions.split(',')
directions = sorted(set(directions))
results = []
# print(f'checking where {args.split} split data are in training')
# print(f'direction\tcommon_count\tsrc common\ttgt common\tfrom_size\tto_size')
raw_data = args.folder
all_test_data, test_data = get_all_test_data(args.test_data, directions, split='test')
mess_up_train, data_sizes = check_train_all(raw_data, directions, all_test_data)
print(data_sizes)
if __name__ == "__main__":
main()
| COCO-LM/fairseq/examples/multilingual/data_scripts/check_valid_test_overlaps.py/0 | {
"file_path": "COCO-LM/fairseq/examples/multilingual/data_scripts/check_valid_test_overlaps.py",
"repo_id": "COCO-LM",
"token_count": 2019
} | 170 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#!/bin/python
import fasttext
from multiprocessing import Pool
import contextlib
import sys
import argparse
from functools import partial
import io
model = None
def init(model_path):
global model
model = fasttext.load_model(model_path)
def pred(lines):
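    # fastText returns labels like '__label__en'; strip the 9-character
    # '__label__' prefix to keep just the language id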
return lines, [model.predict(line.strip())[0][0][9:] for line in lines]
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--model", type=str, required=True,
help="model to load")
parser.add_argument("--inputs", nargs="+", default=['-'],
help="input files to filter")
parser.add_argument("--langs", nargs="+", required=True,
help="lang ids of each input file")
parser.add_argument("--outputs", nargs="+", default=['-'],
help="path to save lid filtered outputs")
parser.add_argument("--num-workers", type=int, metavar="N", default=10,
help="number of processes in parallel")
args = parser.parse_args()
assert len(args.inputs) == len(args.langs) and len(args.inputs) == len(args.outputs)
with contextlib.ExitStack() as stack:
inputs = [
stack.enter_context(open(input, "r", encoding="utf-8", newline="\n", errors="replace"))
if input != "-" else io.TextIOWrapper(sys.stdin.buffer, encoding='utf-8', errors="replace")
for input in args.inputs
]
outputs = [
stack.enter_context(open(output, "w", encoding="utf-8", newline="\n"))
if output != "-" else sys.stdout
for output in args.outputs
]
with Pool(args.num_workers, initializer=partial(init, args.model)) as p:
skip_cnt = 0
for lines, preds in p.imap(pred, list(zip(*inputs)), chunksize=500):
if not all(a == b for a, b in zip(preds, args.langs)):
skip_cnt += 1
continue
for line, output_h in zip(lines, outputs):
print(line.strip(), file=output_h)
print(f"Skipped {skip_cnt} lines.")
if __name__ == "__main__":
main()
| COCO-LM/fairseq/examples/multilingual/data_scripts/utils/fasttext_multi_filter.py/0 | {
"file_path": "COCO-LM/fairseq/examples/multilingual/data_scripts/utils/fasttext_multi_filter.py",
"repo_id": "COCO-LM",
"token_count": 1020
} | 171 |
# Paraphrasing with round-trip translation and mixture of experts
Machine translation models can be used to paraphrase text by translating it to
an intermediate language and back (round-trip translation).
This example shows how to paraphrase text by first passing it to an
English-French translation model, followed by a French-English [mixture of
experts translation model](/examples/translation_moe).
##### 0. Setup
Clone fairseq from source and install necessary dependencies:
```bash
git clone https://github.com/pytorch/fairseq.git
cd fairseq
pip install --editable .
pip install sacremoses sentencepiece
```
##### 1. Download models
```bash
wget https://dl.fbaipublicfiles.com/fairseq/models/paraphraser.en-fr.tar.gz
wget https://dl.fbaipublicfiles.com/fairseq/models/paraphraser.fr-en.hMoEup.tar.gz
tar -xzvf paraphraser.en-fr.tar.gz
tar -xzvf paraphraser.fr-en.hMoEup.tar.gz
```
##### 2. Paraphrase
```bash
python examples/paraphraser/paraphrase.py \
--en2fr paraphraser.en-fr \
--fr2en paraphraser.fr-en.hMoEup
# Example input:
# The new date for the Games, postponed for a year in response to the coronavirus pandemic, gives athletes time to recalibrate their training schedules.
# Example outputs:
# Delayed one year in response to the coronavirus pandemic, the new date of the Games gives athletes time to rebalance their training schedule.
# The new date of the Games, which was rescheduled one year in response to the coronavirus (CV) pandemic, gives athletes time to rebalance their training schedule.
# The new date of the Games, postponed one year in response to the coronavirus pandemic, provides athletes with time to rebalance their training schedule.
# The Games' new date, postponed one year in response to the coronavirus pandemic, gives athletes time to rebalance their training schedule.
# The new Games date, postponed one year in response to the coronavirus pandemic, gives the athletes time to rebalance their training schedule.
# The new date of the Games, which was postponed one year in response to the coronavirus pandemic, gives the athletes time to rebalance their training schedule.
# The new date of the Games, postponed one year in response to the coronavirus pandemic, gives athletes time to rebalance their training schedule.
# The new date of the Games, postponed one year in response to the coronavirus pandemic, gives athletes time to re-balance their training schedule.
# The new date of the Games, postponed one year in response to the coronavirus pandemic, gives the athletes time to rebalance their schedule of training.
# The new date of the Games, postponed one year in response to the pandemic of coronavirus, gives the athletes time to rebalance their training schedule.
```
| COCO-LM/fairseq/examples/paraphraser/README.md/0 | {
"file_path": "COCO-LM/fairseq/examples/paraphraser/README.md",
"repo_id": "COCO-LM",
"token_count": 757
} | 172 |
# Finetuning RoBERTa on Commonsense QA
We follow a similar approach to [finetuning RACE](../README.race.md). Specifically
for each question we construct five inputs, one for each of the five candidate
answer choices. Each input is constructed by concatenating the question and
candidate answer. We then encode each input and pass the resulting "[CLS]"
representations through a fully-connected layer to predict the correct answer.
We train with a standard cross-entropy loss.
We also found it helpful to prepend a prefix of `Q:` to the question and `A:` to
the answer. The complete input format is:
```
<s> Q: Where would I not want a fox? </s> A: hen house </s>
```
Our final submission is based on a hyperparameter search over the learning rate
(1e-5, 2e-5, 3e-5), batch size (8, 16), number of training steps (2000, 3000,
4000) and random seed. We selected the model with the best performance on the
development set after 100 trials.
### 1) Download data from the Commonsense QA website (https://www.tau-nlp.org/commonsenseqa)
```bash
bash examples/roberta/commonsense_qa/download_cqa_data.sh
```
### 2) Finetune
```bash
MAX_UPDATES=3000 # Number of training steps.
WARMUP_UPDATES=150 # Linearly increase LR over this many steps.
LR=1e-05 # Peak LR for polynomial LR scheduler.
MAX_SENTENCES=16 # Batch size.
SEED=1 # Random seed.
ROBERTA_PATH=/path/to/roberta/model.pt
DATA_DIR=data/CommonsenseQA
# we use the --user-dir option to load the task from
# the examples/roberta/commonsense_qa directory:
FAIRSEQ_PATH=/path/to/fairseq
FAIRSEQ_USER_DIR=${FAIRSEQ_PATH}/examples/roberta/commonsense_qa
CUDA_VISIBLE_DEVICES=0 fairseq-train --fp16 --ddp-backend=legacy_ddp \
$DATA_DIR \
--user-dir $FAIRSEQ_USER_DIR \
--restore-file $ROBERTA_PATH \
--reset-optimizer --reset-dataloader --reset-meters \
--no-epoch-checkpoints --no-last-checkpoints --no-save-optimizer-state \
--best-checkpoint-metric accuracy --maximize-best-checkpoint-metric \
--task commonsense_qa --init-token 0 --bpe gpt2 \
--arch roberta_large --max-positions 512 \
--dropout 0.1 --attention-dropout 0.1 --weight-decay 0.01 \
--criterion sentence_ranking --num-classes 5 \
--optimizer adam --adam-betas '(0.9, 0.98)' --adam-eps 1e-06 --clip-norm 0.0 \
--lr-scheduler polynomial_decay --lr $LR \
--warmup-updates $WARMUP_UPDATES --total-num-update $MAX_UPDATES \
--batch-size $MAX_SENTENCES \
--max-update $MAX_UPDATES \
--log-format simple --log-interval 25 \
--seed $SEED
```
The above command assumes training on 1 GPU with 32GB of RAM. For GPUs with
less memory, decrease `--batch-size` and increase `--update-freq`
accordingly to compensate.
### 3) Evaluate
```python
import json
import torch
from fairseq.models.roberta import RobertaModel
from examples.roberta import commonsense_qa # load the Commonsense QA task
roberta = RobertaModel.from_pretrained('checkpoints', 'checkpoint_best.pt', 'data/CommonsenseQA')
roberta.eval() # disable dropout
roberta.cuda() # use the GPU (optional)
nsamples, ncorrect = 0, 0
with open('data/CommonsenseQA/valid.jsonl') as h:
for line in h:
example = json.loads(line)
scores = []
for choice in example['question']['choices']:
input = roberta.encode(
'Q: ' + example['question']['stem'],
'A: ' + choice['text'],
no_separator=True
)
score = roberta.predict('sentence_classification_head', input, return_logits=True)
scores.append(score)
pred = torch.cat(scores).argmax()
answer = ord(example['answerKey']) - ord('A')
nsamples += 1
if pred == answer:
ncorrect += 1
print('Accuracy: ' + str(ncorrect / float(nsamples)))
# Accuracy: 0.7846027846027847
```
The above snippet is not batched, which makes it quite slow. See [instructions
for batched prediction with RoBERTa](https://github.com/pytorch/fairseq/tree/master/examples/roberta#batched-prediction).
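
If you want a rough idea of what batching looks like here, the unofficial sketch below pads the five encoded question/choice pairs into one batch with fairseq's `collate_tokens` (RoBERTa's padding index is 1) and scores them in a single forward pass. The helper name and exact shapes are assumptions; prefer the linked instructions for the supported recipe.

```python
from fairseq.data.data_utils import collate_tokens

def score_choices_batched(roberta, question, choice_texts):
    # encode every question/choice pair, pad them into a single batch and
    # score all candidates with one forward pass instead of five
    encoded = [
        roberta.encode('Q: ' + question, 'A: ' + text, no_separator=True)
        for text in choice_texts
    ]
    batch = collate_tokens(encoded, pad_idx=1)  # 1 is RoBERTa's padding index
    scores = roberta.predict('sentence_classification_head', batch, return_logits=True)
    return scores.view(-1).argmax().item()  # index of the highest-scoring choice
```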
| COCO-LM/fairseq/examples/roberta/commonsense_qa/README.md/0 | {
"file_path": "COCO-LM/fairseq/examples/roberta/commonsense_qa/README.md",
"repo_id": "COCO-LM",
"token_count": 1525
} | 173 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
import torch.nn.functional as F
from fairseq import metrics, utils
from fairseq.criterions import FairseqCriterion, register_criterion
from fairseq.criterions.label_smoothed_cross_entropy import label_smoothed_nll_loss
@register_criterion("label_smoothed_cross_entropy_r3f")
class LabelSmoothedCrossEntropyR3FCriterion(FairseqCriterion):
def __init__(
self, task, sentence_avg, label_smoothing, eps, r3f_lambda, noise_type
):
super().__init__(task)
self.sentence_avg = sentence_avg
self.label_smoothing = label_smoothing
self.eps = eps
self.r3f_lambda = r3f_lambda
self.noise_type = noise_type
if self.noise_type in {"normal"}:
self.noise_sampler = torch.distributions.normal.Normal(
loc=0.0, scale=self.eps
)
elif self.noise_type == "uniform":
self.noise_sampler = torch.distributions.uniform.Uniform(
low=-self.eps, high=self.eps
)
else:
raise Exception(f"unrecognized noise type {self.noise_type}")
@staticmethod
def add_args(parser):
"""Add criterion-specific arguments to the parser."""
# fmt: off
parser.add_argument('--label-smoothing', default=0., type=float, metavar='D',
help='epsilon for label smoothing, 0 means no label smoothing')
parser.add_argument('--eps', type=float, default=1e-5,
help='noise eps')
parser.add_argument('--r3f-lambda', type=float, default=1.0,
help='lambda for combining logistic loss and noisy KL loss')
parser.add_argument('--noise-type', type=str, default='normal',
choices=['normal', 'uniform'],
help='type of noises')
# fmt: on
def _get_symm_kl(self, noised_logits, input_logits):
return (
F.kl_div(
F.log_softmax(noised_logits, dim=-1, dtype=torch.float32),
F.softmax(input_logits, dim=-1, dtype=torch.float32),
None,
None,
"sum",
)
+ F.kl_div(
F.log_softmax(input_logits, dim=-1, dtype=torch.float32),
F.softmax(noised_logits, dim=-1, dtype=torch.float32),
None,
None,
"sum",
)
) / noised_logits.size(0)
def forward(self, model, sample, reduce=True):
"""Compute the loss for the given sample.
Returns a tuple with three elements:
1) the loss
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
"""
token_embeddings = model.encoder.embed_tokens(sample["net_input"]["src_tokens"])
input_logits, extra = model(**sample["net_input"])
loss, nll_loss = self.compute_loss(
model, (input_logits, extra), sample, reduce=reduce
)
sample_size = (
sample["target"].size(0) if self.sentence_avg else sample["ntokens"]
)
if model.training:
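            # R3F: sample noise from the configured distribution, perturb the
            # token embeddings, re-run the model on the noised embeddings, and
            # later add the symmetric KL between the clean and noised output
            # distributions to the loss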
noise = self.noise_sampler.sample(sample_shape=token_embeddings.shape).to(
token_embeddings
)
noised_embeddings = token_embeddings.clone() + noise
noised_logits, _ = model(
**sample["net_input"], token_embeddings=noised_embeddings
)
symm_kl = self._get_symm_kl(noised_logits, input_logits)
if model.training:
symm_kl = symm_kl * sample_size
loss = loss + self.r3f_lambda * symm_kl
logging_output = {
"loss": loss.data,
"nll_loss": nll_loss.data,
"ntokens": sample["ntokens"],
"nsentences": sample["target"].size(0),
"sample_size": sample_size,
}
if model.training:
logging_output.update(
symm_kl=utils.item(symm_kl.data) if reduce else symm_kl.data
)
return loss, sample_size, logging_output
def compute_loss(self, model, net_output, sample, reduce=True):
lprobs = model.get_normalized_probs(net_output, log_probs=True)
lprobs = lprobs.view(-1, lprobs.size(-1))
target = model.get_targets(sample, net_output).view(-1, 1)
loss, nll_loss = label_smoothed_nll_loss(
lprobs,
target,
self.label_smoothing,
ignore_index=self.padding_idx,
reduce=reduce,
)
return loss, nll_loss
@staticmethod
def reduce_metrics(logging_outputs) -> None:
"""Aggregate logging outputs from data parallel training."""
loss_sum = sum(log.get("loss", 0) for log in logging_outputs)
nll_loss_sum = sum(log.get("nll_loss", 0) for log in logging_outputs)
ntokens = sum(log.get("ntokens", 0) for log in logging_outputs)
sample_size = sum(log.get("sample_size", 0) for log in logging_outputs)
symm_kl_sum = sum(log.get("symm_kl", 0) for log in logging_outputs)
metrics.log_scalar("symm_kl", symm_kl_sum / sample_size, sample_size, round=3)
metrics.log_scalar(
"loss", loss_sum / sample_size / math.log(2), sample_size, round=3
)
metrics.log_scalar(
"nll_loss", nll_loss_sum / ntokens / math.log(2), ntokens, round=3
)
metrics.log_derived(
"ppl", lambda meters: utils.get_perplexity(meters["nll_loss"].avg)
)
@staticmethod
def logging_outputs_can_be_summed() -> bool:
"""
Whether the logging outputs returned by `forward` can be summed
across workers prior to calling `reduce_metrics`. Setting this
        to True will improve distributed training speed.
"""
return True
| COCO-LM/fairseq/examples/rxf/rxf_src/label_smoothed_cross_entropy_r3f.py/0 | {
"file_path": "COCO-LM/fairseq/examples/rxf/rxf_src/label_smoothed_cross_entropy_r3f.py",
"repo_id": "COCO-LM",
"token_count": 2934
} | 174 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from . import register_scorer
from .scorer import SimulScorer
@register_scorer("text")
class SimulTextScorer(SimulScorer):
def __init__(self, args):
super().__init__(args)
self.data = {
"src": self._load_text_file(args.src_file, split=True),
"tgt": self._load_text_file(args.tgt_file, split=False),
}
def send_src(self, sent_id, *args):
if self.steps[sent_id] >= len(self.data["src"][sent_id]):
dict_to_return = {
"sent_id": sent_id,
"segment_id": self.steps[sent_id],
"segment": self.eos,
}
# Consider EOS
self.steps[sent_id] = len(self.data["src"][sent_id]) + 1
else:
dict_to_return = {
"sent_id": sent_id,
"segment_id": self.steps[sent_id],
"segment": self.data["src"][sent_id][self.steps[sent_id]],
}
self.steps[sent_id] += 1
return dict_to_return
def src_lengths(self):
# +1 for eos
return [len(sent) + 1 for sent in self.data["src"]]
| COCO-LM/fairseq/examples/simultaneous_translation/eval/scorers/text_scorer.py/0 | {
"file_path": "COCO-LM/fairseq/examples/simultaneous_translation/eval/scorers/text_scorer.py",
"repo_id": "COCO-LM",
"token_count": 644
} | 175 |
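The scorer above hands a simultaneous-translation agent one source segment per request and a final eos segment once the sentence is exhausted, tracking its position in `self.steps`. The standalone sketch below reproduces that protocol in plain Python; the name `stream_source` and the `"</s>"` symbol are invented for the illustration.

```python
def stream_source(tokens, eos="</s>"):
    """Yield one source token per request, then a single eos segment."""
    step = 0
    while step < len(tokens):
        yield {"segment_id": step, "segment": tokens[step]}
        step += 1
    yield {"segment_id": step, "segment": eos}  # mirrors the eos branch in send_src


if __name__ == "__main__":
    for msg in stream_source("simultaneous translation reads the source incrementally".split()):
        print(msg)
```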
import importlib
import os
# ASG loss requires flashlight bindings
files_to_skip = set()
try:
import flashlight.lib.sequence.criterion
except ImportError:
files_to_skip.add("ASG_loss.py")
for file in os.listdir(os.path.dirname(__file__)):
if file.endswith(".py") and not file.startswith("_") and file not in files_to_skip:
criterion_name = file[: file.find(".py")]
importlib.import_module(
"examples.speech_recognition.criterions." + criterion_name
)
| COCO-LM/fairseq/examples/speech_recognition/criterions/__init__.py/0 | {
"file_path": "COCO-LM/fairseq/examples/speech_recognition/criterions/__init__.py",
"repo_id": "COCO-LM",
"token_count": 197
} | 176 |
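The `__init__.py` above relies on fairseq's plugin pattern: import every module in the package so that registration decorators run as a side effect. A generic, hedged sketch of that pattern follows; the directory and package names are placeholders, not fairseq API.

```python
import importlib
import os


def import_plugins(package_dir: str, package_name: str, skip=()):
    """Import every non-private .py file in a package so its decorators execute."""
    modules = []
    for file in sorted(os.listdir(package_dir)):
        if file.endswith(".py") and not file.startswith("_") and file not in skip:
            module_name = file[: -len(".py")]
            modules.append(importlib.import_module(f"{package_name}.{module_name}"))
    return modules
```

Calling something like `import_plugins(os.path.dirname(__file__), __package__, skip=files_to_skip)` from a package's `__init__.py` would mirror the loop above.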
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import math
from collections.abc import Iterable
import torch
import torch.nn as nn
from examples.speech_recognition.data.data_utils import lengths_to_encoder_padding_mask
from fairseq import utils
from fairseq.models import (
FairseqEncoder,
FairseqEncoderDecoderModel,
FairseqEncoderModel,
FairseqIncrementalDecoder,
register_model,
register_model_architecture,
)
from fairseq.modules import (
LinearizedConvolution,
TransformerDecoderLayer,
TransformerEncoderLayer,
VGGBlock,
)
@register_model("asr_vggtransformer")
class VGGTransformerModel(FairseqEncoderDecoderModel):
"""
Transformers with convolutional context for ASR
https://arxiv.org/abs/1904.11660
"""
def __init__(self, encoder, decoder):
super().__init__(encoder, decoder)
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
parser.add_argument(
"--input-feat-per-channel",
type=int,
metavar="N",
help="encoder input dimension per input channel",
)
parser.add_argument(
"--vggblock-enc-config",
type=str,
metavar="EXPR",
help="""
an array of tuples each containing the configuration of one vggblock:
[(out_channels,
conv_kernel_size,
pooling_kernel_size,
num_conv_layers,
              use_layer_norm), ...]
""",
)
parser.add_argument(
"--transformer-enc-config",
type=str,
metavar="EXPR",
            help="""
a tuple containing the configuration of the encoder transformer layers
configurations:
[(input_dim,
num_heads,
ffn_dim,
normalize_before,
dropout,
attention_dropout,
              relu_dropout), ...]
""",
)
parser.add_argument(
"--enc-output-dim",
type=int,
metavar="N",
help="""
encoder output dimension, can be None. If specified, projecting the
transformer output to the specified dimension""",
)
parser.add_argument(
"--in-channels",
type=int,
metavar="N",
help="number of encoder input channels",
)
parser.add_argument(
"--tgt-embed-dim",
type=int,
metavar="N",
help="embedding dimension of the decoder target tokens",
)
parser.add_argument(
"--transformer-dec-config",
type=str,
metavar="EXPR",
help="""
a tuple containing the configuration of the decoder transformer layers
configurations:
[(input_dim,
num_heads,
ffn_dim,
normalize_before,
dropout,
attention_dropout,
relu_dropout), ...]
""",
)
parser.add_argument(
"--conv-dec-config",
type=str,
metavar="EXPR",
help="""
an array of tuples for the decoder 1-D convolution config
[(out_channels, conv_kernel_size, use_layer_norm), ...]""",
)
@classmethod
def build_encoder(cls, args, task):
return VGGTransformerEncoder(
input_feat_per_channel=args.input_feat_per_channel,
vggblock_config=eval(args.vggblock_enc_config),
transformer_config=eval(args.transformer_enc_config),
encoder_output_dim=args.enc_output_dim,
in_channels=args.in_channels,
)
@classmethod
def build_decoder(cls, args, task):
return TransformerDecoder(
dictionary=task.target_dictionary,
embed_dim=args.tgt_embed_dim,
transformer_config=eval(args.transformer_dec_config),
conv_config=eval(args.conv_dec_config),
encoder_output_dim=args.enc_output_dim,
)
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
# make sure that all args are properly defaulted
# (in case there are any new ones)
base_architecture(args)
encoder = cls.build_encoder(args, task)
decoder = cls.build_decoder(args, task)
return cls(encoder, decoder)
def get_normalized_probs(self, net_output, log_probs, sample=None):
# net_output['encoder_out'] is a (B, T, D) tensor
lprobs = super().get_normalized_probs(net_output, log_probs, sample)
lprobs.batch_first = True
return lprobs
DEFAULT_ENC_VGGBLOCK_CONFIG = ((32, 3, 2, 2, False),) * 2
DEFAULT_ENC_TRANSFORMER_CONFIG = ((256, 4, 1024, True, 0.2, 0.2, 0.2),) * 2
# 256: embedding dimension
# 4: number of heads
# 1024: FFN
# True: apply layerNorm before (dropout + residual) instead of after
# 0.2 (dropout): dropout after MultiheadAttention and second FC
# 0.2 (attention_dropout): dropout in MultiheadAttention
# 0.2 (relu_dropout): dropout after ReLu
DEFAULT_DEC_TRANSFORMER_CONFIG = ((256, 2, 1024, True, 0.2, 0.2, 0.2),) * 2
DEFAULT_DEC_CONV_CONFIG = ((256, 3, True),) * 2
# TODO: replace transformer encoder config from one-liner
# to explicit args to get rid of this transformation
def prepare_transformer_encoder_params(
input_dim,
num_heads,
ffn_dim,
normalize_before,
dropout,
attention_dropout,
relu_dropout,
):
args = argparse.Namespace()
args.encoder_embed_dim = input_dim
args.encoder_attention_heads = num_heads
args.attention_dropout = attention_dropout
args.dropout = dropout
args.activation_dropout = relu_dropout
args.encoder_normalize_before = normalize_before
args.encoder_ffn_embed_dim = ffn_dim
return args
def prepare_transformer_decoder_params(
input_dim,
num_heads,
ffn_dim,
normalize_before,
dropout,
attention_dropout,
relu_dropout,
):
args = argparse.Namespace()
args.decoder_embed_dim = input_dim
args.decoder_attention_heads = num_heads
args.attention_dropout = attention_dropout
args.dropout = dropout
args.activation_dropout = relu_dropout
args.decoder_normalize_before = normalize_before
args.decoder_ffn_embed_dim = ffn_dim
return args
class VGGTransformerEncoder(FairseqEncoder):
"""VGG + Transformer encoder"""
def __init__(
self,
input_feat_per_channel,
vggblock_config=DEFAULT_ENC_VGGBLOCK_CONFIG,
transformer_config=DEFAULT_ENC_TRANSFORMER_CONFIG,
encoder_output_dim=512,
in_channels=1,
transformer_context=None,
transformer_sampling=None,
):
"""constructor for VGGTransformerEncoder
Args:
- input_feat_per_channel: feature dim (not including stacked,
just base feature)
            - in_channels: # input channels (e.g., if stacking 8 feature vectors
together, this is 8)
- vggblock_config: configuration of vggblock, see comments on
DEFAULT_ENC_VGGBLOCK_CONFIG
- transformer_config: configuration of transformer layer, see comments
on DEFAULT_ENC_TRANSFORMER_CONFIG
- encoder_output_dim: final transformer output embedding dimension
- transformer_context: (left, right) if set, self-attention will be focused
on (t-left, t+right)
- transformer_sampling: an iterable of int, must match with
len(transformer_config), transformer_sampling[i] indicates sampling
              factor for i-th transformer layer, after multihead att and feedforward
part
"""
super().__init__(None)
self.num_vggblocks = 0
if vggblock_config is not None:
if not isinstance(vggblock_config, Iterable):
raise ValueError("vggblock_config is not iterable")
self.num_vggblocks = len(vggblock_config)
self.conv_layers = nn.ModuleList()
self.in_channels = in_channels
self.input_dim = input_feat_per_channel
self.pooling_kernel_sizes = []
if vggblock_config is not None:
for _, config in enumerate(vggblock_config):
(
out_channels,
conv_kernel_size,
pooling_kernel_size,
num_conv_layers,
layer_norm,
) = config
self.conv_layers.append(
VGGBlock(
in_channels,
out_channels,
conv_kernel_size,
pooling_kernel_size,
num_conv_layers,
input_dim=input_feat_per_channel,
layer_norm=layer_norm,
)
)
self.pooling_kernel_sizes.append(pooling_kernel_size)
in_channels = out_channels
input_feat_per_channel = self.conv_layers[-1].output_dim
transformer_input_dim = self.infer_conv_output_dim(
self.in_channels, self.input_dim
)
# transformer_input_dim is the output dimension of VGG part
self.validate_transformer_config(transformer_config)
self.transformer_context = self.parse_transformer_context(transformer_context)
self.transformer_sampling = self.parse_transformer_sampling(
transformer_sampling, len(transformer_config)
)
self.transformer_layers = nn.ModuleList()
if transformer_input_dim != transformer_config[0][0]:
self.transformer_layers.append(
Linear(transformer_input_dim, transformer_config[0][0])
)
self.transformer_layers.append(
TransformerEncoderLayer(
prepare_transformer_encoder_params(*transformer_config[0])
)
)
for i in range(1, len(transformer_config)):
if transformer_config[i - 1][0] != transformer_config[i][0]:
self.transformer_layers.append(
Linear(transformer_config[i - 1][0], transformer_config[i][0])
)
self.transformer_layers.append(
TransformerEncoderLayer(
prepare_transformer_encoder_params(*transformer_config[i])
)
)
self.encoder_output_dim = encoder_output_dim
self.transformer_layers.extend(
[
Linear(transformer_config[-1][0], encoder_output_dim),
LayerNorm(encoder_output_dim),
]
)
def forward(self, src_tokens, src_lengths, **kwargs):
"""
src_tokens: padded tensor (B, T, C * feat)
src_lengths: tensor of original lengths of input utterances (B,)
"""
bsz, max_seq_len, _ = src_tokens.size()
x = src_tokens.view(bsz, max_seq_len, self.in_channels, self.input_dim)
x = x.transpose(1, 2).contiguous()
# (B, C, T, feat)
for layer_idx in range(len(self.conv_layers)):
x = self.conv_layers[layer_idx](x)
bsz, _, output_seq_len, _ = x.size()
# (B, C, T, feat) -> (B, T, C, feat) -> (T, B, C, feat) -> (T, B, C * feat)
x = x.transpose(1, 2).transpose(0, 1)
x = x.contiguous().view(output_seq_len, bsz, -1)
input_lengths = src_lengths.clone()
for s in self.pooling_kernel_sizes:
input_lengths = (input_lengths.float() / s).ceil().long()
encoder_padding_mask, _ = lengths_to_encoder_padding_mask(
input_lengths, batch_first=True
)
if not encoder_padding_mask.any():
encoder_padding_mask = None
subsampling_factor = int(max_seq_len * 1.0 / output_seq_len + 0.5)
attn_mask = self.lengths_to_attn_mask(input_lengths, subsampling_factor)
transformer_layer_idx = 0
for layer_idx in range(len(self.transformer_layers)):
if isinstance(self.transformer_layers[layer_idx], TransformerEncoderLayer):
x = self.transformer_layers[layer_idx](
x, encoder_padding_mask, attn_mask
)
if self.transformer_sampling[transformer_layer_idx] != 1:
sampling_factor = self.transformer_sampling[transformer_layer_idx]
x, encoder_padding_mask, attn_mask = self.slice(
x, encoder_padding_mask, attn_mask, sampling_factor
)
transformer_layer_idx += 1
else:
x = self.transformer_layers[layer_idx](x)
        # encoder_padding_mask is a (T x B) tensor, its [t, b] elements indicate
# whether encoder_output[t, b] is valid or not (valid=0, invalid=1)
return {
"encoder_out": x, # (T, B, C)
"encoder_padding_mask": encoder_padding_mask.t()
if encoder_padding_mask is not None
else None,
# (B, T) --> (T, B)
}
def infer_conv_output_dim(self, in_channels, input_dim):
sample_seq_len = 200
sample_bsz = 10
x = torch.randn(sample_bsz, in_channels, sample_seq_len, input_dim)
for i, _ in enumerate(self.conv_layers):
x = self.conv_layers[i](x)
x = x.transpose(1, 2)
mb, seq = x.size()[:2]
return x.contiguous().view(mb, seq, -1).size(-1)
def validate_transformer_config(self, transformer_config):
for config in transformer_config:
input_dim, num_heads = config[:2]
if input_dim % num_heads != 0:
msg = (
"ERROR in transformer config {}: ".format(config)
+ "input dimension {} ".format(input_dim)
                    + "not divisible by number of heads {}".format(num_heads)
)
raise ValueError(msg)
def parse_transformer_context(self, transformer_context):
"""
transformer_context can be the following:
- None; indicates no context is used, i.e.,
transformer can access full context
- a tuple/list of two int; indicates left and right context,
any number <0 indicates infinite context
* e.g., (5, 6) indicates that for query at x_t, transformer can
access [t-5, t+6] (inclusive)
* e.g., (-1, 6) indicates that for query at x_t, transformer can
access [0, t+6] (inclusive)
"""
if transformer_context is None:
return None
if not isinstance(transformer_context, Iterable):
raise ValueError("transformer context must be Iterable if it is not None")
if len(transformer_context) != 2:
raise ValueError("transformer context must have length 2")
left_context = transformer_context[0]
if left_context < 0:
left_context = None
right_context = transformer_context[1]
if right_context < 0:
right_context = None
if left_context is None and right_context is None:
return None
return (left_context, right_context)
def parse_transformer_sampling(self, transformer_sampling, num_layers):
"""
parsing transformer sampling configuration
Args:
- transformer_sampling, accepted input:
* None, indicating no sampling
* an Iterable with int (>0) as element
- num_layers, expected number of transformer layers, must match with
the length of transformer_sampling if it is not None
Returns:
- A tuple with length num_layers
"""
if transformer_sampling is None:
return (1,) * num_layers
if not isinstance(transformer_sampling, Iterable):
raise ValueError(
"transformer_sampling must be an iterable if it is not None"
)
if len(transformer_sampling) != num_layers:
raise ValueError(
"transformer_sampling {} does not match with the number "
"of layers {}".format(transformer_sampling, num_layers)
)
for layer, value in enumerate(transformer_sampling):
if not isinstance(value, int):
                raise ValueError("Invalid value in transformer_sampling: {}".format(value))
if value < 1:
raise ValueError(
"{} layer's subsampling is {}.".format(layer, value)
+ " This is not allowed! "
)
return transformer_sampling
def slice(self, embedding, padding_mask, attn_mask, sampling_factor):
"""
embedding is a (T, B, D) tensor
padding_mask is a (B, T) tensor or None
attn_mask is a (T, T) tensor or None
"""
embedding = embedding[::sampling_factor, :, :]
if padding_mask is not None:
padding_mask = padding_mask[:, ::sampling_factor]
if attn_mask is not None:
attn_mask = attn_mask[::sampling_factor, ::sampling_factor]
return embedding, padding_mask, attn_mask
def lengths_to_attn_mask(self, input_lengths, subsampling_factor=1):
"""
create attention mask according to sequence lengths and transformer
context
Args:
- input_lengths: (B, )-shape Int/Long tensor; input_lengths[b] is
the length of b-th sequence
- subsampling_factor: int
* Note that the left_context and right_context is specified in
the input frame-level while input to transformer may already
go through subsampling (e.g., the use of striding in vggblock)
we use subsampling_factor to scale the left/right context
Return:
- a (T, T) binary tensor or None, where T is max(input_lengths)
* if self.transformer_context is None, None
* if left_context is None,
* attn_mask[t, t + right_context + 1:] = 1
* others = 0
* if right_context is None,
* attn_mask[t, 0:t - left_context] = 1
* others = 0
* elsif
* attn_mask[t, t - left_context: t + right_context + 1] = 0
* others = 1
"""
if self.transformer_context is None:
return None
maxT = torch.max(input_lengths).item()
attn_mask = torch.zeros(maxT, maxT)
left_context = self.transformer_context[0]
right_context = self.transformer_context[1]
if left_context is not None:
left_context = math.ceil(self.transformer_context[0] / subsampling_factor)
if right_context is not None:
right_context = math.ceil(self.transformer_context[1] / subsampling_factor)
for t in range(maxT):
if left_context is not None:
st = 0
en = max(st, t - left_context)
attn_mask[t, st:en] = 1
if right_context is not None:
st = t + right_context + 1
st = min(st, maxT - 1)
attn_mask[t, st:] = 1
return attn_mask.to(input_lengths.device)
def reorder_encoder_out(self, encoder_out, new_order):
encoder_out["encoder_out"] = encoder_out["encoder_out"].index_select(
1, new_order
)
if encoder_out["encoder_padding_mask"] is not None:
encoder_out["encoder_padding_mask"] = encoder_out[
"encoder_padding_mask"
].index_select(1, new_order)
return encoder_out
class TransformerDecoder(FairseqIncrementalDecoder):
"""
Transformer decoder consisting of *args.decoder_layers* layers. Each layer
is a :class:`TransformerDecoderLayer`.
Args:
args (argparse.Namespace): parsed command-line arguments
dictionary (~fairseq.data.Dictionary): decoding dictionary
embed_tokens (torch.nn.Embedding): output embedding
no_encoder_attn (bool, optional): whether to attend to encoder outputs.
Default: ``False``
left_pad (bool, optional): whether the input is left-padded. Default:
``False``
"""
def __init__(
self,
dictionary,
embed_dim=512,
transformer_config=DEFAULT_ENC_TRANSFORMER_CONFIG,
conv_config=DEFAULT_DEC_CONV_CONFIG,
encoder_output_dim=512,
):
super().__init__(dictionary)
vocab_size = len(dictionary)
self.padding_idx = dictionary.pad()
self.embed_tokens = Embedding(vocab_size, embed_dim, self.padding_idx)
self.conv_layers = nn.ModuleList()
for i in range(len(conv_config)):
out_channels, kernel_size, layer_norm = conv_config[i]
if i == 0:
conv_layer = LinearizedConv1d(
embed_dim, out_channels, kernel_size, padding=kernel_size - 1
)
else:
conv_layer = LinearizedConv1d(
conv_config[i - 1][0],
out_channels,
kernel_size,
padding=kernel_size - 1,
)
self.conv_layers.append(conv_layer)
if layer_norm:
self.conv_layers.append(nn.LayerNorm(out_channels))
self.conv_layers.append(nn.ReLU())
self.layers = nn.ModuleList()
if conv_config[-1][0] != transformer_config[0][0]:
self.layers.append(Linear(conv_config[-1][0], transformer_config[0][0]))
self.layers.append(
TransformerDecoderLayer(
prepare_transformer_decoder_params(*transformer_config[0])
)
)
for i in range(1, len(transformer_config)):
if transformer_config[i - 1][0] != transformer_config[i][0]:
self.layers.append(
Linear(transformer_config[i - 1][0], transformer_config[i][0])
)
self.layers.append(
TransformerDecoderLayer(
prepare_transformer_decoder_params(*transformer_config[i])
)
)
self.fc_out = Linear(transformer_config[-1][0], vocab_size)
def forward(self, prev_output_tokens, encoder_out=None, incremental_state=None):
"""
Args:
prev_output_tokens (LongTensor): previous decoder outputs of shape
`(batch, tgt_len)`, for input feeding/teacher forcing
encoder_out (Tensor, optional): output from the encoder, used for
encoder-side attention
incremental_state (dict): dictionary used for storing state during
:ref:`Incremental decoding`
Returns:
tuple:
- the last decoder layer's output of shape `(batch, tgt_len,
vocab)`
- the last decoder layer's attention weights of shape `(batch,
tgt_len, src_len)`
"""
target_padding_mask = (
(prev_output_tokens == self.padding_idx).to(prev_output_tokens.device)
if incremental_state is None
else None
)
if incremental_state is not None:
prev_output_tokens = prev_output_tokens[:, -1:]
# embed tokens
x = self.embed_tokens(prev_output_tokens)
# B x T x C -> T x B x C
x = self._transpose_if_training(x, incremental_state)
for layer in self.conv_layers:
if isinstance(layer, LinearizedConvolution):
x = layer(x, incremental_state)
else:
x = layer(x)
# B x T x C -> T x B x C
x = self._transpose_if_inference(x, incremental_state)
# decoder layers
for layer in self.layers:
if isinstance(layer, TransformerDecoderLayer):
x, *_ = layer(
x,
(encoder_out["encoder_out"] if encoder_out is not None else None),
(
encoder_out["encoder_padding_mask"].t()
if encoder_out["encoder_padding_mask"] is not None
else None
),
incremental_state,
self_attn_mask=(
self.buffered_future_mask(x)
if incremental_state is None
else None
),
self_attn_padding_mask=(
target_padding_mask if incremental_state is None else None
),
)
else:
x = layer(x)
# T x B x C -> B x T x C
x = x.transpose(0, 1)
x = self.fc_out(x)
return x, None
def buffered_future_mask(self, tensor):
dim = tensor.size(0)
if (
not hasattr(self, "_future_mask")
or self._future_mask is None
or self._future_mask.device != tensor.device
):
self._future_mask = torch.triu(
utils.fill_with_neg_inf(tensor.new(dim, dim)), 1
)
if self._future_mask.size(0) < dim:
self._future_mask = torch.triu(
utils.fill_with_neg_inf(self._future_mask.resize_(dim, dim)), 1
)
return self._future_mask[:dim, :dim]
def _transpose_if_training(self, x, incremental_state):
if incremental_state is None:
x = x.transpose(0, 1)
return x
def _transpose_if_inference(self, x, incremental_state):
if incremental_state:
x = x.transpose(0, 1)
return x
@register_model("asr_vggtransformer_encoder")
class VGGTransformerEncoderModel(FairseqEncoderModel):
def __init__(self, encoder):
super().__init__(encoder)
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
parser.add_argument(
"--input-feat-per-channel",
type=int,
metavar="N",
help="encoder input dimension per input channel",
)
parser.add_argument(
"--vggblock-enc-config",
type=str,
metavar="EXPR",
help="""
an array of tuples each containing the configuration of one vggblock
[(out_channels, conv_kernel_size, pooling_kernel_size,num_conv_layers), ...]
""",
)
parser.add_argument(
"--transformer-enc-config",
type=str,
metavar="EXPR",
help="""
a tuple containing the configuration of the Transformer layers
configurations:
[(input_dim,
num_heads,
ffn_dim,
normalize_before,
dropout,
attention_dropout,
relu_dropout), ]""",
)
parser.add_argument(
"--enc-output-dim",
type=int,
metavar="N",
            help="encoder output dimension, projecting the transformer output",
)
parser.add_argument(
"--in-channels",
type=int,
metavar="N",
help="number of encoder input channels",
)
parser.add_argument(
"--transformer-context",
type=str,
metavar="EXPR",
help="""
either None or a tuple of two ints, indicating left/right context a
transformer can have access to""",
)
parser.add_argument(
"--transformer-sampling",
type=str,
metavar="EXPR",
help="""
either None or a tuple of ints, indicating sampling factor in each layer""",
)
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
base_architecture_enconly(args)
encoder = VGGTransformerEncoderOnly(
vocab_size=len(task.target_dictionary),
input_feat_per_channel=args.input_feat_per_channel,
vggblock_config=eval(args.vggblock_enc_config),
transformer_config=eval(args.transformer_enc_config),
encoder_output_dim=args.enc_output_dim,
in_channels=args.in_channels,
transformer_context=eval(args.transformer_context),
transformer_sampling=eval(args.transformer_sampling),
)
return cls(encoder)
def get_normalized_probs(self, net_output, log_probs, sample=None):
# net_output['encoder_out'] is a (T, B, D) tensor
lprobs = super().get_normalized_probs(net_output, log_probs, sample)
# lprobs is a (T, B, D) tensor
        # we need to transpose to get a (B, T, D) tensor
lprobs = lprobs.transpose(0, 1).contiguous()
lprobs.batch_first = True
return lprobs
class VGGTransformerEncoderOnly(VGGTransformerEncoder):
def __init__(
self,
vocab_size,
input_feat_per_channel,
vggblock_config=DEFAULT_ENC_VGGBLOCK_CONFIG,
transformer_config=DEFAULT_ENC_TRANSFORMER_CONFIG,
encoder_output_dim=512,
in_channels=1,
transformer_context=None,
transformer_sampling=None,
):
super().__init__(
input_feat_per_channel=input_feat_per_channel,
vggblock_config=vggblock_config,
transformer_config=transformer_config,
encoder_output_dim=encoder_output_dim,
in_channels=in_channels,
transformer_context=transformer_context,
transformer_sampling=transformer_sampling,
)
self.fc_out = Linear(self.encoder_output_dim, vocab_size)
def forward(self, src_tokens, src_lengths, **kwargs):
"""
src_tokens: padded tensor (B, T, C * feat)
src_lengths: tensor of original lengths of input utterances (B,)
"""
enc_out = super().forward(src_tokens, src_lengths)
x = self.fc_out(enc_out["encoder_out"])
# x = F.log_softmax(x, dim=-1)
        # Note: this line is not needed, because model.get_normalized_probs will
        # call log_softmax
return {
"encoder_out": x, # (T, B, C)
"encoder_padding_mask": enc_out["encoder_padding_mask"], # (T, B)
}
def max_positions(self):
"""Maximum input length supported by the encoder."""
return (1e6, 1e6) # an arbitrary large number
def Embedding(num_embeddings, embedding_dim, padding_idx):
m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)
# nn.init.uniform_(m.weight, -0.1, 0.1)
# nn.init.constant_(m.weight[padding_idx], 0)
return m
def Linear(in_features, out_features, bias=True, dropout=0):
"""Linear layer (input: N x T x C)"""
m = nn.Linear(in_features, out_features, bias=bias)
# m.weight.data.uniform_(-0.1, 0.1)
# if bias:
# m.bias.data.uniform_(-0.1, 0.1)
return m
def LinearizedConv1d(in_channels, out_channels, kernel_size, dropout=0, **kwargs):
"""Weight-normalized Conv1d layer optimized for decoding"""
m = LinearizedConvolution(in_channels, out_channels, kernel_size, **kwargs)
std = math.sqrt((4 * (1.0 - dropout)) / (m.kernel_size[0] * in_channels))
nn.init.normal_(m.weight, mean=0, std=std)
nn.init.constant_(m.bias, 0)
return nn.utils.weight_norm(m, dim=2)
def LayerNorm(embedding_dim):
m = nn.LayerNorm(embedding_dim)
return m
# seq2seq models
def base_architecture(args):
args.input_feat_per_channel = getattr(args, "input_feat_per_channel", 40)
args.vggblock_enc_config = getattr(
args, "vggblock_enc_config", DEFAULT_ENC_VGGBLOCK_CONFIG
)
args.transformer_enc_config = getattr(
args, "transformer_enc_config", DEFAULT_ENC_TRANSFORMER_CONFIG
)
args.enc_output_dim = getattr(args, "enc_output_dim", 512)
args.in_channels = getattr(args, "in_channels", 1)
args.tgt_embed_dim = getattr(args, "tgt_embed_dim", 128)
args.transformer_dec_config = getattr(
args, "transformer_dec_config", DEFAULT_ENC_TRANSFORMER_CONFIG
)
args.conv_dec_config = getattr(args, "conv_dec_config", DEFAULT_DEC_CONV_CONFIG)
args.transformer_context = getattr(args, "transformer_context", "None")
@register_model_architecture("asr_vggtransformer", "vggtransformer_1")
def vggtransformer_1(args):
args.input_feat_per_channel = getattr(args, "input_feat_per_channel", 80)
args.vggblock_enc_config = getattr(
args, "vggblock_enc_config", "[(64, 3, 2, 2, True), (128, 3, 2, 2, True)]"
)
args.transformer_enc_config = getattr(
args,
"transformer_enc_config",
"((1024, 16, 4096, True, 0.15, 0.15, 0.15),) * 14",
)
args.enc_output_dim = getattr(args, "enc_output_dim", 1024)
args.tgt_embed_dim = getattr(args, "tgt_embed_dim", 128)
args.conv_dec_config = getattr(args, "conv_dec_config", "((256, 3, True),) * 4")
args.transformer_dec_config = getattr(
args,
"transformer_dec_config",
"((1024, 16, 4096, True, 0.15, 0.15, 0.15),) * 4",
)
@register_model_architecture("asr_vggtransformer", "vggtransformer_2")
def vggtransformer_2(args):
args.input_feat_per_channel = getattr(args, "input_feat_per_channel", 80)
args.vggblock_enc_config = getattr(
args, "vggblock_enc_config", "[(64, 3, 2, 2, True), (128, 3, 2, 2, True)]"
)
args.transformer_enc_config = getattr(
args,
"transformer_enc_config",
"((1024, 16, 4096, True, 0.15, 0.15, 0.15),) * 16",
)
args.enc_output_dim = getattr(args, "enc_output_dim", 1024)
args.tgt_embed_dim = getattr(args, "tgt_embed_dim", 512)
args.conv_dec_config = getattr(args, "conv_dec_config", "((256, 3, True),) * 4")
args.transformer_dec_config = getattr(
args,
"transformer_dec_config",
"((1024, 16, 4096, True, 0.15, 0.15, 0.15),) * 6",
)
@register_model_architecture("asr_vggtransformer", "vggtransformer_base")
def vggtransformer_base(args):
args.input_feat_per_channel = getattr(args, "input_feat_per_channel", 80)
args.vggblock_enc_config = getattr(
args, "vggblock_enc_config", "[(64, 3, 2, 2, True), (128, 3, 2, 2, True)]"
)
args.transformer_enc_config = getattr(
args, "transformer_enc_config", "((512, 8, 2048, True, 0.15, 0.15, 0.15),) * 12"
)
args.enc_output_dim = getattr(args, "enc_output_dim", 512)
args.tgt_embed_dim = getattr(args, "tgt_embed_dim", 512)
args.conv_dec_config = getattr(args, "conv_dec_config", "((256, 3, True),) * 4")
args.transformer_dec_config = getattr(
args, "transformer_dec_config", "((512, 8, 2048, True, 0.15, 0.15, 0.15),) * 6"
)
# Size estimations:
# Encoder:
# - vggblock param: 64*1*3*3 + 64*64*3*3 + 128*64*3*3 + 128*128*3*3 = 258K
# Transformer:
# - input dimension adapter: 2560 x 512 -> 1.31M
# - transformer_layers (x12) --> 37.74M
# * MultiheadAttention: 512*512*3 (in_proj) + 512*512 (out_proj) = 1.048M
# * FFN weight: 512*2048*2 = 2.097M
# - output dimension adapter: 512 x 512 -> 0.26 M
# Decoder:
# - LinearizedConv1d: 512 * 256 * 3 + 256 * 256 * 3 * 3
# - transformer_layer: (x6) --> 25.16M
# * MultiheadAttention (self-attention): 512*512*3 + 512*512 = 1.048M
# * MultiheadAttention (encoder-attention): 512*512*3 + 512*512 = 1.048M
# * FFN: 512*2048*2 = 2.097M
# Final FC:
# - FC: 512*5000 = 256K (assuming vocab size 5K)
# In total:
# ~65 M
# CTC models
def base_architecture_enconly(args):
args.input_feat_per_channel = getattr(args, "input_feat_per_channel", 40)
args.vggblock_enc_config = getattr(
args, "vggblock_enc_config", "[(32, 3, 2, 2, True)] * 2"
)
args.transformer_enc_config = getattr(
args, "transformer_enc_config", "((256, 4, 1024, True, 0.2, 0.2, 0.2),) * 2"
)
args.enc_output_dim = getattr(args, "enc_output_dim", 512)
args.in_channels = getattr(args, "in_channels", 1)
args.transformer_context = getattr(args, "transformer_context", "None")
args.transformer_sampling = getattr(args, "transformer_sampling", "None")
@register_model_architecture("asr_vggtransformer_encoder", "vggtransformer_enc_1")
def vggtransformer_enc_1(args):
# vggtransformer_1 is the same as vggtransformer_enc_big, except the number
# of layers is increased to 16
    # keep it here for backward compatibility purposes
args.input_feat_per_channel = getattr(args, "input_feat_per_channel", 80)
args.vggblock_enc_config = getattr(
args, "vggblock_enc_config", "[(64, 3, 2, 2, True), (128, 3, 2, 2, True)]"
)
args.transformer_enc_config = getattr(
args,
"transformer_enc_config",
"((1024, 16, 4096, True, 0.15, 0.15, 0.15),) * 16",
)
args.enc_output_dim = getattr(args, "enc_output_dim", 1024)
| COCO-LM/fairseq/examples/speech_recognition/models/vggtransformer.py/0 | {
"file_path": "COCO-LM/fairseq/examples/speech_recognition/models/vggtransformer.py",
"repo_id": "COCO-LM",
"token_count": 17782
} | 177 |
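The architectures above pass their layer configurations around as Python one-liner strings and expand them with `eval()` inside `build_encoder`/`build_decoder`. The standalone snippet below shows what the `vggtransformer_1` defaults expand to.

```python
vggblock_enc_config = "[(64, 3, 2, 2, True), (128, 3, 2, 2, True)]"
transformer_enc_config = "((1024, 16, 4096, True, 0.15, 0.15, 0.15),) * 14"

blocks = eval(vggblock_enc_config)      # list with one tuple per VGG block
layers = eval(transformer_enc_config)   # the same transformer layer config repeated 14x

print(len(blocks), blocks[0])   # 2 (64, 3, 2, 2, True)
print(len(layers), layers[0])   # 14 (1024, 16, 4096, True, 0.15, 0.15, 0.15)
```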
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import logging
import os
from pathlib import Path
import shutil
from itertools import groupby
from tempfile import NamedTemporaryFile
from typing import Tuple
import numpy as np
import pandas as pd
import soundfile as sf
from examples.speech_to_text.data_utils import (
create_zip,
extract_fbank_features,
filter_manifest_df,
gen_config_yaml,
gen_vocab,
get_zip_manifest,
load_df_from_tsv,
save_df_to_tsv,
cal_gcmvn_stats,
)
import torch
from torch.utils.data import Dataset
from tqdm import tqdm
from fairseq.data.audio.audio_utils import get_waveform
log = logging.getLogger(__name__)
MANIFEST_COLUMNS = ["id", "audio", "n_frames", "tgt_text", "speaker"]
class MUSTC(Dataset):
"""
Create a Dataset for MuST-C. Each item is a tuple of the form:
waveform, sample_rate, source utterance, target utterance, speaker_id,
utterance_id
"""
SPLITS = ["train", "dev", "tst-COMMON", "tst-HE"]
LANGUAGES = ["de", "es", "fr", "it", "nl", "pt", "ro", "ru"]
def __init__(self, root: str, lang: str, split: str) -> None:
assert split in self.SPLITS and lang in self.LANGUAGES
_root = Path(root) / f"en-{lang}" / "data" / split
wav_root, txt_root = _root / "wav", _root / "txt"
assert _root.is_dir() and wav_root.is_dir() and txt_root.is_dir()
# Load audio segments
try:
import yaml
except ImportError:
print("Please install PyYAML to load the MuST-C YAML files")
with open(txt_root / f"{split}.yaml") as f:
segments = yaml.load(f, Loader=yaml.BaseLoader)
# Load source and target utterances
for _lang in ["en", lang]:
with open(txt_root / f"{split}.{_lang}") as f:
utterances = [r.strip() for r in f]
assert len(segments) == len(utterances)
for i, u in enumerate(utterances):
segments[i][_lang] = u
# Gather info
self.data = []
for wav_filename, _seg_group in groupby(segments, lambda x: x["wav"]):
wav_path = wav_root / wav_filename
sample_rate = sf.info(wav_path.as_posix()).samplerate
seg_group = sorted(_seg_group, key=lambda x: x["offset"])
for i, segment in enumerate(seg_group):
offset = int(float(segment["offset"]) * sample_rate)
n_frames = int(float(segment["duration"]) * sample_rate)
_id = f"{wav_path.stem}_{i}"
self.data.append(
(
wav_path.as_posix(),
offset,
n_frames,
sample_rate,
segment["en"],
segment[lang],
segment["speaker_id"],
_id,
)
)
def __getitem__(self, n: int) -> Tuple[torch.Tensor, int, str, str, str, str]:
wav_path, offset, n_frames, sr, src_utt, tgt_utt, spk_id, utt_id = self.data[n]
waveform, _ = get_waveform(wav_path, frames=n_frames, start=offset)
waveform = torch.from_numpy(waveform)
return waveform, sr, src_utt, tgt_utt, spk_id, utt_id
def __len__(self) -> int:
return len(self.data)
def process(args):
root = Path(args.data_root).absolute()
for lang in MUSTC.LANGUAGES:
cur_root = root / f"en-{lang}"
if not cur_root.is_dir():
print(f"{cur_root.as_posix()} does not exist. Skipped.")
continue
# Extract features
feature_root = cur_root / "fbank80"
feature_root.mkdir(exist_ok=True)
for split in MUSTC.SPLITS:
print(f"Fetching split {split}...")
dataset = MUSTC(root.as_posix(), lang, split)
print("Extracting log mel filter bank features...")
if split == 'train' and args.cmvn_type == "global":
print("And estimating cepstral mean and variance stats...")
gcmvn_feature_list = []
for waveform, sample_rate, _, _, _, utt_id in tqdm(dataset):
features = extract_fbank_features(waveform, sample_rate)
np.save(
(feature_root / f"{utt_id}.npy").as_posix(),
features
)
if split == 'train' and args.cmvn_type == "global":
if len(gcmvn_feature_list) < args.gcmvn_max_num:
gcmvn_feature_list.append(features)
if split == 'train' and args.cmvn_type == "global":
                # Estimate and save cmvn stats
stats = cal_gcmvn_stats(gcmvn_feature_list)
with open(cur_root / "gcmvn.npz", "wb") as f:
np.savez(f, mean=stats["mean"], std=stats["std"])
# Pack features into ZIP
zip_path = cur_root / "fbank80.zip"
print("ZIPing features...")
create_zip(feature_root, zip_path)
print("Fetching ZIP manifest...")
zip_manifest = get_zip_manifest(zip_path)
# Generate TSV manifest
print("Generating manifest...")
train_text = []
for split in MUSTC.SPLITS:
is_train_split = split.startswith("train")
manifest = {c: [] for c in MANIFEST_COLUMNS}
dataset = MUSTC(args.data_root, lang, split)
for wav, sr, src_utt, tgt_utt, speaker_id, utt_id in tqdm(dataset):
manifest["id"].append(utt_id)
manifest["audio"].append(zip_manifest[utt_id])
duration_ms = int(wav.size(1) / sr * 1000)
manifest["n_frames"].append(int(1 + (duration_ms - 25) / 10))
manifest["tgt_text"].append(src_utt if args.task == "asr" else tgt_utt)
manifest["speaker"].append(speaker_id)
if is_train_split:
train_text.extend(manifest["tgt_text"])
df = pd.DataFrame.from_dict(manifest)
df = filter_manifest_df(df, is_train_split=is_train_split)
save_df_to_tsv(df, cur_root / f"{split}_{args.task}.tsv")
# Generate vocab
v_size_str = "" if args.vocab_type == "char" else str(args.vocab_size)
spm_filename_prefix = f"spm_{args.vocab_type}{v_size_str}_{args.task}"
with NamedTemporaryFile(mode="w") as f:
for t in train_text:
f.write(t + "\n")
gen_vocab(
Path(f.name),
cur_root / spm_filename_prefix,
args.vocab_type,
args.vocab_size,
)
# Generate config YAML
gen_config_yaml(
cur_root,
spm_filename_prefix + ".model",
yaml_filename=f"config_{args.task}.yaml",
specaugment_policy="lb",
cmvn_type=args.cmvn_type,
gcmvn_path=(
cur_root / "gcmvn.npz" if args.cmvn_type == "global"
else None
),
)
# Clean up
shutil.rmtree(feature_root)
def process_joint(args):
cur_root = Path(args.data_root)
assert all((cur_root / f"en-{lang}").is_dir() for lang in MUSTC.LANGUAGES), \
"do not have downloaded data available for all 8 languages"
# Generate vocab
vocab_size_str = "" if args.vocab_type == "char" else str(args.vocab_size)
spm_filename_prefix = f"spm_{args.vocab_type}{vocab_size_str}_{args.task}"
with NamedTemporaryFile(mode="w") as f:
for lang in MUSTC.LANGUAGES:
tsv_path = cur_root / f"en-{lang}" / f"train_{args.task}.tsv"
df = load_df_from_tsv(tsv_path)
for t in df["tgt_text"]:
f.write(t + "\n")
special_symbols = None
if args.task == 'st':
special_symbols = [f'<lang:{lang}>' for lang in MUSTC.LANGUAGES]
gen_vocab(
Path(f.name),
cur_root / spm_filename_prefix,
args.vocab_type,
args.vocab_size,
special_symbols=special_symbols
)
# Generate config YAML
gen_config_yaml(
cur_root,
spm_filename_prefix + ".model",
yaml_filename=f"config_{args.task}.yaml",
specaugment_policy="ld",
prepend_tgt_lang_tag=(args.task == "st"),
)
# Make symbolic links to manifests
for lang in MUSTC.LANGUAGES:
for split in MUSTC.SPLITS:
src_path = cur_root / f"en-{lang}" / f"{split}_{args.task}.tsv"
desc_path = cur_root / f"{split}_{lang}_{args.task}.tsv"
if not desc_path.is_symlink():
os.symlink(src_path, desc_path)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--data-root", "-d", required=True, type=str)
parser.add_argument(
"--vocab-type",
default="unigram",
required=True,
type=str,
choices=["bpe", "unigram", "char"],
),
parser.add_argument("--vocab-size", default=8000, type=int)
parser.add_argument("--task", type=str, choices=["asr", "st"])
    parser.add_argument("--joint", action="store_true",
                        help="generate joint vocabulary and config over all languages")
parser.add_argument("--cmvn-type", default="utterance",
choices=["global", "utterance"],
help="The type of cepstral mean and variance normalization")
parser.add_argument("--gcmvn-max-num", default=150000, type=int,
                        help=(
                            "Maximum number of sentences to use to estimate "
                            "global mean and variance"
))
args = parser.parse_args()
if args.joint:
process_joint(args)
else:
process(args)
if __name__ == "__main__":
main()
| COCO-LM/fairseq/examples/speech_to_text/prep_mustc_data.py/0 | {
"file_path": "COCO-LM/fairseq/examples/speech_to_text/prep_mustc_data.py",
"repo_id": "COCO-LM",
"token_count": 5033
} | 178 |
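The manifest generation above derives `n_frames` from the clip duration with `int(1 + (duration_ms - 25) / 10)`, i.e. assuming the usual 25 ms window and 10 ms hop of the log-mel filterbank features. A small sanity check of that arithmetic:

```python
def n_frames_from_duration(duration_ms: int, window_ms: int = 25, hop_ms: int = 10) -> int:
    """Number of filterbank frames produced for a clip of the given duration."""
    return int(1 + (duration_ms - window_ms) / hop_ms)


if __name__ == "__main__":
    for duration_ms in (1000, 2500, 10_000):
        print(duration_ms, "ms ->", n_frames_from_duration(duration_ms), "frames")
        # 1000 ms -> 98 frames, 2500 ms -> 248 frames, 10000 ms -> 998 frames
```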
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import numpy as np
import torch
from fairseq.data import Dictionary, FairseqDataset
from fairseq.tasks import LegacyFairseqTask, register_task
logger = logging.getLogger(__name__)
@register_task("dummy_masked_lm")
class DummyMaskedLMTask(LegacyFairseqTask):
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
parser.add_argument("--dict-size", default=49995, type=int)
parser.add_argument("--dataset-size", default=100000, type=int)
parser.add_argument(
"--tokens-per-sample",
default=512,
type=int,
help="max number of total tokens over all segments "
"per sample for BERT dataset",
)
def __init__(self, args, dictionary):
super().__init__(args)
self.dictionary = dictionary
# add mask token
self.mask_idx = dictionary.add_symbol("<mask>")
dictionary.pad_to_multiple_(8) # often faster if divisible by 8
mask_idx = 0
pad_idx = 1
seq = torch.arange(args.tokens_per_sample) + pad_idx + 1
mask = torch.arange(2, args.tokens_per_sample, 7) # ~15%
src = seq.clone()
src[mask] = mask_idx
tgt = torch.full_like(seq, pad_idx)
tgt[mask] = seq[mask]
self.dummy_src = src
self.dummy_tgt = tgt
@classmethod
def setup_task(cls, args, **kwargs):
"""Setup the task. """
dictionary = Dictionary()
for i in range(args.dict_size):
dictionary.add_symbol("word{}".format(i))
logger.info("dictionary: {} types".format(len(dictionary)))
return cls(args, dictionary)
def load_dataset(self, split, epoch=1, combine=False, **kwargs):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
if self.args.batch_size is not None:
bsz = self.args.batch_size
else:
bsz = max(1, self.args.max_tokens // self.args.tokens_per_sample)
self.datasets[split] = DummyDataset(
{
"id": 1,
"net_input": {
"src_tokens": torch.stack([self.dummy_src for _ in range(bsz)]),
"src_lengths": torch.full(
(bsz,), self.args.tokens_per_sample, dtype=torch.long
),
},
"target": torch.stack([self.dummy_tgt for _ in range(bsz)]),
"nsentences": bsz,
"ntokens": bsz * self.args.tokens_per_sample,
},
num_items=self.args.dataset_size,
item_size=self.args.tokens_per_sample,
)
@property
def source_dictionary(self):
return self.dictionary
@property
def target_dictionary(self):
return self.dictionary
class DummyDataset(FairseqDataset):
def __init__(self, batch, num_items, item_size):
super().__init__()
self.batch = batch
self.num_items = num_items
self.item_size = item_size
def __getitem__(self, index):
return index
def __len__(self):
return self.num_items
def collater(self, samples):
return self.batch
@property
def sizes(self):
return np.array([self.item_size] * self.num_items)
def num_tokens(self, index):
return self.item_size
def size(self, index):
return self.item_size
def ordered_indices(self):
return np.arange(self.num_items)
@property
def supports_prefetch(self):
return False
| COCO-LM/fairseq/fairseq/benchmark/dummy_masked_lm.py/0 | {
"file_path": "COCO-LM/fairseq/fairseq/benchmark/dummy_masked_lm.py",
"repo_id": "COCO-LM",
"token_count": 1793
} | 179 |
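For reference, the fixed masking pattern built in `__init__` above masks every 7th position starting at index 2 (roughly 15% of the sequence); sources get `mask_idx` at those positions and targets are padding everywhere else. The same few lines, run on a short sequence so the pattern is visible:

```python
import torch

tokens_per_sample = 16
mask_idx, pad_idx = 0, 1

seq = torch.arange(tokens_per_sample) + pad_idx + 1
mask = torch.arange(2, tokens_per_sample, 7)

src = seq.clone()
src[mask] = mask_idx
tgt = torch.full_like(seq, pad_idx)
tgt[mask] = seq[mask]

print("mask positions:", mask.tolist())  # [2, 9]
print("src:", src.tolist())              # masked positions replaced by 0
print("tgt:", tgt.tolist())              # padding except at masked positions
```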
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import inspect
from typing import Any, Dict, List
from fairseq import metrics, utils
from fairseq.dataclass import FairseqDataclass
from fairseq.dataclass.utils import gen_parser_from_dataclass
from torch.nn.modules.loss import _Loss
class FairseqCriterion(_Loss):
def __init__(self, task):
super().__init__()
self.task = task
self.args = task.args
if hasattr(task, "target_dictionary"):
tgt_dict = task.target_dictionary
self.padding_idx = tgt_dict.pad() if tgt_dict is not None else -100
@classmethod
def add_args(cls, parser):
"""Add criterion-specific arguments to the parser."""
dc = getattr(cls, "__dataclass", None)
if dc is not None:
gen_parser_from_dataclass(parser, dc())
@classmethod
def build_criterion(cls, cfg: FairseqDataclass, task):
"""Construct a criterion from command-line args."""
# arguments in the __init__.
init_args = {}
for p in inspect.signature(cls).parameters.values():
if (
p.kind == p.POSITIONAL_ONLY
or p.kind == p.VAR_POSITIONAL
or p.kind == p.VAR_KEYWORD
):
# we haven't implemented inference for these argument types,
# but PRs welcome :)
raise NotImplementedError("{} not supported".format(p.kind))
assert p.kind in {p.POSITIONAL_OR_KEYWORD, p.KEYWORD_ONLY}
if p.name == "task":
init_args["task"] = task
elif p.name == "cfg":
init_args["cfg"] = cfg
elif hasattr(cfg, p.name):
init_args[p.name] = getattr(cfg, p.name)
elif p.default != p.empty:
pass # we'll use the default value
else:
raise NotImplementedError(
"Unable to infer Criterion arguments, please implement "
"{}.build_criterion".format(cls.__name__)
)
return cls(**init_args)
def forward(self, model, sample, reduce=True):
"""Compute the loss for the given sample.
Returns a tuple with three elements:
1) the loss
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
"""
raise NotImplementedError
@staticmethod
def aggregate_logging_outputs(
logging_outputs: List[Dict[str, Any]]
) -> Dict[str, Any]:
"""Aggregate logging outputs from data parallel training."""
utils.deprecation_warning(
"The aggregate_logging_outputs API is deprecated. "
"Please use the reduce_metrics API instead."
)
raise NotImplementedError
@classmethod
def reduce_metrics(cls, logging_outputs: List[Dict[str, Any]]) -> None:
"""Aggregate logging outputs from data parallel training."""
utils.deprecation_warning(
"Criterions should implement the reduce_metrics API. "
"Falling back to deprecated aggregate_logging_outputs API."
)
agg_logging_outputs = cls.aggregate_logging_outputs(logging_outputs)
for k, v in agg_logging_outputs.items():
if k in {"nsentences", "ntokens", "sample_size"}:
continue
metrics.log_scalar(k, v)
def context_metrics(self, logging_outputs) -> None:
pass
@staticmethod
def logging_outputs_can_be_summed() -> bool:
"""
Whether the logging outputs returned by `forward` can be summed
across workers prior to calling `reduce_metrics`. Setting this
        to True will improve distributed training speed.
"""
return False
class LegacyFairseqCriterion(FairseqCriterion):
def __init__(self, args, task):
super().__init__(task=task)
self.args = args
utils.deprecation_warning(
"Criterions should take explicit arguments instead of an "
"argparse.Namespace object, please update your criterion by "
"extending FairseqCriterion instead of LegacyFairseqCriterion."
)
@classmethod
def build_criterion(cls, args, task):
"""Construct a criterion from command-line args."""
return cls(args, task)
| COCO-LM/fairseq/fairseq/criterions/fairseq_criterion.py/0 | {
"file_path": "COCO-LM/fairseq/fairseq/criterions/fairseq_criterion.py",
"repo_id": "COCO-LM",
"token_count": 1954
} | 180 |
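`FairseqCriterion.build_criterion` above fills a subclass's constructor arguments by inspecting its `__init__` signature and looking the parameter names up on the config object. The toy reproduction below shows that mechanism outside of fairseq; `ToyConfig`, `ToyCriterion`, and `build` are invented for the example.

```python
import inspect
from dataclasses import dataclass


@dataclass
class ToyConfig:
    label_smoothing: float = 0.1


class ToyCriterion:
    def __init__(self, task, label_smoothing=0.0):
        self.task = task
        self.label_smoothing = label_smoothing


def build(cls, cfg, task):
    """Fill __init__ arguments by name, mirroring build_criterion above."""
    init_args = {}
    for p in inspect.signature(cls).parameters.values():
        if p.name == "task":
            init_args["task"] = task
        elif hasattr(cfg, p.name):
            init_args[p.name] = getattr(cfg, p.name)
    return cls(**init_args)


if __name__ == "__main__":
    crit = build(ToyCriterion, ToyConfig(), task="dummy-task")
    print(crit.label_smoothing)  # 0.1, pulled from the config by parameter name
```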
from pathlib import Path
from typing import BinaryIO, Optional, Tuple, Union
import numpy as np
import torch
SF_AUDIO_FILE_EXTENSIONS = {".wav", ".flac", ".ogg"}
def _convert_to_mono(
waveform: torch.FloatTensor, sample_rate: int
) -> torch.FloatTensor:
if waveform.shape[0] > 1:
try:
import torchaudio.sox_effects as ta_sox
except ImportError:
raise ImportError(
"Please install torchaudio to convert multi-channel audios"
)
effects = [['channels', '1']]
return ta_sox.apply_effects_tensor(waveform, sample_rate, effects)[0]
return waveform
def convert_to_mono(waveform: np.ndarray, sample_rate: int) -> np.ndarray:
if waveform.shape[0] > 1:
_waveform = torch.from_numpy(waveform)
return _convert_to_mono(_waveform, sample_rate).numpy()
return waveform
def get_waveform(
path_or_fp: Union[str, BinaryIO], normalization=True, mono=True,
frames=-1, start=0, always_2d=True
) -> Tuple[np.ndarray, int]:
"""Get the waveform and sample rate of a 16-bit WAV/FLAC/OGG Vorbis audio.
Args:
path_or_fp (str or BinaryIO): the path or file-like object
normalization (bool): Normalize values to [-1, 1] (Default: True)
mono (bool): convert multi-channel audio to mono-channel one
frames (int): the number of frames to read. (-1 for reading all)
start (int): Where to start reading. A negative value counts from the end.
always_2d (bool): always return 2D array even for mono-channel audios
Returns:
waveform (numpy.ndarray): 1D or 2D waveform (channels x length)
sample_rate (float): sample rate
"""
if isinstance(path_or_fp, str):
ext = Path(path_or_fp).suffix
if ext not in SF_AUDIO_FILE_EXTENSIONS:
raise ValueError(f"Unsupported audio format: {ext}")
try:
import soundfile as sf
except ImportError:
raise ImportError(
"Please install soundfile to load WAV/FLAC/OGG Vorbis audios"
)
waveform, sample_rate = sf.read(
path_or_fp, dtype="float32", always_2d=True, frames=frames, start=start
)
waveform = waveform.T # T x C -> C x T
if mono and waveform.shape[0] > 1:
waveform = convert_to_mono(waveform, sample_rate)
if not normalization:
waveform *= 2 ** 15 # denormalized to 16-bit signed integers
if not always_2d:
waveform = waveform.squeeze(axis=0)
return waveform, sample_rate
def _get_kaldi_fbank(
waveform: np.ndarray, sample_rate: int, n_bins=80
) -> Optional[np.ndarray]:
"""Get mel-filter bank features via PyKaldi."""
try:
from kaldi.feat.mel import MelBanksOptions
from kaldi.feat.fbank import FbankOptions, Fbank
from kaldi.feat.window import FrameExtractionOptions
from kaldi.matrix import Vector
mel_opts = MelBanksOptions()
mel_opts.num_bins = n_bins
frame_opts = FrameExtractionOptions()
frame_opts.samp_freq = sample_rate
opts = FbankOptions()
opts.mel_opts = mel_opts
opts.frame_opts = frame_opts
fbank = Fbank(opts=opts)
features = fbank.compute(Vector(waveform.squeeze()), 1.0).numpy()
return features
except ImportError:
return None
def _get_torchaudio_fbank(
waveform: np.ndarray, sample_rate, n_bins=80
) -> Optional[np.ndarray]:
"""Get mel-filter bank features via TorchAudio."""
try:
import torchaudio.compliance.kaldi as ta_kaldi
waveform = torch.from_numpy(waveform)
features = ta_kaldi.fbank(
waveform, num_mel_bins=n_bins, sample_frequency=sample_rate
)
return features.numpy()
except ImportError:
return None
def get_fbank(path_or_fp: Union[str, BinaryIO], n_bins=80) -> np.ndarray:
"""Get mel-filter bank features via PyKaldi or TorchAudio. Prefer PyKaldi
(faster CPP implementation) to TorchAudio (Python implementation). Note that
Kaldi/TorchAudio requires 16-bit signed integers as inputs and hence the
waveform should not be normalized."""
waveform, sample_rate = get_waveform(path_or_fp, normalization=False)
features = _get_kaldi_fbank(waveform, sample_rate, n_bins)
if features is None:
features = _get_torchaudio_fbank(waveform, sample_rate, n_bins)
if features is None:
raise ImportError(
"Please install pyKaldi or torchaudio to enable "
"online filterbank feature extraction"
)
return features
| COCO-LM/fairseq/fairseq/data/audio/audio_utils.py/0 | {
"file_path": "COCO-LM/fairseq/fairseq/data/audio/audio_utils.py",
"repo_id": "COCO-LM",
"token_count": 1918
} | 181 |
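A hedged usage sketch for the helpers above: write a short synthetic WAV with soundfile and read it back through `get_waveform`, once normalized and once on the 16-bit integer scale. It assumes fairseq and soundfile are installed; the file name is arbitrary.

```python
import numpy as np
import soundfile as sf

from fairseq.data.audio.audio_utils import get_waveform

sr = 16_000
tone = (0.1 * np.sin(2 * np.pi * 440.0 * np.arange(sr) / sr)).astype(np.float32)
sf.write("tone.wav", tone, sr)

waveform, sample_rate = get_waveform("tone.wav")        # normalized, shape (C, T)
print(waveform.shape, sample_rate)                      # (1, 16000) 16000

raw, _ = get_waveform("tone.wav", normalization=False)  # scaled to 16-bit integer range
print(float(raw.max()), float(waveform.max()) * 2 ** 15)  # same value on both sides
```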
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import numpy as np
import torch
from . import FairseqDataset, data_utils
def collate(
samples,
pad_idx,
eos_idx,
vocab,
left_pad_source=False,
left_pad_target=False,
input_feeding=True,
pad_to_length=None,
):
assert input_feeding
if len(samples) == 0:
return {}
def merge(key, left_pad, move_eos_to_beginning=False, pad_to_length=None):
return data_utils.collate_tokens(
[s[key] for s in samples],
pad_idx,
eos_idx=None, # use eos_idx of each sample instead of vocab.eos()
left_pad=left_pad,
move_eos_to_beginning=move_eos_to_beginning,
pad_to_length=pad_to_length,
)
id = torch.LongTensor([s["id"] for s in samples])
src_tokens = merge(
"source",
left_pad=left_pad_source,
pad_to_length=pad_to_length["source"] if pad_to_length is not None else None,
)
# sort by descending source length
src_lengths = torch.LongTensor([s["source"].numel() for s in samples])
src_lengths, sort_order = src_lengths.sort(descending=True)
id = id.index_select(0, sort_order)
src_tokens = src_tokens.index_select(0, sort_order)
prev_output_tokens = None
target = None
if samples[0].get("target", None) is not None:
target = merge(
"target",
left_pad=left_pad_target,
pad_to_length=pad_to_length["target"]
if pad_to_length is not None
else None,
)
target = target.index_select(0, sort_order)
ntokens = sum(len(s["target"]) for s in samples)
if input_feeding:
# we create a shifted version of targets for feeding the
# previous output token(s) into the next decoder step
prev_output_tokens = merge(
"target",
left_pad=left_pad_target,
move_eos_to_beginning=True,
pad_to_length=pad_to_length["target"]
if pad_to_length is not None
else None,
)
prev_output_tokens = prev_output_tokens.index_select(0, sort_order)
else:
ntokens = sum(len(s["source"]) for s in samples)
batch = {
"id": id,
"ntokens": ntokens,
"net_input": {
"src_tokens": src_tokens,
"src_lengths": src_lengths,
},
"target": target,
"nsentences": samples[0]["source"].size(0),
"sort_order": sort_order,
}
if prev_output_tokens is not None:
batch["net_input"]["prev_output_tokens"] = prev_output_tokens
return batch
class DenoisingDataset(FairseqDataset):
"""
A wrapper around TokenBlockDataset for BART dataset.
Args:
dataset (TokenBlockDataset): dataset to wrap
sizes (List[int]): sentence lengths
vocab (~fairseq.data.Dictionary): vocabulary
mask_idx (int): dictionary index used for masked token
mask_whole_words: only mask whole words. This should be a byte mask
over vocab indices, indicating whether it is the beginning of a
word. We will extend any mask to encompass the whole word.
shuffle (bool, optional): shuffle the elements before batching.
Default: ``True``
seed: Seed for random number generator for reproducibility.
args: argparse arguments.
"""
def __init__(
self,
dataset,
sizes,
vocab,
mask_idx,
mask_whole_words,
shuffle,
seed,
args,
eos=None,
item_transform_func=None,
):
self.dataset = dataset
self.sizes = sizes
self.vocab = vocab
self.shuffle = shuffle
self.seed = seed
self.mask_idx = mask_idx
self.mask_whole_word = mask_whole_words
self.mask_ratio = args.mask
self.random_ratio = args.mask_random
self.insert_ratio = args.insert
self.rotate_ratio = args.rotate
self.permute_sentence_ratio = args.permute_sentences
self.eos = eos if eos is not None else vocab.eos()
self.item_transform_func = item_transform_func
if args.bpe != "gpt2":
self.full_stop_index = self.vocab.eos()
else:
assert args.bpe == "gpt2"
self.full_stop_index = self.vocab.index("13")
self.replace_length = args.replace_length
if self.replace_length not in [-1, 0, 1]:
raise ValueError(f"invalid arg: replace_length={self.replace_length}")
if args.mask_length not in ["subword", "word", "span-poisson"]:
raise ValueError(f"invalid arg: mask-length={args.mask_length}")
if args.mask_length == "subword" and args.replace_length not in [0, 1]:
raise ValueError(f"if using subwords, use replace-length=1 or 0")
self.mask_span_distribution = None
if args.mask_length == "span-poisson":
_lambda = args.poisson_lambda
lambda_to_the_k = 1
e_to_the_minus_lambda = math.exp(-_lambda)
k_factorial = 1
ps = []
for k in range(0, 128):
ps.append(e_to_the_minus_lambda * lambda_to_the_k / k_factorial)
lambda_to_the_k *= _lambda
k_factorial *= k + 1
if ps[-1] < 0.0000001:
break
ps = torch.FloatTensor(ps)
self.mask_span_distribution = torch.distributions.Categorical(ps)
self.epoch = 0
@property
def can_reuse_epoch_itr_across_epochs(self):
return True # only the noise changes, not item sizes
def set_epoch(self, epoch, **unused):
self.epoch = epoch
def __getitem__(self, index):
with data_utils.numpy_seed(self.seed, self.epoch, index):
tokens = self.dataset[index]
assert tokens[-1] == self.eos
source, target = tokens, tokens.clone()
if self.permute_sentence_ratio > 0.0:
source = self.permute_sentences(source, self.permute_sentence_ratio)
if self.mask_ratio > 0:
source = self.add_whole_word_mask(source, self.mask_ratio)
if self.insert_ratio > 0:
source = self.add_insertion_noise(source, self.insert_ratio)
if self.rotate_ratio > 0.0 and np.random.random() < self.rotate_ratio:
source = self.add_rolling_noise(source)
        # there can be additional changes to make:
if self.item_transform_func is not None:
source, target = self.item_transform_func(source, target)
assert (source >= 0).all()
assert (source[1:-1] >= 1).all()
assert (source <= len(self.vocab)).all()
assert source[0] == self.vocab.bos()
assert source[-1] == self.eos
return {
"id": index,
"source": source,
"target": target,
}
def __len__(self):
return len(self.dataset)
def permute_sentences(self, source, p=1.0):
full_stops = source == self.full_stop_index
# Pretend it ends with a full stop so last span is a sentence
full_stops[-2] = 1
# Tokens that are full stops, where the previous token is not
sentence_ends = (full_stops[1:] * ~full_stops[:-1]).nonzero(as_tuple=False) + 2
result = source.clone()
num_sentences = sentence_ends.size(0)
num_to_permute = math.ceil((num_sentences * 2 * p) / 2.0)
substitutions = torch.randperm(num_sentences)[:num_to_permute]
ordering = torch.arange(0, num_sentences)
ordering[substitutions] = substitutions[torch.randperm(num_to_permute)]
# Ignore <bos> at start
index = 1
for i in ordering:
sentence = source[(sentence_ends[i - 1] if i > 0 else 1) : sentence_ends[i]]
result[index : index + sentence.size(0)] = sentence
index += sentence.size(0)
return result
def word_starts(self, source):
if self.mask_whole_word is not None:
is_word_start = self.mask_whole_word.gather(0, source)
else:
is_word_start = torch.ones(source.size())
is_word_start[0] = 0
is_word_start[-1] = 0
return is_word_start
def add_whole_word_mask(self, source, p):
is_word_start = self.word_starts(source)
num_to_mask = int(math.ceil(is_word_start.float().sum() * p))
num_inserts = 0
if num_to_mask == 0:
return source
if self.mask_span_distribution is not None:
lengths = self.mask_span_distribution.sample(sample_shape=(num_to_mask,))
# Make sure we have enough to mask
cum_length = torch.cumsum(lengths, 0)
while cum_length[-1] < num_to_mask:
lengths = torch.cat(
[
lengths,
self.mask_span_distribution.sample(sample_shape=(num_to_mask,)),
],
dim=0,
)
cum_length = torch.cumsum(lengths, 0)
# Trim to masking budget
i = 0
while cum_length[i] < num_to_mask:
i += 1
lengths[i] = num_to_mask - (0 if i == 0 else cum_length[i - 1])
num_to_mask = i + 1
lengths = lengths[:num_to_mask]
# Handle 0-length mask (inserts) separately
lengths = lengths[lengths > 0]
num_inserts = num_to_mask - lengths.size(0)
num_to_mask -= num_inserts
if num_to_mask == 0:
return self.add_insertion_noise(source, num_inserts / source.size(0))
assert (lengths > 0).all()
else:
lengths = torch.ones((num_to_mask,)).long()
assert is_word_start[-1] == 0
word_starts = is_word_start.nonzero(as_tuple=False)
indices = word_starts[
torch.randperm(word_starts.size(0))[:num_to_mask]
].squeeze(1)
mask_random = torch.FloatTensor(num_to_mask).uniform_() < self.random_ratio
source_length = source.size(0)
assert source_length - 1 not in indices
to_keep = torch.ones(source_length, dtype=torch.bool)
is_word_start[
-1
] = 255 # acts as a long length, so spans don't go over the end of doc
if self.replace_length == 0:
to_keep[indices] = 0
else:
# keep index, but replace it with [MASK]
source[indices] = self.mask_idx
source[indices[mask_random]] = torch.randint(
1, len(self.vocab), size=(mask_random.sum(),)
)
if self.mask_span_distribution is not None:
assert len(lengths.size()) == 1
assert lengths.size() == indices.size()
lengths -= 1
while indices.size(0) > 0:
assert lengths.size() == indices.size()
lengths -= is_word_start[indices + 1].long()
uncompleted = lengths >= 0
indices = indices[uncompleted] + 1
mask_random = mask_random[uncompleted]
lengths = lengths[uncompleted]
if self.replace_length != -1:
# delete token
to_keep[indices] = 0
else:
# keep index, but replace it with [MASK]
source[indices] = self.mask_idx
source[indices[mask_random]] = torch.randint(
1, len(self.vocab), size=(mask_random.sum(),)
)
else:
# A bit faster when all lengths are 1
while indices.size(0) > 0:
uncompleted = is_word_start[indices + 1] == 0
indices = indices[uncompleted] + 1
mask_random = mask_random[uncompleted]
if self.replace_length != -1:
# delete token
to_keep[indices] = 0
else:
# keep index, but replace it with [MASK]
source[indices] = self.mask_idx
source[indices[mask_random]] = torch.randint(
1, len(self.vocab), size=(mask_random.sum(),)
)
assert source_length - 1 not in indices
source = source[to_keep]
if num_inserts > 0:
source = self.add_insertion_noise(source, num_inserts / source.size(0))
return source
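# Illustrative sketch (hypothetical sub-tokens): with replace_length=1,
# random_ratio=0 and no span-length distribution, masking the single word
# "he@@ llo" in
#     source = [<s>, he@@, llo, world, </s>]
# replaces its first sub-token with <mask> and drops the rest of that word:
#     source = [<s>, <mask>, world, </s>]
# With replace_length=0 the whole word is deleted instead; when
# mask_span_distribution is set, span lengths come from the truncated Poisson
# built in __init__ and 0-length spans become insertions.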
def add_permuted_noise(self, tokens, p):
num_words = len(tokens)
num_to_permute = math.ceil(((num_words * 2) * p) / 2.0)
substitutions = torch.randperm(num_words - 2)[:num_to_permute] + 1
tokens[substitutions] = tokens[substitutions[torch.randperm(num_to_permute)]]
return tokens
def add_rolling_noise(self, tokens):
offset = np.random.randint(1, max(1, tokens.size(-1) - 1) + 1)
tokens = torch.cat(
(tokens[0:1], tokens[offset:-1], tokens[1:offset], tokens[-1:]),
dim=0,
)
return tokens
def add_insertion_noise(self, tokens, p):
if p == 0.0:
return tokens
num_tokens = len(tokens)
n = int(math.ceil(num_tokens * p))
noise_indices = torch.randperm(num_tokens + n - 2)[:n] + 1
noise_mask = torch.zeros(size=(num_tokens + n,), dtype=torch.bool)
noise_mask[noise_indices] = 1
result = torch.LongTensor(n + len(tokens)).fill_(-1)
num_random = int(math.ceil(n * self.random_ratio))
result[noise_indices[num_random:]] = self.mask_idx
result[noise_indices[:num_random]] = torch.randint(
low=1, high=len(self.vocab), size=(num_random,)
)
result[~noise_mask] = tokens
assert (result >= 0).all()
return result
def collater(self, samples, pad_to_length=None):
"""Merge a list of samples to form a mini-batch.
Args:
samples (List[dict]): samples to collate
Returns:
dict: a mini-batch of data
"""
return collate(
samples, self.vocab.pad(), self.eos, self.vocab, pad_to_length=pad_to_length
)
def num_tokens(self, index):
"""Return the number of tokens in a sample. This value is used to
enforce ``--max-tokens`` during batching."""
return self.sizes[index]
def size(self, index):
"""Return an example's size as a float or tuple. This value is used when
filtering a dataset with ``--max-positions``."""
return self.sizes[index]
def ordered_indices(self):
"""Return an ordered list of indices. Batches will be constructed based
on this order."""
if self.shuffle:
indices = np.random.permutation(len(self))
else:
indices = np.arange(len(self))
return indices[np.argsort(self.sizes[indices], kind="mergesort")]
def prefetch(self, indices):
self.src.prefetch(indices)
self.tgt.prefetch(indices)
@property
def supports_prefetch(self):
return (
hasattr(self.src, "supports_prefetch")
and self.src.supports_prefetch
and hasattr(self.tgt, "supports_prefetch")
and self.tgt.supports_prefetch
)
| COCO-LM/fairseq/fairseq/data/denoising_dataset.py/0 | {
"file_path": "COCO-LM/fairseq/fairseq/data/denoising_dataset.py",
"repo_id": "COCO-LM",
"token_count": 7651
} | 182 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass, field
from fairseq import file_utils
from fairseq.data.encoders import register_bpe
from fairseq.dataclass import FairseqDataclass
@dataclass
class SubwordNMTBPEConfig(FairseqDataclass):
bpe_codes: str = field(default="???", metadata={"help": "path to subword NMT BPE"})
bpe_separator: str = field(default="@@", metadata={"help": "BPE separator"})
@register_bpe("subword_nmt", dataclass=SubwordNMTBPEConfig)
class SubwordNMTBPE(object):
def __init__(self, cfg):
if cfg.bpe_codes is None:
raise ValueError("--bpe-codes is required for --bpe=subword_nmt")
codes = file_utils.cached_path(cfg.bpe_codes)
try:
from subword_nmt import apply_bpe
bpe_parser = apply_bpe.create_parser()
bpe_args = bpe_parser.parse_args(
[
"--codes",
codes,
"--separator",
cfg.bpe_separator,
]
)
self.bpe = apply_bpe.BPE(
bpe_args.codes,
bpe_args.merges,
bpe_args.separator,
None,
bpe_args.glossaries,
)
self.bpe_symbol = bpe_args.separator + " "
except ImportError:
raise ImportError(
"Please install subword_nmt with: pip install subword-nmt"
)
def encode(self, x: str) -> str:
return self.bpe.process_line(x)
def decode(self, x: str) -> str:
return (x + " ").replace(self.bpe_symbol, "").rstrip()
| COCO-LM/fairseq/fairseq/data/encoders/subword_nmt_bpe.py/0 | {
"file_path": "COCO-LM/fairseq/fairseq/data/encoders/subword_nmt_bpe.py",
"repo_id": "COCO-LM",
"token_count": 882
} | 183 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import torch
from . import FairseqDataset, data_utils
def collate(samples, pad_idx, eos_idx):
if len(samples) == 0:
return {}
def merge(key, is_list=False):
if is_list:
res = []
for i in range(len(samples[0][key])):
res.append(
data_utils.collate_tokens(
[s[key][i] for s in samples],
pad_idx,
eos_idx,
left_pad=False,
)
)
return res
else:
return data_utils.collate_tokens(
[s[key] for s in samples],
pad_idx,
eos_idx,
left_pad=False,
)
src_tokens = merge("source")
if samples[0]["target"] is not None:
is_target_list = isinstance(samples[0]["target"], list)
target = merge("target", is_target_list)
else:
target = src_tokens
return {
"id": torch.LongTensor([s["id"] for s in samples]),
"nsentences": len(samples),
"ntokens": sum(len(s["source"]) for s in samples),
"net_input": {
"src_tokens": src_tokens,
"src_lengths": torch.LongTensor([s["source"].numel() for s in samples]),
},
"target": target,
}
class MonolingualDataset(FairseqDataset):
"""
A wrapper around torch.utils.data.Dataset for monolingual data.
Args:
dataset (torch.utils.data.Dataset): dataset to wrap
sizes (List[int]): sentence lengths
vocab (~fairseq.data.Dictionary): vocabulary
shuffle (bool, optional): shuffle the elements before batching
(default: True).
"""
def __init__(
self,
dataset,
sizes,
src_vocab,
tgt_vocab=None,
add_eos_for_other_targets=False,
shuffle=False,
targets=None,
add_bos_token=False,
):
self.dataset = dataset
self.sizes = np.array(sizes)
self.vocab = src_vocab
self.tgt_vocab = tgt_vocab or src_vocab
self.add_eos_for_other_targets = add_eos_for_other_targets
self.shuffle = shuffle
self.add_bos_token = add_bos_token
assert targets is None or all(
t in {"self", "future", "past"} for t in targets
), "targets must be none or one of 'self', 'future', 'past'"
if targets is not None and len(targets) == 0:
targets = None
self.targets = targets
def __getitem__(self, index):
if self.targets is not None:
# *future_target* is the original sentence
# *source* is shifted right by 1 (maybe left-padded with eos)
# *past_target* is shifted right by 2 (left-padded as needed)
#
# Left-to-right language models should condition on *source* and
# predict *future_target*.
# Right-to-left language models should condition on *source* and
# predict *past_target*.
source, future_target, past_target = self.dataset[index]
source, target = self._make_source_target(
source, future_target, past_target
)
else:
source = self.dataset[index]
target = None
source, target = self._maybe_add_bos(source, target)
return {"id": index, "source": source, "target": target}
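# Worked example (hypothetical token ids): if the underlying LM dataset yields
#     future_target = [5, 6, 7, </s>]            (the original sentence)
# then, as described above,
#     source        = [</s>, 5, 6, 7]            (shifted right by 1)
#     past_target   = [<pad>, </s>, 5, 6]        (shifted right by 2)
# so a left-to-right model conditions on `source` to predict `future_target`,
# while a right-to-left model predicts `past_target`.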
def __len__(self):
return len(self.dataset)
def _make_source_target(self, source, future_target, past_target):
if self.targets is not None:
target = []
if (
self.add_eos_for_other_targets
and (("self" in self.targets) or ("past" in self.targets))
and source[-1] != self.vocab.eos()
):
# append eos at the end of source
source = torch.cat([source, source.new([self.vocab.eos()])])
if "future" in self.targets:
future_target = torch.cat(
[future_target, future_target.new([self.vocab.pad()])]
)
if "past" in self.targets:
# the first token precedes the start of the sentence; it is only used in
# "none" break mode when add_eos_for_other_targets is False
past_target = torch.cat(
[
past_target.new([self.vocab.pad()]),
past_target[1:],
source[-2, None],
]
)
for t in self.targets:
if t == "self":
target.append(source)
elif t == "future":
target.append(future_target)
elif t == "past":
target.append(past_target)
else:
raise Exception("invalid target " + t)
if len(target) == 1:
target = target[0]
else:
target = future_target
return source, self._filter_vocab(target)
def _maybe_add_bos(self, source, target):
if self.add_bos_token:
source = torch.cat([source.new([self.vocab.bos()]), source])
if target is not None:
target = torch.cat([target.new([self.tgt_vocab.bos()]), target])
return source, target
def _filter_vocab(self, target):
if len(self.tgt_vocab) != len(self.vocab):
def _filter(target):
mask = target.ge(len(self.tgt_vocab))
if mask.any():
target[mask] = self.tgt_vocab.unk()
return target
if isinstance(target, list):
return [_filter(t) for t in target]
return _filter(target)
return target
def collater(self, samples):
"""Merge a list of samples to form a mini-batch.
Args:
samples (List[dict]): samples to collate
Returns:
dict: a mini-batch with the following keys:
- `id` (LongTensor): example IDs in the original input order
- `ntokens` (int): total number of tokens in the batch
- `net_input` (dict): the input to the Model, containing keys:
- `src_tokens` (LongTensor): a padded 2D Tensor of tokens in
the source sentence of shape `(bsz, src_len)`. Padding will
appear on the right.
- `target` (LongTensor): a padded 2D Tensor of tokens in the
target sentence of shape `(bsz, tgt_len)`. Padding will appear
on the right.
"""
return collate(samples, self.vocab.pad(), self.vocab.eos())
def num_tokens(self, index):
"""Return the number of tokens in a sample. This value is used to
enforce ``--max-tokens`` during batching."""
return self.sizes[index]
def size(self, index):
"""Return an example's size as a float or tuple. This value is used when
filtering a dataset with ``--max-positions``."""
return self.sizes[index]
def ordered_indices(self):
"""Return an ordered list of indices. Batches will be constructed based
on this order."""
if self.shuffle:
order = [np.random.permutation(len(self))]
else:
order = [np.arange(len(self))]
order.append(self.sizes)
return np.lexsort(order)
@property
def supports_prefetch(self):
return getattr(self.dataset, "supports_prefetch", False)
def prefetch(self, indices):
self.dataset.prefetch(indices)
| COCO-LM/fairseq/fairseq/data/monolingual_dataset.py/0 | {
"file_path": "COCO-LM/fairseq/fairseq/data/monolingual_dataset.py",
"repo_id": "COCO-LM",
"token_count": 4007
} | 184 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import torch
from . import BaseWrapperDataset
class PrependDataset(BaseWrapperDataset):
def __init__(self, dataset, prepend_getter, ensure_first_token_is=None):
super().__init__(dataset)
self.prepend_getter = prepend_getter
self.ensure_first_token = ensure_first_token_is
def __getitem__(self, idx):
item = self.dataset[idx]
is_tuple = isinstance(item, tuple)
src = item[0] if is_tuple else item
assert self.ensure_first_token is None or src[0] == self.ensure_first_token
prepend_idx = self.prepend_getter(self.dataset, idx)
assert isinstance(prepend_idx, int)
src[0] = prepend_idx
item = tuple((src,) + item[1:]) if is_tuple else src
return item
| COCO-LM/fairseq/fairseq/data/prepend_dataset.py/0 | {
"file_path": "COCO-LM/fairseq/fairseq/data/prepend_dataset.py",
"repo_id": "COCO-LM",
"token_count": 387
} | 185 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import torch
from fairseq.data import FairseqDataset, plasma_utils
from fairseq.data.indexed_dataset import best_fitting_int_dtype
from typing import Tuple
class TokenBlockDataset(FairseqDataset):
"""Break a Dataset of tokens into blocks.
Args:
dataset (~torch.utils.data.Dataset): dataset to break into blocks
sizes (List[int]): sentence lengths (required for 'complete' and 'eos')
block_size (int): maximum block size (ignored in 'eos' break mode)
break_mode (str, optional): Mode used for breaking tokens. Values can
be one of:
- 'none': break tokens into equally sized blocks (up to block_size)
- 'complete': break tokens into blocks (up to block_size) such that
blocks contains complete sentences, although block_size may be
exceeded if some sentences exceed block_size
- 'complete_doc': similar to 'complete' mode, but do not
cross document boundaries
- 'eos': each block contains one sentence (block_size is ignored)
include_targets (bool, optional): return next tokens as targets
(default: False).
document_sep_len (int, optional): document separator size (required for
'complete_doc' break mode). Typically 1 if the sentences have eos
and 0 otherwise.
"""
def __init__(
self,
dataset,
sizes,
block_size,
pad,
eos,
break_mode=None,
include_targets=False,
document_sep_len=1,
use_plasma_view=False,
split_path=None,
plasma_path=None,
):
super().__init__()
self.dataset = dataset
self.pad = pad
self.eos = eos
self.include_targets = include_targets
assert len(dataset) > 0
assert len(dataset) == len(sizes)
_sizes, block_to_dataset_index, slice_indices = self._build_slice_indices(
sizes, break_mode, document_sep_len, block_size
)
# use_plasma_view is slower, disable it
self._slice_indices = plasma_utils.PlasmaArray(slice_indices)
self._sizes = plasma_utils.PlasmaArray(_sizes)
self._block_to_dataset_index = plasma_utils.PlasmaArray(
block_to_dataset_index
)
@staticmethod
def _build_slice_indices(
sizes, break_mode, document_sep_len, block_size
) -> Tuple[np.ndarray]:
"""Use token_block_utils_fast to build arrays for indexing into self.dataset"""
try:
from fairseq.data.token_block_utils_fast import (
_get_slice_indices_fast,
_get_block_to_dataset_index_fast,
)
except ImportError:
raise ImportError(
"Please build Cython components with: `pip install --editable .` "
"or `python setup.py build_ext --inplace`"
)
if isinstance(sizes, list):
sizes = np.array(sizes, dtype=np.int64)
else:
if torch.is_tensor(sizes):
sizes = sizes.numpy()
sizes = sizes.astype(np.int64)
break_mode = break_mode if break_mode is not None else "none"
# For "eos" break-mode, block_size is not required parameters.
if break_mode == "eos" and block_size is None:
block_size = 0
slice_indices = _get_slice_indices_fast(
sizes, str(break_mode), block_size, document_sep_len
)
_sizes = slice_indices[:, 1] - slice_indices[:, 0]
# build index mapping block indices to the underlying dataset indices
if break_mode == "eos":
# much faster version for eos break mode
block_to_dataset_index = np.stack(
[
np.arange(len(sizes)), # starting index in dataset
np.zeros(
len(sizes), dtype=np.compat.long
), # starting offset within starting index
np.arange(len(sizes)), # ending index in dataset
],
1,
)
else:
block_to_dataset_index = _get_block_to_dataset_index_fast(
sizes, slice_indices,
)
size_dtype = np.uint16 if block_size < 65535 else np.uint32
num_tokens = slice_indices[-1].max()
slice_indices_dtype = best_fitting_int_dtype(num_tokens)
slice_indices = slice_indices.astype(slice_indices_dtype)
_sizes = _sizes.astype(size_dtype)
block_to_dataset_index = block_to_dataset_index.astype(slice_indices_dtype)
return _sizes, block_to_dataset_index, slice_indices
@property
def slice_indices(self):
return self._slice_indices.array
@property
def sizes(self):
return self._sizes.array
@property
def block_to_dataset_index(self):
return self._block_to_dataset_index.array
def attr(self, attr: str, index: int):
start_ds_idx, _, _ = self.block_to_dataset_index[index]
return self.dataset.attr(attr, start_ds_idx)
def __getitem__(self, index):
start_ds_idx, start_offset, end_ds_idx = self.block_to_dataset_index[index]
buffer = torch.cat(
[self.dataset[idx] for idx in range(start_ds_idx, end_ds_idx + 1)]
)
slice_s, slice_e = self.slice_indices[index]
length = slice_e - slice_s
s, e = start_offset, start_offset + length
item = buffer[s:e]
if self.include_targets:
# *target* is the original sentence (=item)
# *source* is shifted right by 1 (maybe left-padded with eos)
# *past_target* is shifted right by 2 (left-padded as needed)
if s == 0:
source = torch.cat([item.new([self.eos]), buffer[0 : e - 1]])
past_target = torch.cat(
[item.new([self.pad, self.eos]), buffer[0 : e - 2]]
)
else:
source = buffer[s - 1 : e - 1]
if s == 1:
past_target = torch.cat([item.new([self.eos]), buffer[0 : e - 2]])
else:
past_target = buffer[s - 2 : e - 2]
return source, item, past_target
return item
def __len__(self):
return len(self.slice_indices)
@property
def supports_prefetch(self):
return getattr(self.dataset, "supports_prefetch", False)
def prefetch(self, indices):
self.dataset.prefetch(
{
ds_idx
for index in indices
for start_ds_idx, _, end_ds_idx in [self.block_to_dataset_index[index]]
for ds_idx in range(start_ds_idx, end_ds_idx + 1)
}
)
| COCO-LM/fairseq/fairseq/data/token_block_dataset.py/0 | {
"file_path": "COCO-LM/fairseq/fairseq/data/token_block_dataset.py",
"repo_id": "COCO-LM",
"token_count": 3345
} | 186 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import io
import logging
import os
import pickle
import random
import socket
import struct
import subprocess
import warnings
from argparse import Namespace
from collections import OrderedDict
from dataclasses import dataclass
from typing import Any, Dict, List, Mapping, Optional
import torch
import torch.distributed as dist
from fairseq.dataclass.configs import DistributedTrainingConfig, FairseqConfig
from omegaconf import open_dict
try:
import torch_xla.core.xla_model as xm
except ImportError:
xm = None
# Flag to indicate if we're using Megatron
# NOTE: this is a temporary hack until we move away from Megatron's model parallel init
_USE_MEGATRON = False
# Whether to use XLA ops (e.g., on TPUs) instead of CUDA ops.
_USE_XLA = False
logger = logging.getLogger(__name__)
def is_master(cfg: DistributedTrainingConfig):
return cfg.distributed_rank == 0
def infer_init_method(cfg: DistributedTrainingConfig, force_distributed=False):
if cfg.distributed_init_method is not None or cfg.tpu:
return
num_pipelines_per_node = None
if cfg.pipeline_model_parallel:
num_pipeline_devices, num_pipelines_per_node = _pipeline_parallel_pre_init(cfg)
if all(
key in os.environ
for key in ["MASTER_ADDR", "MASTER_PORT", "WORLD_SIZE", "RANK"]
):
# support torch.distributed.launch
_infer_torch_distributed_launch_init(cfg)
elif cfg.distributed_port > 0:
# we can determine the init method automatically for Slurm
_infer_slurm_init(cfg, num_pipelines_per_node)
elif cfg.distributed_world_size > 1 or force_distributed:
# fallback for single node with multiple GPUs
_infer_single_node_init(cfg)
if cfg.pipeline_model_parallel:
_pipeline_parallel_post_init(cfg, num_pipeline_devices, num_pipelines_per_node)
elif not cfg.distributed_no_spawn:
with open_dict(cfg):
cfg.distributed_num_procs = min(
torch.cuda.device_count(), cfg.distributed_world_size
)
def _infer_torch_distributed_launch_init(cfg: DistributedTrainingConfig):
cfg.distributed_init_method = "env://"
cfg.distributed_world_size = int(os.environ["WORLD_SIZE"])
cfg.distributed_rank = int(os.environ["RANK"])
# processes are created by torch.distributed.launch
cfg.distributed_no_spawn = True
def _infer_slurm_init(cfg: DistributedTrainingConfig, num_pipelines_per_node):
node_list = os.environ.get("SLURM_STEP_NODELIST")
if node_list is None:
node_list = os.environ.get("SLURM_JOB_NODELIST")
if node_list is not None:
try:
hostnames = subprocess.check_output(
["scontrol", "show", "hostnames", node_list]
)
cfg.distributed_init_method = "tcp://{host}:{port}".format(
host=hostnames.split()[0].decode("utf-8"),
port=cfg.distributed_port,
)
nnodes = int(os.environ.get("SLURM_NNODES"))
ntasks_per_node = os.environ.get("SLURM_NTASKS_PER_NODE")
if ntasks_per_node is not None:
ntasks_per_node = int(ntasks_per_node)
else:
ntasks = int(os.environ.get("SLURM_NTASKS"))
nnodes = int(os.environ.get("SLURM_NNODES"))
assert ntasks % nnodes == 0
ntasks_per_node = int(ntasks / nnodes)
if ntasks_per_node == 1:
gpus_per_node = torch.cuda.device_count()
node_id = int(os.environ.get("SLURM_NODEID"))
cfg.distributed_rank = node_id * gpus_per_node
cfg.distributed_world_size = nnodes * gpus_per_node
elif cfg.pipeline_model_parallel:
assert ntasks_per_node == num_pipelines_per_node, (
"SLURM --ntasks-per-node must match number of pipelines per "
"node (={})".format(num_pipelines_per_node)
)
cfg.distributed_no_spawn = True
# For 4-way MP on nodes with 8 GPUs, ranks will be [0, 1] on
# the first node, [2, 3] on the second node, etc. This
# matches torch.distributed.launch.
node_id = int(os.environ.get("SLURM_NODEID"))
local_id = int(os.environ.get("SLURM_LOCALID"))
cfg.distributed_rank = node_id * num_pipelines_per_node + local_id
# In the above example, device_id will always be in [0, 1],
# which also matches torch.distributed.launch.
cfg.device_id = local_id
# We also want to set distributed_world_size to be the total
# number of pipelines across all nodes.
cfg.distributed_world_size = nnodes * num_pipelines_per_node
else:
assert ntasks_per_node == cfg.distributed_world_size // nnodes
cfg.distributed_no_spawn = True
cfg.distributed_rank = int(os.environ.get("SLURM_PROCID"))
cfg.device_id = int(os.environ.get("SLURM_LOCALID"))
except subprocess.CalledProcessError as e: # scontrol failed
raise e
except FileNotFoundError: # Slurm is not installed
pass
def _infer_single_node_init(cfg: DistributedTrainingConfig):
assert (
cfg.distributed_world_size <= torch.cuda.device_count()
), f"world size is {cfg.distributed_world_size} but have {torch.cuda.device_count()} available devices"
port = random.randint(10000, 20000)
cfg.distributed_init_method = "tcp://localhost:{port}".format(port=port)
def _pipeline_parallel_pre_init(cfg: DistributedTrainingConfig):
from fairseq import utils
balance_exists = (
cfg.pipeline_balance is not None
or cfg.pipeline_encoder_balance is not None
or cfg.pipeline_decoder_balance is not None
)
devices_exist = (
cfg.pipeline_devices is not None
or cfg.pipeline_encoder_devices is not None
or cfg.pipeline_decoder_devices is not None
)
if not balance_exists:
raise ValueError(
"--pipeline-balance is currently required for pipeline model parallelism"
)
if not devices_exist:
raise ValueError(
"--pipeline-devices is currently required for pipeline model parallelism"
)
cfg.pipeline_balance = utils.eval_str_list(cfg.pipeline_balance, type=int)
if cfg.pipeline_devices is not None:
cfg.pipeline_devices = utils.eval_str_list(cfg.pipeline_devices, type=int)
num_pipeline_devices = len(set(cfg.pipeline_devices))
else:
cfg.pipeline_encoder_devices = utils.eval_str_list(
cfg.pipeline_encoder_devices, type=int
)
cfg.pipeline_decoder_devices = utils.eval_str_list(
cfg.pipeline_decoder_devices, type=int
)
num_pipeline_devices = len(
set(cfg.pipeline_encoder_devices + cfg.pipeline_decoder_devices)
)
gpus_per_node = torch.cuda.device_count()
assert (
gpus_per_node >= num_pipeline_devices
and gpus_per_node % num_pipeline_devices == 0
), (
"the number of unique device IDs in --pipeline-devices must evenly divide "
"the number of GPUs per node (multi-node pipelining is not yet supported)"
)
num_pipelines_per_node = gpus_per_node // num_pipeline_devices
return num_pipeline_devices, num_pipelines_per_node
def _pipeline_parallel_post_init(
cfg: DistributedTrainingConfig, num_pipeline_devices, num_pipelines_per_node
):
if not cfg.distributed_no_spawn:
# When distributed_no_spawn is False, we expect distributed_rank and
# distributed_world_size to be based on the total number of GPUs, so
# we need to correct them to be based on the number of pipelines.
assert cfg.distributed_world_size % num_pipeline_devices == 0
cfg.distributed_world_size = (
cfg.distributed_world_size // num_pipeline_devices
)
# In the case of 4-way MP on nodes with 8 GPUs, we want
# distributed_rank to be the starting GPU index for each pipeline
# i.e., 0, 2, ...
gpus_per_node = torch.cuda.device_count()
assert cfg.distributed_rank % gpus_per_node == 0
assert cfg.distributed_rank % num_pipeline_devices == 0
with open_dict(cfg):
cfg.distributed_rank = cfg.distributed_rank // num_pipeline_devices
# launch one process per pipeline
cfg.distributed_num_procs = num_pipelines_per_node
# if we have 4-way MP on a node with 8 GPUs, we want device_ids to be 0
# and 4, indicating the starting device IDs for each pipeline
cfg.device_id *= num_pipeline_devices
if cfg.device_id > 0:
# if there's multiple pipelines on a node (e.g., 4-way MP on an 8
# GPU node), we need to adjust pipeline_devices accordingly
logger.debug(
"setting CUDA device={} on rank {}".format(
cfg.device_id, cfg.distributed_rank
)
)
torch.cuda.set_device(cfg.device_id)
with open_dict(cfg):
cfg.pipeline_devices = [cfg.device_id + d for d in cfg.pipeline_devices]
logger.info(
"setting pipeline_devices={} on rank {}".format(
cfg.pipeline_devices, cfg.distributed_rank
)
)
def distributed_init(cfg: FairseqConfig):
if isinstance(cfg, Namespace):
from fairseq.dataclass.utils import convert_namespace_to_omegaconf
cfg = convert_namespace_to_omegaconf(cfg)
if not cfg.common.tpu:
if torch.distributed.is_available() and torch.distributed.is_initialized():
warnings.warn(
"Distributed is already initialized, cannot initialize twice!"
)
else:
logger.info(
"distributed init (rank {}): {}".format(
cfg.distributed_training.distributed_rank,
cfg.distributed_training.distributed_init_method,
)
)
dist.init_process_group(
backend=cfg.distributed_training.distributed_backend,
init_method=cfg.distributed_training.distributed_init_method,
world_size=cfg.distributed_training.distributed_world_size,
rank=cfg.distributed_training.distributed_rank,
)
logger.info(
"initialized host {} as rank {}".format(
socket.gethostname(),
cfg.distributed_training.distributed_rank,
)
)
# perform a dummy all-reduce to initialize the NCCL communicator
if torch.cuda.is_available():
dist.all_reduce(torch.zeros(1).cuda())
cfg.distributed_training.distributed_rank = torch.distributed.get_rank()
else:
assert xm.xrt_world_size() == cfg.distributed_training.distributed_world_size
global _USE_XLA
_USE_XLA = True
cfg.distributed_training.device_id = xm.get_local_ordinal()
cfg.distributed_training.distributed_rank = xm.get_ordinal()
xm.rendezvous("distributed_init") # wait for all workers
if is_master(cfg.distributed_training):
logging.getLogger().setLevel(logging.INFO)
else:
logging.getLogger().setLevel(logging.WARNING)
if cfg.common.model_parallel_size > 1:
try:
from fairseq.model_parallel.megatron.mpu import (
initialize_model_parallel,
model_parallel_cuda_manual_seed,
)
except ImportError:
raise ImportError(
"\n\nPlease install the megatron submodule:"
"\n\n git submodule update --init "
"fairseq/model_parallel/megatron"
)
global _USE_MEGATRON
_USE_MEGATRON = True
initialize_model_parallel(cfg.common.model_parallel_size)
model_parallel_cuda_manual_seed(cfg.common.seed)
model_part_number = get_model_parallel_rank()
cfg.checkpoint.checkpoint_suffix += "-model_part-{0}".format(model_part_number)
return cfg.distributed_training.distributed_rank
def distributed_main(i, main, cfg: FairseqConfig, kwargs):
cfg.distributed_training.device_id = i
if torch.cuda.is_available() and not cfg.common.cpu and not cfg.common.tpu:
torch.cuda.set_device(cfg.distributed_training.device_id)
if cfg.distributed_training.distributed_rank is None: # torch.multiprocessing.spawn
cfg.distributed_training.distributed_rank = kwargs.pop("start_rank", 0) + i
cfg.distributed_training.distributed_rank = distributed_init(cfg)
after_distributed_init_fn = kwargs.pop("after_distributed_init_fn", None)
if after_distributed_init_fn:
cfg = after_distributed_init_fn(cfg)
main(cfg, **kwargs)
if torch.distributed.is_initialized():
torch.distributed.barrier(get_global_group())
def call_main(cfg: FairseqConfig, main, **kwargs):
if cfg.distributed_training.distributed_init_method is None:
infer_init_method(cfg.distributed_training)
if cfg.distributed_training.distributed_init_method is not None:
# distributed training
if not cfg.distributed_training.distributed_no_spawn:
start_rank = cfg.distributed_training.distributed_rank
cfg.distributed_training.distributed_rank = None # assign automatically
kwargs["start_rank"] = start_rank
torch.multiprocessing.spawn(
fn=distributed_main,
args=(main, cfg, kwargs),
nprocs=min(
torch.cuda.device_count(),
cfg.distributed_training.distributed_world_size,
),
join=True,
)
else:
distributed_main(cfg.distributed_training.device_id, main, cfg, kwargs)
elif cfg.common.tpu and cfg.distributed_training.distributed_world_size > 1:
import torch_xla.distributed.xla_multiprocessing as xmp
torch.multiprocessing.set_sharing_strategy("file_system")
xmp.spawn(
fn=distributed_main,
args=(main, cfg, kwargs),
# tpu-comment:
# 8 devices in one TPU VM is the maximum number of processes to be spawned.
# The rest is driven by xm.distributed.xla_dist
nprocs=min(cfg.distributed_training.distributed_world_size, 8),
)
else:
# single GPU main
main(cfg, **kwargs)
def use_xla():
global _USE_XLA
return _USE_XLA
def new_groups(grouped_ranks: List[List[int]]):
if use_xla():
return ("tpu", grouped_ranks)
else:
groups = [dist.new_group(g) for g in grouped_ranks]
my_group_idx = _find_my_group_index(grouped_ranks)
return groups[my_group_idx]
def _find_my_group_index(grouped_ranks):
my_rank = get_global_rank()
for i, group in enumerate(grouped_ranks):
if my_rank in group:
return i
raise RuntimeError
def _find_my_group(grouped_ranks):
index = _find_my_group_index(grouped_ranks)
return grouped_ranks[index]
def get_rank(group):
if use_xla():
assert group[0] == "tpu"
my_group = _find_my_group(group[1])
return my_group.index(get_global_rank())
else:
return dist.get_rank(group=group)
def get_world_size(group):
if use_xla():
assert group[0] == "tpu"
my_group = _find_my_group(group[1])
return len(my_group)
elif torch.distributed.is_initialized():
return dist.get_world_size(group=group)
else:
return 1
def get_global_group():
if use_xla():
return new_groups([list(range(get_global_world_size()))])
elif torch.distributed.is_initialized():
if not hasattr(get_global_group, "_global_group"):
# ideally we could use torch.distributed.group.WORLD, but it seems
# to cause random NCCL hangs in some cases
get_global_group._global_group = dist.new_group()
return get_global_group._global_group
else:
return None
def get_global_rank():
if use_xla():
return xm.get_ordinal()
elif torch.distributed.is_initialized():
return torch.distributed.get_rank()
else:
return 0
def get_global_world_size():
if use_xla():
return xm.xrt_world_size()
elif torch.distributed.is_initialized():
return torch.distributed.get_world_size()
else:
return 1
def get_data_parallel_group():
"""Get the data parallel group the caller rank belongs to."""
global _USE_MEGATRON
if _USE_MEGATRON:
from fairseq.model_parallel.megatron import mpu
return mpu.get_data_parallel_group()
else:
return get_global_group()
def get_data_parallel_rank():
"""Return my rank for the data parallel group."""
return get_rank(get_data_parallel_group())
def get_data_parallel_world_size():
"""Return world size for the data parallel group."""
return get_world_size(get_data_parallel_group())
def get_model_parallel_group():
global _USE_MEGATRON
if _USE_MEGATRON:
from fairseq.model_parallel.megatron import mpu
return mpu.get_model_parallel_group()
else:
return None
def get_model_parallel_rank():
"""Return my rank for the model parallel group."""
return get_rank(get_model_parallel_group())
def get_model_parallel_world_size():
"""Return world size for the model parallel group."""
return get_world_size(get_model_parallel_group())
def all_reduce(tensor, group, op="sum"):
if use_xla():
assert isinstance(group, tuple) and group[0] == "tpu"
tensor = [tensor] # wrap in a list to make xm.all_reduce in-place
return xm.all_reduce(op, tensor, groups=group[1])[0]
else:
if op == "sum":
op = dist.ReduceOp.SUM
elif op == "max":
op = dist.ReduceOp.MAX
else:
raise NotImplementedError
dist.all_reduce(tensor, op=op, group=group)
return tensor
def broadcast(tensor, src, group):
if use_xla():
# XLA doesn't support broadcast, hack it with all_reduce
if get_rank(group) != src:
tensor.zero_()
all_reduce(tensor, group)
else:
dist.broadcast(tensor, src=src, group=group)
def all_to_all(tensor, group):
"""Perform an all-to-all operation on a 1D Tensor."""
assert tensor.dim() == 1
split_count = get_world_size(group=group)
assert tensor.numel() % split_count == 0
if use_xla():
assert isinstance(group, tuple) and group[0] == "tpu"
return xm.all_to_all(
tensor,
split_dimension=0,
concat_dimension=0,
split_count=split_count,
groups=group[1],
)
else:
output = torch.zeros_like(tensor)
dist.all_to_all_single(output, tensor, group=group)
return output
def all_gather(tensor, group, return_tensor=False):
"""Perform an all-gather operation."""
if use_xla():
result = xm.all_gather(tensor, groups=group[1])
world_size = get_world_size(group=group)
result = result.view(world_size, *tensor.size())
if return_tensor:
return result
else:
return [result[i] for i in range(world_size)]
else:
world_size = get_world_size(group=group)
rank = get_rank(group=group)
tensor_list = [
tensor if i == rank else torch.empty_like(tensor) for i in range(world_size)
]
dist.all_gather(tensor_list, tensor, group=group)
if return_tensor:
return torch.stack(tensor_list, dim=0)
else:
return tensor_list
def all_gather_list(data, group=None, max_size=16384):
"""Gathers arbitrary data from all nodes into a list.
Similar to :func:`~torch.distributed.all_gather` but for arbitrary Python
data. Note that *data* must be picklable and any CUDA tensors will be moved
to CPU and returned on CPU as well.
Args:
data (Any): data from the local worker to be gathered on other workers
group: group of the collective
max_size (int, optional): maximum size of the data to be gathered
across workers
"""
from fairseq import utils
if group is None:
group = get_global_group()
rank = get_rank(group=group)
world_size = get_world_size(group=group)
buffer_size = max_size * world_size
if (
not hasattr(all_gather_list, "_buffer")
or all_gather_list._buffer.numel() < buffer_size
):
all_gather_list._buffer = torch.cuda.ByteTensor(buffer_size)
all_gather_list._cpu_buffer = torch.ByteTensor(max_size).pin_memory()
buffer = all_gather_list._buffer
buffer.zero_()
cpu_buffer = all_gather_list._cpu_buffer
data = utils.move_to_cpu(data)
enc = pickle.dumps(data)
enc_size = len(enc)
header_size = 4 # size of header that contains the length of the encoded data
size = header_size + enc_size
if size > max_size:
raise ValueError(
"encoded data size ({}) exceeds max_size ({})".format(size, max_size)
)
header = struct.pack(">I", enc_size)
cpu_buffer[:size] = torch.ByteTensor(list(header + enc))
start = rank * max_size
buffer[start : start + size].copy_(cpu_buffer[:size])
all_reduce(buffer, group=group)
buffer = buffer.cpu()
try:
result = []
for i in range(world_size):
out_buffer = buffer[i * max_size : (i + 1) * max_size]
(enc_size,) = struct.unpack(">I", bytes(out_buffer[:header_size].tolist()))
if enc_size > 0:
result.append(
pickle.loads(
bytes(out_buffer[header_size : header_size + enc_size].tolist())
)
)
return result
except pickle.UnpicklingError:
raise Exception(
"Unable to unpickle data from other workers. all_gather_list requires all "
"workers to enter the function together, so this error usually indicates "
"that the workers have fallen out of sync somehow. Workers can fall out of "
"sync if one of them runs out of memory, or if there are other conditions "
"in your training script that can cause one worker to finish an epoch "
"while other workers are still iterating over their portions of the data. "
"Try rerunning with --ddp-backend=legacy_ddp and see if that helps."
)
def all_reduce_dict(data: Mapping[str, Any], device, group) -> Dict[str, Any]:
"""
AllReduce a dictionary of values across workers. We separately
reduce items that are already on the device and items on CPU for
better performance.
Args:
data (Mapping[str, Any]): dictionary of data to all-reduce, but
cannot be a nested dictionary
device (torch.device): device for the reduction
group: group of the collective
"""
data_keys = list(data.keys())
# We want to separately reduce items that are already on the
# device and items on CPU for performance reasons.
cpu_data = OrderedDict()
device_data = OrderedDict()
for k in data_keys:
t = data[k]
if not torch.is_tensor(t):
cpu_data[k] = torch.tensor(t, dtype=torch.double)
elif t.device.type != device.type:
cpu_data[k] = t.to(dtype=torch.double)
else:
device_data[k] = t.to(dtype=torch.double)
def _all_reduce_dict(data: OrderedDict):
if len(data) == 0:
return data
buf = torch.cat([t.view(-1) for t in data.values()]).to(device=device)
all_reduce(buf, group=group)
split_buf = torch.split(buf, [t.numel() for t in data.values()])
reduced_data = [t.view_as(orig) for t, orig in zip(split_buf, data.values())]
return OrderedDict(zip(data.keys(), reduced_data))
cpu_data = _all_reduce_dict(cpu_data)
device_data = _all_reduce_dict(device_data)
def get_from_stack(key):
if key in cpu_data:
return cpu_data[key]
elif key in device_data:
return device_data[key]
raise KeyError
return OrderedDict([(key, get_from_stack(key)) for key in data_keys])
def broadcast_tensors(
tensors: Optional[List[torch.Tensor]],
src_rank: int,
group: object,
dist_device: Optional[torch.device] = None,
) -> List[torch.Tensor]:
"""
Broadcasts a list of tensors without other (non-src) ranks needing to know
the dtypes/shapes of the tensors.
"""
if dist_device is None:
if torch.distributed.get_backend(group) == "nccl":
dist_device = torch.device("cuda")
else:
dist_device = torch.device("cpu")
# share metadata first to simplify transfer
is_src_rank = (get_rank(group) == src_rank)
if is_src_rank:
metadata = [
{"size": t.size(), "dtype": t.dtype, "device": t.device} for t in tensors
]
metadata = _broadcast_object_slow(metadata, src_rank, group, dist_device)
else:
metadata = _broadcast_object_slow(None, src_rank, group, dist_device)
out_tensors = []
for i, meta in enumerate(metadata):
if is_src_rank:
tensor = tensors[i]
broadcast(tensors[i].to(dist_device), src=src_rank, group=group)
else:
tensor = torch.zeros(
[meta["size"].numel()], dtype=meta["dtype"], device=dist_device
)
broadcast(tensor, src=src_rank, group=group)
tensor = tensor.view(meta["size"]).to(meta["device"])
out_tensors.append(tensor)
return out_tensors
def broadcast_object(
obj: Any,
src_rank: int,
group: object,
dist_device: Optional[torch.device] = None,
) -> Any:
"""Broadcast an arbitrary Python object to other workers."""
if dist_device is None:
if torch.distributed.get_backend(group) == "nccl":
dist_device = torch.device("cuda")
else:
dist_device = torch.device("cpu")
if get_rank(group) == src_rank:
# split the tensors from the non-tensors so we can broadcast them
# directly, avoiding unnecessary serialization/deserialization
tensors = []
obj = _split_tensors_from_obj(obj, tensors)
obj = _broadcast_object_slow(obj, src_rank, group, dist_device)
tensors = broadcast_tensors(tensors, src_rank, group, dist_device)
else:
obj = _broadcast_object_slow(None, src_rank, group, dist_device)
tensors = broadcast_tensors(None, src_rank, group, dist_device)
return _put_tensors_in_obj(obj, tensors)
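# Usage sketch (illustrative): every rank calls this collectively; only the
# object passed on src_rank matters, e.g.
#     obj = {"step": 10, "weights": torch.randn(4)} if get_rank(group) == 0 else None
#     obj = broadcast_object(obj, src_rank=0, group=group)
# Tensors inside the object are broadcast directly via broadcast_tensors; the
# remaining structure is serialized with torch.save/torch.load.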
def _broadcast_object_slow(
obj: Any, src_rank: int, group: object, dist_device: torch.device,
) -> Any:
if get_rank(group) == src_rank:
# Emit data
buffer = io.BytesIO()
torch.save(obj, buffer)
buffer = torch.ByteTensor(buffer.getbuffer()).to(dist_device)
length = torch.LongTensor([len(buffer)]).to(dist_device)
broadcast(length, src=src_rank, group=group)
broadcast(buffer, src=src_rank, group=group)
else:
# Fetch from the source
length = torch.LongTensor([0]).to(dist_device)
broadcast(length, src=src_rank, group=group)
buffer = torch.ByteTensor(int(length.item())).to(dist_device)
broadcast(buffer, src=src_rank, group=group)
buffer = io.BytesIO(buffer.cpu().numpy())
obj = torch.load(buffer, map_location="cpu")
return obj
@dataclass(frozen=True)
class _TensorPlaceholder:
index: int
def _split_tensors_from_obj(obj: Any, tensors: List[torch.Tensor]) -> Any:
if torch.is_tensor(obj):
placeholder = _TensorPlaceholder(index=len(tensors))
tensors.append(obj)
return placeholder
elif isinstance(obj, dict):
return {k: _split_tensors_from_obj(v, tensors) for k, v in obj.items()}
elif isinstance(obj, list):
return [_split_tensors_from_obj(v, tensors) for v in obj]
elif isinstance(obj, tuple):
return tuple(_split_tensors_from_obj(v, tensors) for v in obj)
elif isinstance(obj, set):
return {_split_tensors_from_obj(v, tensors) for v in obj}
else:
return obj
def _put_tensors_in_obj(obj: Any, tensors: List[torch.Tensor]) -> Any:
if isinstance(obj, _TensorPlaceholder):
return tensors[obj.index]
elif isinstance(obj, dict):
return {k: _put_tensors_in_obj(v, tensors) for k, v in obj.items()}
elif isinstance(obj, list):
return [_put_tensors_in_obj(v, tensors) for v in obj]
elif isinstance(obj, tuple):
return tuple(_put_tensors_in_obj(v, tensors) for v in obj)
elif isinstance(obj, set):
return {_put_tensors_in_obj(v, tensors) for v in obj}
else:
return obj
| COCO-LM/fairseq/fairseq/distributed/utils.py/0 | {
"file_path": "COCO-LM/fairseq/fairseq/distributed/utils.py",
"repo_id": "COCO-LM",
"token_count": 13032
} | 187 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from collections import namedtuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import options, utils
from fairseq.modules import (
AdaptiveSoftmax,
LayerNorm,
MultiheadAttention,
PositionalEmbedding,
)
EncoderOut = namedtuple(
"TransformerEncoderOut",
[
"encoder_out", # T x B x C
"encoder_padding_mask", # B x T
"encoder_embedding", # B x T x C
"encoder_states", # List[T x B x C]
],
)
class TransformerEncoderEmbedding(nn.Module):
""" Encoder Embedding + Positional Embedding """
def __init__(self, args, embed_tokens):
super().__init__()
self.dropout = args.dropout
self.max_source_positions = args.max_source_positions
self.embed_tokens = embed_tokens
if isinstance(embed_tokens, nn.ModuleList):
self.padding_idx = embed_tokens[0].padding_idx
embed_dim = sum(e.embedding_dim for e in embed_tokens)
else:
self.padding_idx = embed_tokens.padding_idx
embed_dim = embed_tokens.embedding_dim
self.embed_scale = math.sqrt(embed_dim)
self.embed_positions = (
PositionalEmbedding(
args.max_source_positions,
embed_dim,
self.padding_idx,
learned=args.encoder_learned_pos,
)
if not args.no_token_positional_embeddings
else None
)
if getattr(args, "layernorm_embedding", False):
self.layernorm_embedding = LayerNorm(embed_dim)
else:
self.layernorm_embedding = None
def forward(self, input):
# embed tokens and positions
src_tokens = input[0]
prev_output_tokens = input[2]
if isinstance(self.embed_tokens, nn.ModuleList):
x_embed_list = []
for embed_tokens_part in self.embed_tokens:
x_embed_list.append(embed_tokens_part(src_tokens))
embedded = torch.cat(x_embed_list, dim=-1)
else:
embedded = self.embed_tokens(src_tokens)
x = embed = self.embed_scale * embedded
if self.embed_positions is not None:
x = embed + self.embed_positions(src_tokens)
if self.layernorm_embedding:
x = self.layernorm_embedding(x)
x = F.dropout(x, p=self.dropout, training=self.training)
# B x T x C -> T x B x C
x = x.transpose(0, 1)
# compute padding mask
encoder_padding_mask = src_tokens.eq(self.padding_idx)
return (x, encoder_padding_mask, prev_output_tokens)
class TransformerEncoderLayerNorm(nn.Module):
"""
Layer norm at the end of all encoder layers if
args.encoder_normalize_before = True
"""
def __init__(self, args, embed_dim):
super().__init__()
if args.encoder_normalize_before:
self.layer_norm = LayerNorm(embed_dim)
else:
self.layer_norm = None
def forward(self, input):
x = input[0]
encoder_padding_mask = input[1]
prev_output_tokens = input[2]
if self.layer_norm:
x = self.layer_norm(x)
# keeping track of the incremental_state is not supported yet
return (x, encoder_padding_mask, prev_output_tokens)
class TransformerDecoderEmbedding(nn.Module):
""" Decoder Embedding + Positional Embedding """
def __init__(self, args, embed_tokens):
super().__init__()
self.dropout = args.dropout
self.share_input_output_embed = args.share_decoder_input_output_embed
input_embed_dim = (
sum(e.embedding_dim for e in embed_tokens)
if isinstance(embed_tokens, nn.ModuleList)
else embed_tokens.embedding_dim
)
embed_dim = args.decoder_embed_dim
self.output_embed_dim = args.decoder_output_dim
padding_idx = (
embed_tokens[0].padding_idx
if isinstance(embed_tokens, nn.ModuleList)
else embed_tokens.padding_idx
)
self.max_target_positions = args.max_target_positions
self.embed_tokens = embed_tokens
self.embed_scale = math.sqrt(embed_dim) # todo: try with input_embed_dim
self.project_in_dim = (
Linear(input_embed_dim, embed_dim, bias=False)
if embed_dim != input_embed_dim
else None
)
self.embed_positions = (
PositionalEmbedding(
args.max_target_positions,
embed_dim,
padding_idx,
learned=args.decoder_learned_pos,
)
if not args.no_token_positional_embeddings
else None
)
def forward(self, input):
mt_task = False
if isinstance(input, tuple):
if len(input) == 3:
encoder_out = input[0]
encoder_padding_mask = input[1]
prev_output_tokens = input[2]
incremental_state = None # Hardcoding to avoid passing of None objects
mt_task = True
else:
# HACK for now, need to fix (TODO sidgoyal)
prev_output_tokens = input[0]
# discard "src_lengths"
encoder_out = None
encoder_padding_mask = None
incremental_state = None
else:
prev_output_tokens = input
encoder_out = None
encoder_padding_mask = None
incremental_state = None
positions = (
self.embed_positions(
prev_output_tokens,
incremental_state=incremental_state,
)
if self.embed_positions is not None
else None
)
if incremental_state is not None:
prev_output_tokens = prev_output_tokens[:, -1:]
if positions is not None:
positions = positions[:, -1:]
# embed tokens and positions
if isinstance(self.embed_tokens, nn.ModuleList):
x_embed_list = []
for embed_tokens_part in self.embed_tokens:
x_embed_list.append(embed_tokens_part(prev_output_tokens))
x = self.embed_scale * torch.cat(x_embed_list, dim=-1)
else:
x = self.embed_scale * self.embed_tokens(prev_output_tokens)
if self.project_in_dim is not None:
x = self.project_in_dim(x)
if positions is not None:
x += positions
x = F.dropout(x, p=self.dropout, training=self.training)
# B x T x C -> T x B x C
x = x.transpose(0, 1)
if mt_task:
return (x, encoder_out, encoder_padding_mask)
return x
class TransformerDecoderOutputLayer(nn.Module):
def __init__(self, args, embed_tokens, dictionary):
super().__init__()
self.share_input_output_embed = args.share_decoder_input_output_embed
self.embed_tokens = embed_tokens
self.output_embed_dim = args.decoder_output_dim
embed_dim = args.decoder_embed_dim
self.project_out_dim = (
Linear(embed_dim, self.output_embed_dim, bias=False)
if embed_dim != self.output_embed_dim and not args.tie_adaptive_weights
else None
)
self.adaptive_softmax = None
if args.adaptive_softmax_cutoff is not None:
assert not isinstance(embed_tokens, nn.ModuleList)
self.adaptive_softmax = AdaptiveSoftmax(
len(dictionary),
self.output_embed_dim,
options.eval_str_list(args.adaptive_softmax_cutoff, type=int),
dropout=args.adaptive_softmax_dropout,
adaptive_inputs=embed_tokens if args.tie_adaptive_weights else None,
factor=args.adaptive_softmax_factor,
tie_proj=args.tie_adaptive_proj,
)
elif not self.share_input_output_embed:
self.embed_tokens = nn.Parameter(
torch.Tensor(len(dictionary), self.output_embed_dim)
)
nn.init.normal_(
self.embed_tokens, mean=0, std=self.output_embed_dim ** -0.5
)
if args.decoder_normalize_before and not getattr(
args, "no_decoder_final_norm", False
):
self.layer_norm = LayerNorm(embed_dim)
else:
self.layer_norm = None
def forward(self, input, apply_final_proj=True):
if isinstance(input, tuple):
x = input[0]
else:
x = input
if self.layer_norm:
x = self.layer_norm(x)
# T x B x C -> B x T x C
x = x.transpose(0, 1)
if self.project_out_dim is not None:
x = self.project_out_dim(x)
if apply_final_proj:
x = self.output_layer(x)
return x
def output_layer(self, features, **kwargs):
"""Project features to the vocabulary size."""
if self.adaptive_softmax is None:
# project back to size of vocabulary
if self.share_input_output_embed:
if isinstance(self.embed_tokens, nn.ModuleList):
output = None
for i, emb in enumerate(self.embed_tokens):
sidx = i * emb.embedding_dim
eidx = (i + 1) * emb.embedding_dim
if output is None:
output = F.linear(features[:, :, sidx:eidx], emb.weight)
else:
output += F.linear(features[:, :, sidx:eidx], emb.weight)
return output
else:
return F.linear(features, self.embed_tokens.weight)
else:
return F.linear(features, self.embed_tokens)
else:
return features
class TransformerEncoderLayer(nn.Module):
"""Encoder layer block.
In the original paper each operation (multi-head attention or FFN) is
postprocessed with: `dropout -> add residual -> layernorm`. In the
tensor2tensor code they suggest that learning is more robust when
preprocessing each layer with layernorm and postprocessing with:
`dropout -> add residual`. We default to the approach in the paper, but the
tensor2tensor approach can be enabled by setting
*args.encoder_normalize_before* to ``True``.
Args:
args (argparse.Namespace): parsed command-line arguments
"""
def __init__(self, args):
super().__init__()
self.embed_dim = args.encoder_embed_dim
self.self_attn = MultiheadAttention(
self.embed_dim,
args.encoder_attention_heads,
dropout=args.attention_dropout,
self_attention=True,
)
self.self_attn_layer_norm = LayerNorm(self.embed_dim)
self.dropout = args.dropout
self.activation_fn = utils.get_activation_fn(
activation=getattr(args, "activation_fn", "relu")
)
self.activation_dropout = getattr(args, "activation_dropout", 0)
if self.activation_dropout == 0:
# for backwards compatibility with models that use args.relu_dropout
self.activation_dropout = getattr(args, "relu_dropout", 0)
self.normalize_before = args.encoder_normalize_before
self.fc1 = Linear(self.embed_dim, args.encoder_ffn_embed_dim)
self.fc2 = Linear(args.encoder_ffn_embed_dim, self.embed_dim)
self.final_layer_norm = LayerNorm(self.embed_dim)
def upgrade_state_dict_named(self, state_dict, name):
"""
Rename layer norm states from `...layer_norms.0.weight` to
`...self_attn_layer_norm.weight` and `...layer_norms.1.weight` to
`...final_layer_norm.weight`
"""
layer_norm_map = {"0": "self_attn_layer_norm", "1": "final_layer_norm"}
for old, new in layer_norm_map.items():
for m in ("weight", "bias"):
k = "{}.layer_norms.{}.{}".format(name, old, m)
if k in state_dict:
state_dict["{}.{}.{}".format(name, new, m)] = state_dict[k]
del state_dict[k]
def forward(self, input):
"""
Args:
input (Tuple):
input[0] (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)`
input[1] (ByteTensor/FloatTensor): encoder padding mask -
binary ByteTensor of shape `(batch, src_len)` where padding elements
are indicated by ``1``.
input[2] (LongTensor): previous decoder outputs of shape
`(batch, tgt_len)` (for teacher forcing)
Returns:
output (Tuple):
output[0] (Tensor): encoded output of shape `(src_len, batch, embed_dim)`
output[1] (ByteTensor/FloatTensor): encoder padding mask
output[2] (LongTensor): previous decoder outputs
"""
x = input[0]
encoder_padding_mask = input[1]
prev_output_tokens = input[2]
residual = x
x = self.maybe_layer_norm(self.self_attn_layer_norm, x, before=True)
x, _ = self.self_attn(
query=x, key=x, value=x, key_padding_mask=encoder_padding_mask
)
x = F.dropout(x, p=self.dropout, training=self.training)
x = residual + x
x = self.maybe_layer_norm(self.self_attn_layer_norm, x, after=True)
residual = x
x = self.maybe_layer_norm(self.final_layer_norm, x, before=True)
x = self.activation_fn(self.fc1(x))
x = F.dropout(x, p=self.activation_dropout, training=self.training)
x = self.fc2(x)
x = F.dropout(x, p=self.dropout, training=self.training)
x = residual + x
x = self.maybe_layer_norm(self.final_layer_norm, x, after=True)
return (x, encoder_padding_mask, prev_output_tokens)
def maybe_layer_norm(self, layer_norm, x, before=False, after=False):
assert before ^ after
if after ^ self.normalize_before:
return layer_norm(x)
else:
return x
class TransformerDecoderLayer(nn.Module):
"""Decoder layer block.
In the original paper each operation (multi-head attention, encoder
attention or FFN) is postprocessed with: `dropout -> add residual ->
layernorm`. In the tensor2tensor code they suggest that learning is more
robust when preprocessing each layer with layernorm and postprocessing with:
`dropout -> add residual`. We default to the approach in the paper, but the
tensor2tensor approach can be enabled by setting
*args.decoder_normalize_before* to ``True``.
Args:
args (argparse.Namespace): parsed command-line arguments
no_encoder_attn (bool, optional): whether to attend to encoder outputs
(default: False).
"""
def __init__(
self, args, no_encoder_attn=False, add_bias_kv=False, add_zero_attn=False
):
super().__init__()
self.embed_dim = args.decoder_embed_dim
self.self_attn = MultiheadAttention(
embed_dim=self.embed_dim,
num_heads=args.decoder_attention_heads,
dropout=args.attention_dropout,
add_bias_kv=add_bias_kv,
add_zero_attn=add_zero_attn,
self_attention=True,
)
self.dropout = args.dropout
self.activation_fn = utils.get_activation_fn(
activation=getattr(args, "activation_fn", "relu")
)
self.activation_dropout = getattr(args, "activation_dropout", 0)
if self.activation_dropout == 0:
# for backwards compatibility with models that use args.relu_dropout
self.activation_dropout = getattr(args, "relu_dropout", 0)
self.normalize_before = args.decoder_normalize_before
# use layerNorm rather than FusedLayerNorm for exporting.
# char_inputs can be used to determine this.
# TODO remove this once we update apex with the fix
export = getattr(args, "char_inputs", False)
self.self_attn_layer_norm = LayerNorm(self.embed_dim, export=export)
if no_encoder_attn:
self.encoder_attn = None
self.encoder_attn_layer_norm = None
else:
self.encoder_attn = MultiheadAttention(
self.embed_dim,
args.decoder_attention_heads,
kdim=getattr(args, "encoder_embed_dim", None),
vdim=getattr(args, "encoder_embed_dim", None),
dropout=args.attention_dropout,
encoder_decoder_attention=True,
)
self.encoder_attn_layer_norm = LayerNorm(self.embed_dim, export=export)
self.fc1 = Linear(self.embed_dim, args.decoder_ffn_embed_dim)
self.fc2 = Linear(args.decoder_ffn_embed_dim, self.embed_dim)
self.final_layer_norm = LayerNorm(self.embed_dim, export=export)
self.need_attn = True
self.onnx_trace = False
def prepare_for_onnx_export_(self):
self.onnx_trace = True
def forward(self, input):
"""
Args:
input (Tuple):
input[0] (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)`
input[1] (Tensor): encoder output of shape `(batch, src_len, embed_dim)`
input[2] (ByteTensor/FloatTensor): encoder padding mask -
binary ByteTensor of shape `(batch, src_len)` where padding elements
are indicated by ``1``.
Returns:
output (Tuple):
output[0] (Tensor): decoded output of shape `(seq_len, batch, embed_dim)`
output[1] (Tensor): encoder output, passed through unchanged
output[2] (ByteTensor/FloatTensor): encoder padding mask, passed through unchanged
"""
# Note: incremental state is not yet supported
mt_task = False
if isinstance(input, tuple):
x = input[0]
encoder_out = input[1]
encoder_padding_mask = input[2]
incremental_state = None
mt_task = True
else:
x = input
encoder_out = None
encoder_padding_mask = None
incremental_state = None
if incremental_state is None:
self_attn_mask = self.buffered_future_mask(x)
else:
self_attn_mask = None
# TODO: add back prev_self_attn_state, prev_attn_state,
# self_attn_padding_mask
prev_self_attn_state = None
prev_attn_state = None
self_attn_padding_mask = None
residual = x
x = self.maybe_layer_norm(self.self_attn_layer_norm, x, before=True)
if prev_self_attn_state is not None:
if incremental_state is None:
incremental_state = {}
prev_key, prev_value = prev_self_attn_state
saved_state = {"prev_key": prev_key, "prev_value": prev_value}
self.self_attn._set_input_buffer(incremental_state, saved_state)
x, attn = self.self_attn(
query=x,
key=x,
value=x,
key_padding_mask=self_attn_padding_mask,
incremental_state=incremental_state,
need_weights=False,
attn_mask=self_attn_mask,
)
x = F.dropout(x, p=self.dropout, training=self.training)
x = residual + x
x = self.maybe_layer_norm(self.self_attn_layer_norm, x, after=True)
if self.encoder_attn is not None:
residual = x
x = self.maybe_layer_norm(self.encoder_attn_layer_norm, x, before=True)
if prev_attn_state is not None:
if incremental_state is None:
incremental_state = {}
prev_key, prev_value = prev_attn_state
saved_state = {"prev_key": prev_key, "prev_value": prev_value}
self.encoder_attn._set_input_buffer(incremental_state, saved_state)
x, attn = self.encoder_attn(
query=x,
key=encoder_out,
value=encoder_out,
key_padding_mask=encoder_padding_mask,
incremental_state=incremental_state,
static_kv=True,
need_weights=(not self.training and self.need_attn),
)
x = F.dropout(x, p=self.dropout, training=self.training)
x = residual + x
x = self.maybe_layer_norm(self.encoder_attn_layer_norm, x, after=True)
residual = x
x = self.maybe_layer_norm(self.final_layer_norm, x, before=True)
x = self.activation_fn(self.fc1(x))
x = F.dropout(x, p=self.activation_dropout, training=self.training)
x = self.fc2(x)
x = F.dropout(x, p=self.dropout, training=self.training)
x = residual + x
x = self.maybe_layer_norm(self.final_layer_norm, x, after=True)
if mt_task:
return (x, encoder_out, encoder_padding_mask)
return x
def buffered_future_mask(self, tensor):
dim = tensor.size(0)
if (
not hasattr(self, "_future_mask")
or self._future_mask is None
or self._future_mask.device != tensor.device
):
self._future_mask = torch.triu(
utils.fill_with_neg_inf(tensor.new(dim, dim)), 1
)
if self._future_mask.size(0) < dim:
self._future_mask = torch.triu(
utils.fill_with_neg_inf(self._future_mask.resize_(dim, dim)), 1
)
return self._future_mask[:dim, :dim]
def maybe_layer_norm(self, layer_norm, x, before=False, after=False):
assert before ^ after
if after ^ self.normalize_before:
return layer_norm(x)
else:
return x
def make_generation_fast_(self, need_attn=False, **kwargs):
self.need_attn = need_attn
def Embedding(num_embeddings, embedding_dim, padding_idx):
m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)
nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5)
nn.init.constant_(m.weight[padding_idx], 0)
return m
def Linear(in_features, out_features, bias=True):
m = nn.Linear(in_features, out_features, bias)
nn.init.xavier_uniform_(m.weight)
if bias:
nn.init.constant_(m.bias, 0.0)
return m
| COCO-LM/fairseq/fairseq/model_parallel/models/pipeline_parallel_transformer/layers.py/0 | {
"file_path": "COCO-LM/fairseq/fairseq/model_parallel/models/pipeline_parallel_transformer/layers.py",
"repo_id": "COCO-LM",
"token_count": 11018
} | 188 |
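A standalone sketch (not part of the file above) of the `maybe_layer_norm` convention used by these layers: exactly one of the `before`/`after` calls applies the norm, and which one depends on `normalize_before` (pre-norm vs. post-norm). The helper and toy shapes below are illustrative assumptions.

# Hypothetical illustration of the before/after XOR used by maybe_layer_norm.
import torch
import torch.nn as nn


def maybe_layer_norm(layer_norm, x, normalize_before, before=False, after=False):
    assert before ^ after
    # the norm fires either before the sublayer (pre-norm) or after it (post-norm)
    if after ^ normalize_before:
        return layer_norm(x)
    return x


if __name__ == "__main__":
    ln = nn.LayerNorm(8)
    x = torch.randn(4, 8)
    for normalize_before in (False, True):
        pre = maybe_layer_norm(ln, x, normalize_before, before=True)
        post = maybe_layer_norm(ln, x, normalize_before, after=True)
        # exactly one of the two calls returns a normalized tensor
        print(normalize_before, torch.equal(pre, x), torch.equal(post, x))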
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import signal
import threading
import torch
import torch.nn as nn
from torch.nn.parallel import DistributedDataParallel
from fairseq.distributed import (
DistributedTimeoutWrapper,
LegacyDistributedDataParallel,
ModuleProxyWrapper,
TPUDistributedDataParallel,
)
logger = logging.getLogger(__name__)
_GOSSIP_DISABLED = False
try:
import gossip
except ImportError:
_GOSSIP_DISABLED = True
def DistributedFairseqModel(args, model, process_group, device):
"""
Wrap a *model* to support distributed data parallel training.
This is similar to the built-in DistributedDataParallel, but allows
additional configuration of the DistributedDataParallel class to
use, and also provides easier access to the wrapped model by
forwarding requests for missing attributes to the wrapped model.
Args:
args (argparse.Namespace): fairseq args
model (BaseFairseqModel): model to wrap
process_group: the c10d process group to be used for distributed data
parallel all-reduction.
device: device to move model to
"""
assert isinstance(model, nn.Module)
if args.tpu:
wrapped_model = TPUDistributedDataParallel(
module=model.to(device),
process_group=process_group,
)
# forward missing getattr and state_dict/load_state_dict to orig model
wrapped_model = ModuleProxyWrapper(wrapped_model)
elif args.ddp_backend in {"c10d", "pytorch_ddp"}:
wrapped_model = DistributedDataParallel(
module=model.to(device),
device_ids=[args.device_id],
output_device=args.device_id,
broadcast_buffers=args.broadcast_buffers,
bucket_cap_mb=args.bucket_cap_mb,
process_group=process_group,
find_unused_parameters=args.find_unused_parameters,
)
# forward missing getattr and state_dict/load_state_dict to orig model
wrapped_model = ModuleProxyWrapper(wrapped_model)
elif args.ddp_backend in {"no_c10d", "legacy_ddp"}:
wrapped_model = LegacyDistributedDataParallel(
module=model.to(device),
buffer_size=2 ** 28,
process_group=process_group,
)
# forward missing getattr and state_dict/load_state_dict to orig model
wrapped_model = ModuleProxyWrapper(wrapped_model)
elif args.ddp_backend == "slow_mo":
if _GOSSIP_DISABLED:
raise ImportError(
"Cannot find gossip library. Please install from: "
"github.com/facebookresearch/stochastic_gradient_push"
)
# The values of slowmo_momentum below were obtained by tuning on the
# En-De 16 dataset by training the transformer_wmt_en_de_large model
if args.slowmo_momentum is None:
if args.distributed_world_size <= 16:
args.slowmo_momentum = 0.0
elif args.distributed_world_size <= 32:
args.slowmo_momentum = 0.2
elif args.distributed_world_size <= 64:
args.slowmo_momentum = 0.5
else:
args.slowmo_momentum = 0.6
wrapped_model = gossip.GossipDataParallel(
module=model.to(device),
device_ids=[args.device_id],
output_device=args.device_id,
broadcast_buffers=args.broadcast_buffers,
nprocs_per_node=args.nprocs_per_node,
slowmo_momentum=args.slowmo_momentum,
localsgd=(args.slowmo_algorithm == "LocalSGD"),
localsgd_frequency=args.localsgd_frequency,
)
# forward missing getattr and state_dict/load_state_dict to orig model
wrapped_model = ModuleProxyWrapper(wrapped_model)
elif args.ddp_backend == "fully_sharded":
try:
from fairscale.nn.data_parallel import FullyShardedDataParallel as FSDP
except ImportError:
raise ImportError(
"Cannot find FullyShardedDataParallel. "
"Please install fairscale with: pip install fairscale"
)
assert isinstance(model, FSDP), "expected model to already be wrapped in FSDP"
wrapped_model = model
if args.memory_efficient_fp16:
wrapped_model = wrapped_model.half()
if not args.cpu_offload:
wrapped_model = wrapped_model.to(device=device)
else:
raise ValueError("Unknown --ddp-backend: " + args.ddp_backend)
# kill hung distributed jobs after a timeout
if getattr(args, "heartbeat_timeout", -1) > 0:
wrapped_model = DistributedTimeoutWrapper(
wrapped_model, timeout=getattr(args, "heartbeat_timeout", -1)
)
return wrapped_model
| COCO-LM/fairseq/fairseq/models/distributed_fairseq_model.py/0 | {
"file_path": "COCO-LM/fairseq/fairseq/models/distributed_fairseq_model.py",
"repo_id": "COCO-LM",
"token_count": 2076
} | 189 |
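A minimal, illustrative sketch of the attribute-forwarding idea behind the `ModuleProxyWrapper` used above, written against plain PyTorch; the wrapper class below is an assumption for illustration, not the fairseq implementation.

# Illustrative only: forward missing attributes from a wrapper to the wrapped module,
# so e.g. `wrapped.max_positions()` keeps working after DDP-style wrapping.
import torch.nn as nn


class AttrForwardingWrapper(nn.Module):
    def __init__(self, module: nn.Module):
        super().__init__()
        self.module = module

    def forward(self, *args, **kwargs):
        return self.module(*args, **kwargs)

    def __getattr__(self, name):
        try:
            # nn.Module keeps parameters/buffers/submodules in its own registries
            return super().__getattr__(name)
        except AttributeError:
            # anything else is delegated to the wrapped module
            return getattr(self.module, name)


class ToyModel(nn.Module):
    def __init__(self):
        super().__init__()
        self.fc = nn.Linear(4, 4)

    def max_positions(self):
        return 1024


if __name__ == "__main__":
    wrapped = AttrForwardingWrapper(ToyModel())
    print(wrapped.max_positions())  # forwarded to the inner module -> 1024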
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from collections import OrderedDict
from fairseq import utils
from fairseq.models import (
FairseqMultiModel,
register_model,
register_model_architecture,
)
from fairseq.models.transformer import (
Embedding,
TransformerDecoder,
TransformerEncoder,
TransformerModel,
base_architecture,
)
@register_model("multilingual_transformer")
class MultilingualTransformerModel(FairseqMultiModel):
"""Train Transformer models for multiple language pairs simultaneously.
Requires `--task multilingual_translation`.
We inherit all arguments from TransformerModel and assume that all language
pairs use a single Transformer architecture. In addition, we provide several
options that are specific to the multilingual setting.
Args:
--share-encoder-embeddings: share encoder embeddings across all source languages
--share-decoder-embeddings: share decoder embeddings across all target languages
--share-encoders: share all encoder params (incl. embeddings) across all source languages
--share-decoders: share all decoder params (incl. embeddings) across all target languages
"""
def __init__(self, encoders, decoders):
super().__init__(encoders, decoders)
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
TransformerModel.add_args(parser)
parser.add_argument(
"--share-encoder-embeddings",
action="store_true",
help="share encoder embeddings across languages",
)
parser.add_argument(
"--share-decoder-embeddings",
action="store_true",
help="share decoder embeddings across languages",
)
parser.add_argument(
"--share-encoders",
action="store_true",
help="share encoders across languages",
)
parser.add_argument(
"--share-decoders",
action="store_true",
help="share decoders across languages",
)
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
from fairseq.tasks.multilingual_translation import MultilingualTranslationTask
assert isinstance(task, MultilingualTranslationTask)
# make sure all arguments are present in older models
base_multilingual_architecture(args)
if not hasattr(args, "max_source_positions"):
args.max_source_positions = 1024
if not hasattr(args, "max_target_positions"):
args.max_target_positions = 1024
src_langs = [lang_pair.split("-")[0] for lang_pair in task.model_lang_pairs]
tgt_langs = [lang_pair.split("-")[1] for lang_pair in task.model_lang_pairs]
if args.share_encoders:
args.share_encoder_embeddings = True
if args.share_decoders:
args.share_decoder_embeddings = True
def build_embedding(dictionary, embed_dim, path=None):
num_embeddings = len(dictionary)
padding_idx = dictionary.pad()
emb = Embedding(num_embeddings, embed_dim, padding_idx)
# if provided, load from preloaded dictionaries
if path:
embed_dict = utils.parse_embedding(path)
utils.load_embedding(embed_dict, dictionary, emb)
return emb
# build shared embeddings (if applicable)
shared_encoder_embed_tokens, shared_decoder_embed_tokens = None, None
if args.share_all_embeddings:
if args.encoder_embed_dim != args.decoder_embed_dim:
raise ValueError(
"--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim"
)
if args.decoder_embed_path and (
args.decoder_embed_path != args.encoder_embed_path
):
raise ValueError(
"--share-all-embeddings not compatible with --decoder-embed-path"
)
shared_encoder_embed_tokens = FairseqMultiModel.build_shared_embeddings(
dicts=task.dicts,
langs=task.langs,
embed_dim=args.encoder_embed_dim,
build_embedding=build_embedding,
pretrained_embed_path=args.encoder_embed_path,
)
shared_decoder_embed_tokens = shared_encoder_embed_tokens
args.share_decoder_input_output_embed = True
else:
if args.share_encoder_embeddings:
shared_encoder_embed_tokens = FairseqMultiModel.build_shared_embeddings(
dicts=task.dicts,
langs=src_langs,
embed_dim=args.encoder_embed_dim,
build_embedding=build_embedding,
pretrained_embed_path=args.encoder_embed_path,
)
if args.share_decoder_embeddings:
shared_decoder_embed_tokens = FairseqMultiModel.build_shared_embeddings(
dicts=task.dicts,
langs=tgt_langs,
embed_dim=args.decoder_embed_dim,
build_embedding=build_embedding,
pretrained_embed_path=args.decoder_embed_path,
)
# encoders/decoders for each language
lang_encoders, lang_decoders = {}, {}
def get_encoder(lang):
if lang not in lang_encoders:
if shared_encoder_embed_tokens is not None:
encoder_embed_tokens = shared_encoder_embed_tokens
else:
encoder_embed_tokens = build_embedding(
task.dicts[lang],
args.encoder_embed_dim,
args.encoder_embed_path,
)
lang_encoders[lang] = cls._get_module_class(
True, args, task.dicts[lang], encoder_embed_tokens, src_langs
)
return lang_encoders[lang]
def get_decoder(lang):
if lang not in lang_decoders:
if shared_decoder_embed_tokens is not None:
decoder_embed_tokens = shared_decoder_embed_tokens
else:
decoder_embed_tokens = build_embedding(
task.dicts[lang],
args.decoder_embed_dim,
args.decoder_embed_path,
)
lang_decoders[lang] = cls._get_module_class(
False, args, task.dicts[lang], decoder_embed_tokens, tgt_langs
)
return lang_decoders[lang]
# shared encoders/decoders (if applicable)
shared_encoder, shared_decoder = None, None
if args.share_encoders:
shared_encoder = get_encoder(src_langs[0])
if args.share_decoders:
shared_decoder = get_decoder(tgt_langs[0])
encoders, decoders = OrderedDict(), OrderedDict()
for lang_pair, src, tgt in zip(task.model_lang_pairs, src_langs, tgt_langs):
encoders[lang_pair] = (
shared_encoder if shared_encoder is not None else get_encoder(src)
)
decoders[lang_pair] = (
shared_decoder if shared_decoder is not None else get_decoder(tgt)
)
return MultilingualTransformerModel(encoders, decoders)
@classmethod
def _get_module_class(cls, is_encoder, args, lang_dict, embed_tokens, langs):
module_class = TransformerEncoder if is_encoder else TransformerDecoder
return module_class(args, lang_dict, embed_tokens)
def load_state_dict(self, state_dict, strict=True, model_cfg=None):
state_dict_subset = state_dict.copy()
for k, _ in state_dict.items():
assert k.startswith("models.")
lang_pair = k.split(".")[1]
if lang_pair not in self.models:
del state_dict_subset[k]
super().load_state_dict(state_dict_subset, strict=strict, model_cfg=model_cfg)
@register_model_architecture("multilingual_transformer", "multilingual_transformer")
def base_multilingual_architecture(args):
base_architecture(args)
args.share_encoder_embeddings = getattr(args, "share_encoder_embeddings", False)
args.share_decoder_embeddings = getattr(args, "share_decoder_embeddings", False)
args.share_encoders = getattr(args, "share_encoders", False)
args.share_decoders = getattr(args, "share_decoders", False)
@register_model_architecture(
"multilingual_transformer", "multilingual_transformer_iwslt_de_en"
)
def multilingual_transformer_iwslt_de_en(args):
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 1024)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 4)
args.encoder_layers = getattr(args, "encoder_layers", 6)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512)
args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 1024)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 4)
args.decoder_layers = getattr(args, "decoder_layers", 6)
base_multilingual_architecture(args)
| COCO-LM/fairseq/fairseq/models/multilingual_transformer.py/0 | {
"file_path": "COCO-LM/fairseq/fairseq/models/multilingual_transformer.py",
"repo_id": "COCO-LM",
"token_count": 4430
} | 190 |
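A small sketch of what `--share-encoder-embeddings` amounts to: one embedding built over a joint dictionary is handed to every per-language encoder, so their input embeddings are tied. The toy encoder and sizes below are illustrative assumptions.

# Illustrative sketch: one shared embedding matrix reused by several language encoders.
import torch.nn as nn


def build_embedding(num_embeddings, embed_dim, padding_idx=1):
    m = nn.Embedding(num_embeddings, embed_dim, padding_idx=padding_idx)
    nn.init.normal_(m.weight, mean=0, std=embed_dim ** -0.5)
    nn.init.constant_(m.weight[padding_idx], 0)
    return m


class ToyEncoder(nn.Module):
    def __init__(self, embed_tokens):
        super().__init__()
        self.embed_tokens = embed_tokens  # possibly shared across languages
        self.proj = nn.Linear(embed_tokens.embedding_dim, embed_tokens.embedding_dim)

    def forward(self, tokens):
        return self.proj(self.embed_tokens(tokens))


if __name__ == "__main__":
    shared = build_embedding(num_embeddings=1000, embed_dim=64)
    encoders = {lang: ToyEncoder(shared) for lang in ("de", "fr", "en")}
    # all encoders point at the same weight tensor
    assert encoders["de"].embed_tokens.weight is encoders["fr"].embed_tokens.weight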
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
GottBERT: a pure German Language Model
"""
from fairseq.models import register_model
from .hub_interface import RobertaHubInterface
from .model import RobertaModel
@register_model('gottbert')
class GottbertModel(RobertaModel):
@classmethod
def hub_models(cls):
return {
'gottbert-base': 'https://dl.gottbert.de/fairseq/models/gottbert-base.tar.gz',
}
@classmethod
def from_pretrained(cls,
model_name_or_path,
checkpoint_file='model.pt',
data_name_or_path='.',
bpe='hf_byte_bpe',
bpe_vocab='vocab.json',
bpe_merges='merges.txt',
bpe_add_prefix_space=False,
**kwargs
):
from fairseq import hub_utils
x = hub_utils.from_pretrained(
model_name_or_path,
checkpoint_file,
data_name_or_path,
archive_map=cls.hub_models(),
bpe=bpe,
load_checkpoint_heads=True,
bpe_vocab=bpe_vocab,
bpe_merges=bpe_merges,
bpe_add_prefix_space=bpe_add_prefix_space,
**kwargs,
)
return RobertaHubInterface(x['args'], x['task'], x['models'][0])
| COCO-LM/fairseq/fairseq/models/roberta/model_gottbert.py/0 | {
"file_path": "COCO-LM/fairseq/fairseq/models/roberta/model_gottbert.py",
"repo_id": "COCO-LM",
"token_count": 790
} | 191 |
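A typical hub usage sketch for the model above. The checkpoint name comes from `hub_models()`, but the call downloads and loads real weights, so it requires network access (or a local copy of the archive) and a full fairseq install; treat it as a sketch rather than a self-contained test.

# Sketch only: loading the released GottBERT checkpoint via the hub interface above.
from fairseq.models.roberta.model_gottbert import GottbertModel

gottbert = GottbertModel.from_pretrained("gottbert-base")
gottbert.eval()
tokens = gottbert.encode("Die Katze sitzt auf der Matte.")
features = gottbert.extract_features(tokens)  # (1, seq_len, hidden_dim)
print(features.shape)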
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass, field
import logging
import math
from typing import Optional, Tuple
from omegaconf import II
import sys
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq.dataclass import ChoiceEnum, FairseqDataclass
from fairseq.models import BaseFairseqModel, register_model
from fairseq.modules import (
Fp32GroupNorm,
Fp32LayerNorm,
GumbelVectorQuantizer,
KmeansVectorQuantizer,
TransposeLast,
)
from fairseq.tasks import FairseqTask
from fairseq.utils import buffered_arange
logger = logging.getLogger(__name__)
AGGREGATOR_CHOICES = ChoiceEnum(["cnn", "gru"])
PROJECT_FEATURES_CHOICES = ChoiceEnum(["none", "same", "new"])
ACTIVATION_CHOICES = ChoiceEnum(["relu", "gelu"])
VQ_TYPE_CHOICES = ChoiceEnum(["none", "gumbel", "kmeans"])
@dataclass
class Wav2VecConfig(FairseqDataclass):
prediction_steps: int = field(
default=12, metadata={"help": "number of steps ahead to predict"}
)
sample_distance: Optional[int] = field(
default=None,
metadata={
"help": "sample distance from target. does not work properly with cross-sampling"
},
)
cross_sample_negatives: int = field(
default=0, metadata={"help": "num of cross sampled negatives"}
)
    num_negatives: int = field(
        default=10, metadata={"help": "num of sampled negatives"}
    )
conv_feature_layers: str = field(
default="[(512, 10, 5), (512, 8, 4), (512, 4, 2), (512, 4, 2), (512, 4, 2), (512, 1, 1), (512, 1, 1), (512, 1, 1)]",
metadata={
"help": "convolutional feature extraction layers [(dim, kernel_size, stride), ...]"
},
)
conv_aggregator_layers: str = field(
default="[(512, 2, 1), (512, 3, 1), (512, 4, 1), (512, 5, 1), (512, 6, 1), (512, 7, 1), (512, 8, 1), (512, 9, 1), (512, 10, 1), (512, 11, 1), (512, 12, 1), (512, 13, 1)]",
metadata={
"help": "convolutional aggregator layers [(dim, kernel_size, stride), ...]"
},
)
dropout: float = field(
default=0.0, metadata={"help": "dropout to apply within the model"}
)
dropout_features: float = field(
default=0.0, metadata={"help": "dropout to apply to the features"}
)
dropout_agg: float = field(
default=0.0, metadata={"help": "dropout to apply after aggregation step"}
)
aggregator: AGGREGATOR_CHOICES = field(
default="cnn", metadata={"help": "type of aggregator to use"}
)
gru_dim: int = field(default=512, metadata={"help": "GRU dimensionality"})
no_conv_bias: bool = field(
default=False, metadata={"help": "if set, does not learn bias for conv layers"}
)
agg_zero_pad: bool = field(
default=False,
metadata={"help": "if set, zero pads in aggregator instead of repl pad"},
)
skip_connections_feat: bool = field(
default=False,
metadata={"help": "if set, adds skip connections to the feature extractor"},
)
skip_connections_agg: bool = field(
default=True,
metadata={"help": "if set, adds skip connections to the aggregator"},
)
residual_scale: float = field(
default=0.5, metadata={"help": "scales residual by sqrt(value)"}
)
log_compression: bool = field(
default=True,
metadata={"help": "if set, adds a log compression to feature extractor"},
)
balanced_classes: bool = field(
default=False,
metadata={"help": "if set, loss is scaled to balance for number of negatives"},
)
project_features: PROJECT_FEATURES_CHOICES = field(
default="none",
metadata={
"help": "if not none, features are projected using the (same or new) aggregator"
},
)
non_affine_group_norm: bool = field(
default=False, metadata={"help": "if set, group norm is not affine"}
)
offset: str = field(
default="auto",
metadata={
"help": "if set to 'auto', it is computed automatically from the receptive field, else set to int value"
},
)
activation: ACTIVATION_CHOICES = field(
default="relu",
metadata={
"help": "if set to 'auto', it is computed automatically from the receptive field, else set to int value"
},
)
vq_type: VQ_TYPE_CHOICES = field(
default="none", metadata={"help": "which type of quantizer to use"}
)
vq_vars: int = field(
default=320,
metadata={"help": "project to this many vector quantized variables per group"},
)
vq_groups: int = field(
default=2, metadata={"help": "number of groups of latent variables"}
)
vq_dim: int = field(
default=0,
metadata={
"help": "uses this dimensionality for quantized vectors. 0 to use model dim // groups"
},
)
vq_depth: int = field(
default=1, metadata={"help": "number of layers for vq weight projection"}
)
combine_groups: bool = field(
default=False, metadata={"help": "if set, variables are shared among groups"}
)
vq_temp: Tuple[float, float, float] = field(
default=(2.0, 0.5, 0.999995),
metadata={
"help": "temperature for latent variable sampling with gumbel softmax. should be a tuple of 3 values (start, end, decay)"
},
)
vq_gamma: float = field(
default=0.25,
metadata={"help": "gamma parameter for kmeans style vector quantization"},
)
infonce: bool = II("criterion.infonce")
@register_model("wav2vec", dataclass=Wav2VecConfig)
class Wav2VecModel(BaseFairseqModel):
@classmethod
def build_model(cls, cfg: Wav2VecConfig, task: FairseqTask):
"""Build a new model instance."""
model = Wav2VecModel(cfg)
logger.info(model)
return model
def __init__(self, cfg: Wav2VecConfig):
super().__init__()
self.prediction_steps = cfg.prediction_steps
offset = cfg.offset
if cfg.activation == "relu":
activation = nn.ReLU()
elif cfg.activation == "gelu":
activation = nn.GELU()
else:
raise Exception("unknown activation " + cfg.activation)
feature_enc_layers = eval(cfg.conv_feature_layers)
self.feature_extractor = ConvFeatureExtractionModel(
conv_layers=feature_enc_layers,
dropout=0.0,
log_compression=cfg.log_compression,
skip_connections=cfg.skip_connections_feat,
residual_scale=cfg.residual_scale,
non_affine_group_norm=cfg.non_affine_group_norm,
activation=activation,
)
embed = feature_enc_layers[-1][0]
self.vector_quantizer = None
if cfg.vq_type == "gumbel":
self.vector_quantizer = GumbelVectorQuantizer(
dim=embed,
num_vars=cfg.vq_vars,
temp=cfg.vq_temp,
groups=cfg.vq_groups,
combine_groups=cfg.combine_groups,
vq_dim=cfg.vq_dim if cfg.vq_dim > 0 else embed,
time_first=False,
activation=activation,
weight_proj_depth=cfg.vq_depth,
weight_proj_factor=2,
)
elif cfg.vq_type == "kmeans":
self.vector_quantizer = KmeansVectorQuantizer(
dim=embed,
num_vars=cfg.vq_vars,
groups=cfg.vq_groups,
combine_groups=cfg.combine_groups,
vq_dim=cfg.vq_dim if cfg.vq_dim > 0 else embed,
time_first=False,
gamma=cfg.vq_gamma,
)
else:
assert (
cfg.vq_type == "none" or cfg.vq_type is None
), "Unknown quantizer type"
if cfg.offset == "auto":
jin = 0
rin = 0
for _, k, stride in feature_enc_layers:
if rin == 0:
rin = k
rin = rin + (k - 1) * jin
if jin == 0:
jin = stride
else:
jin *= stride
offset = math.ceil(rin / jin)
offset = int(offset)
def make_aggregator():
if cfg.aggregator == "cnn":
agg_layers = eval(cfg.conv_aggregator_layers)
agg_dim = agg_layers[-1][0]
feature_aggregator = ConvAggegator(
conv_layers=agg_layers,
embed=embed,
dropout=cfg.dropout,
skip_connections=cfg.skip_connections_agg,
residual_scale=cfg.residual_scale,
non_affine_group_norm=cfg.non_affine_group_norm,
conv_bias=not cfg.no_conv_bias,
zero_pad=cfg.agg_zero_pad,
activation=activation,
)
elif cfg.aggregator == "gru":
agg_dim = cfg.gru_dim
feature_aggregator = nn.Sequential(
TransposeLast(),
nn.GRU(
input_size=embed,
hidden_size=agg_dim,
num_layers=1,
dropout=cfg.dropout,
),
TransposeLast(deconstruct_idx=0),
)
else:
raise Exception("unknown aggregator type " + cfg.aggregator)
return feature_aggregator, agg_dim
self.feature_aggregator, agg_dim = make_aggregator()
self.wav2vec_predictions = Wav2VecPredictionsModel(
in_dim=agg_dim,
out_dim=embed,
prediction_steps=cfg.prediction_steps,
n_negatives=cfg.num_negatives,
cross_sample_negatives=cfg.cross_sample_negatives,
sample_distance=cfg.sample_distance,
dropout=cfg.dropout,
offset=offset,
balanced_classes=cfg.balanced_classes,
infonce=cfg.infonce,
)
self.dropout_feats = nn.Dropout(p=cfg.dropout_features)
self.dropout_agg = nn.Dropout(p=cfg.dropout_agg)
if cfg.project_features == "none":
self.project_features = None
elif cfg.project_features == "same":
self.project_features = self.feature_aggregator
elif cfg.project_features == "new":
self.project_features, _ = make_aggregator()
def forward(self, source):
result = {}
features = self.feature_extractor(source)
if self.vector_quantizer:
q_res = self.vector_quantizer(features)
features = q_res["x"]
for k in q_res.keys():
if k != "x":
result[k] = q_res[k]
x = self.dropout_feats(features)
x = self.feature_aggregator(x)
x = self.dropout_agg(x)
if self.project_features is not None:
features = self.project_features(features)
x, targets = self.wav2vec_predictions(x, features)
result["cpc_logits"] = x
result["cpc_targets"] = targets
return result
def upgrade_state_dict_named(self, state_dict, name):
super().upgrade_state_dict_named(state_dict, name)
def max_positions(self):
"""Maximum length supported by the model."""
return sys.maxsize
def get_logits(self, net_output):
logits = net_output["cpc_logits"]
return logits
def get_targets(self, sample, net_output):
t = net_output["cpc_targets"]
if isinstance(t, tuple):
t = t[0]
return t.contiguous()
def get_target_weights(self, targets, net_output):
targets = net_output["cpc_targets"]
if isinstance(targets, tuple) and targets[-1] is not None:
return targets[-1]
return None
def get_extra_losses(self, net_output):
loss = None
if "prob_perplexity" in net_output:
loss = net_output["num_vars"] - net_output["prob_perplexity"]
elif "kmeans_loss" in net_output:
loss = net_output["kmeans_loss"]
return loss
def norm_block(is_layer_norm, dim, affine=True):
if is_layer_norm:
mod = nn.Sequential(
TransposeLast(),
Fp32LayerNorm(dim, elementwise_affine=affine),
TransposeLast(),
)
else:
mod = Fp32GroupNorm(1, dim, affine=affine)
return mod
class ConvFeatureExtractionModel(nn.Module):
def __init__(
self,
conv_layers,
dropout,
log_compression,
skip_connections,
residual_scale,
non_affine_group_norm,
activation,
):
super().__init__()
def block(n_in, n_out, k, stride):
return nn.Sequential(
nn.Conv1d(n_in, n_out, k, stride=stride, bias=False),
nn.Dropout(p=dropout),
norm_block(
is_layer_norm=False, dim=n_out, affine=not non_affine_group_norm
),
activation,
)
in_d = 1
self.conv_layers = nn.ModuleList()
for dim, k, stride in conv_layers:
self.conv_layers.append(block(in_d, dim, k, stride))
in_d = dim
self.log_compression = log_compression
self.skip_connections = skip_connections
self.residual_scale = math.sqrt(residual_scale)
def forward(self, x):
# BxT -> BxCxT
x = x.unsqueeze(1)
for conv in self.conv_layers:
residual = x
x = conv(x)
if self.skip_connections and x.size(1) == residual.size(1):
tsz = x.size(2)
r_tsz = residual.size(2)
residual = residual[..., :: r_tsz // tsz][..., :tsz]
x = (x + residual) * self.residual_scale
if self.log_compression:
x = x.abs()
x = x + 1
x = x.log()
return x
class ZeroPad1d(nn.Module):
def __init__(self, pad_left, pad_right):
super().__init__()
self.pad_left = pad_left
self.pad_right = pad_right
def forward(self, x):
return F.pad(x, (self.pad_left, self.pad_right))
class ConvAggegator(nn.Module):
def __init__(
self,
conv_layers,
embed,
dropout,
skip_connections,
residual_scale,
non_affine_group_norm,
conv_bias,
zero_pad,
activation,
):
super().__init__()
def block(n_in, n_out, k, stride):
# padding dims only really make sense for stride = 1
ka = k // 2
kb = ka - 1 if k % 2 == 0 else ka
pad = (
ZeroPad1d(ka + kb, 0) if zero_pad else nn.ReplicationPad1d((ka + kb, 0))
)
return nn.Sequential(
pad,
nn.Conv1d(n_in, n_out, k, stride=stride, bias=conv_bias),
nn.Dropout(p=dropout),
norm_block(False, n_out, affine=not non_affine_group_norm),
activation,
)
in_d = embed
self.conv_layers = nn.ModuleList()
self.residual_proj = nn.ModuleList()
for dim, k, stride in conv_layers:
if in_d != dim and skip_connections:
self.residual_proj.append(nn.Conv1d(in_d, dim, 1, bias=False))
else:
self.residual_proj.append(None)
self.conv_layers.append(block(in_d, dim, k, stride))
in_d = dim
self.conv_layers = nn.Sequential(*self.conv_layers)
self.skip_connections = skip_connections
self.residual_scale = math.sqrt(residual_scale)
def forward(self, x):
for rproj, conv in zip(self.residual_proj, self.conv_layers):
residual = x
x = conv(x)
if self.skip_connections:
if rproj is not None:
residual = rproj(residual)
x = (x + residual) * self.residual_scale
return x
class Wav2VecPredictionsModel(nn.Module):
def __init__(
self,
in_dim,
out_dim,
prediction_steps,
n_negatives,
cross_sample_negatives,
sample_distance,
dropout,
offset,
balanced_classes,
infonce,
):
super().__init__()
self.n_negatives = n_negatives
self.cross_sample_negatives = cross_sample_negatives
self.sample_distance = sample_distance
self.project_to_steps = nn.ConvTranspose2d(
in_dim, out_dim, (1, prediction_steps)
)
self.dropout = nn.Dropout(p=dropout)
self.offset = offset
self.balanced_classes = balanced_classes
self.infonce = infonce
def sample_negatives(self, y):
bsz, fsz, tsz = y.shape
y = y.transpose(0, 1) # BCT -> CBT
y = y.contiguous().view(fsz, -1) # CBT => C(BxT)
cross_high = tsz * bsz
high = tsz if self.sample_distance is None else min(tsz, self.sample_distance)
assert high > 1
neg_idxs = torch.randint(low=0, high=high, size=(bsz, self.n_negatives * tsz))
with torch.no_grad():
if self.n_negatives > 0:
tszs = (
buffered_arange(tsz)
.unsqueeze(-1)
.expand(-1, self.n_negatives)
.flatten()
)
neg_idxs = torch.randint(
low=0, high=high - 1, size=(bsz, self.n_negatives * tsz)
)
neg_idxs[neg_idxs >= tszs] += 1
if self.cross_sample_negatives > 0:
tszs = (
buffered_arange(tsz)
.unsqueeze(-1)
.expand(-1, self.cross_sample_negatives)
.flatten()
)
cross_neg_idxs = torch.randint(
low=0,
high=cross_high - 1,
size=(bsz, self.cross_sample_negatives * tsz),
)
cross_neg_idxs[cross_neg_idxs >= tszs] += 1
if self.n_negatives > 0:
for i in range(1, bsz):
neg_idxs[i] += i * high
else:
neg_idxs = cross_neg_idxs
if self.cross_sample_negatives > 0 and self.n_negatives > 0:
neg_idxs = torch.cat([neg_idxs, cross_neg_idxs], dim=1)
negs = y[..., neg_idxs.view(-1)]
negs = negs.view(
fsz, bsz, self.n_negatives + self.cross_sample_negatives, tsz
).permute(
2, 1, 0, 3
) # to NxBxCxT
return negs
def forward(self, x, y):
x = x.unsqueeze(-1)
x = self.project_to_steps(x) # BxCxTxS
x = self.dropout(x)
negatives = self.sample_negatives(y)
y = y.unsqueeze(0)
targets = torch.cat([y, negatives], dim=0) # Copies x B x C x T
copies = targets.size(0)
bsz, dim, tsz, steps = x.shape
steps = min(steps, tsz - self.offset)
predictions = x.new(
bsz * copies * (tsz - self.offset + 1) * steps
- ((steps + 1) * steps // 2) * copies * bsz
)
if self.infonce:
labels = predictions.new_full(
(predictions.shape[0] // copies,), 0, dtype=torch.long
)
else:
labels = torch.zeros_like(predictions)
weights = (
torch.full_like(labels, 1 / self.n_negatives)
if self.balanced_classes and not self.infonce
else None
)
start = end = 0
for i in range(steps):
offset = i + self.offset
end = start + (tsz - offset) * bsz * copies
if self.infonce:
predictions[start:end] = torch.einsum(
"bct,nbct->tbn", x[..., :-offset, i], targets[..., offset:]
).flatten()
else:
pos_num = (end - start) // copies
predictions[start:end] = torch.einsum(
"bct,nbct->nbt", x[..., :-offset, i], targets[..., offset:]
).flatten()
labels[start : start + pos_num] = 1.0
if weights is not None:
weights[start : start + pos_num] = 1.0
start = end
assert end == predictions.numel(), "{} != {}".format(end, predictions.numel())
if self.infonce:
predictions = predictions.view(-1, copies)
else:
if weights is not None:
labels = (labels, weights)
return predictions, labels
| COCO-LM/fairseq/fairseq/models/wav2vec/wav2vec.py/0 | {
"file_path": "COCO-LM/fairseq/fairseq/models/wav2vec/wav2vec.py",
"repo_id": "COCO-LM",
"token_count": 10600
} | 192 |
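A standalone sketch of the within-utterance negative sampling performed by `Wav2VecPredictionsModel.sample_negatives`: draw random timesteps in `[0, tsz - 2]` and shift any draw that collides with the positive timestep, so a frame is never its own negative. The function name and shapes below are illustrative.

# Standalone sketch of within-utterance negative sampling (mirrors the idea in
# Wav2VecPredictionsModel.sample_negatives; shapes and names are illustrative).
import torch


def sample_negative_indices(bsz, tsz, n_negatives):
    # for every (batch, timestep) draw n_negatives other timesteps from the same utterance
    tszs = (
        torch.arange(tsz)
        .unsqueeze(-1)
        .expand(-1, n_negatives)
        .flatten()
    )
    neg_idxs = torch.randint(low=0, high=tsz - 1, size=(bsz, n_negatives * tsz))
    # bump draws that landed on the positive timestep so a frame is never its own negative
    neg_idxs[neg_idxs >= tszs] += 1
    return neg_idxs  # (bsz, n_negatives * tsz), values in [0, tsz)


if __name__ == "__main__":
    idxs = sample_negative_indices(bsz=2, tsz=10, n_negatives=4)
    positives = torch.arange(10).repeat_interleave(4).unsqueeze(0)
    assert not (idxs == positives).any()  # no negative equals its own timestep
    print(idxs.shape)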
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
def gen_forward():
kernels = [3, 5, 7, 15, 31, 63, 127, 255]
blocks = [32, 64, 128, 256]
head = """
/**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#include "dynamicconv_cuda.cuh"
std::vector<at::Tensor> dynamicconv_cuda_forward(at::Tensor input, at::Tensor weight, int padding_l) {
at::DeviceGuard g(input.device());
const auto minibatch = input.size(0);
const auto numFeatures = input.size(1);
const auto sequenceLength = input.size(2);
const auto numHeads = weight.size(1);
const auto filterSize = weight.size(2);
const auto numFiltersInBlock = numFeatures / numHeads;
const dim3 blocks(minibatch, numFeatures);
auto output = at::zeros_like(input);
auto stream = at::cuda::getCurrentCUDAStream();
"""
switch = """
switch(filterSize) {
"""
case_k = """
case {k}:
"""
main_block = """
if (padding_l == {pad}) {{
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "dynamicconv_forward", ([&] {{
dynamicconv_forward_kernel<{k}, {b_size}, {pad}, scalar_t>
<<<blocks, {b_size}, 0, stream>>>(
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
output.data<scalar_t>());
}}));
}} else
"""
bad_padding = """
{
std::cout << "WARNING: Unsupported padding size - skipping forward pass" << std::endl;
}
break;\n
"""
end = """
default:
std::cout << "WARNING: Unsupported filter length passed - skipping forward pass" << std::endl;
}
return {output};
}
"""
with open("dynamicconv_cuda_forward.cu", "w") as forward:
forward.write(head)
forward.write(switch)
for k in kernels:
b_size = 32
for b in blocks:
if b > k:
b_size = b
break
forward.write(case_k.format(k=k))
for pad in [k // 2, k - 1]:
forward.write(main_block.format(k=k, b_size=b_size, pad=pad))
forward.write(bad_padding)
forward.write(end)
def gen_backward():
kernels = [3, 5, 7, 15, 31, 63, 127, 255]
thresh = [512, 512, 512, 512, 512, 380, 256, 256]
min_block = [64, 64, 64, 64, 64, 64, 128, 256]
seqs = [32 * x for x in [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]]
head = """
/**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#include "dynamicconv_cuda.cuh"
std::vector<at::Tensor> dynamicconv_cuda_backward(at::Tensor gradOutput, int padding_l, at::Tensor input, at::Tensor weight) {
at::DeviceGuard g(input.device());
const auto minibatch = input.size(0);
const auto numFeatures = input.size(1);
const auto sequenceLength = input.size(2);
const auto numHeads = weight.size(1);
const auto filterSize = weight.size(2);
const auto numFiltersInBlock = numFeatures / numHeads;
auto numChunks = 1;
auto gradInput = at::zeros_like(input);
auto gradWeight = at::zeros_like(weight);
auto stream = at::cuda::getCurrentCUDAStream();
dim3 blocks(minibatch, numHeads, numChunks);
"""
sequence_if = """
if (sequenceLength < {seq}) {{
switch(filterSize) {{
"""
case_k = """
case {k}:
"""
chunks_reset = """
numChunks = int(ceilf(sequenceLength/float({b_size})));
blocks = dim3(minibatch, numHeads, numChunks);
"""
main_block = """
if (padding_l == {p}) {{
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {{
dynamicconv_backward_kernel<{k}, {b_size}, {p}, scalar_t>
<<<blocks, {b_size}, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}}));
}} else
"""
bad_padding = """
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;\n
"""
bad_filter = """
default:
std::cout << "WARNING: Unsupported filter length passed - skipping backward pass" << std::endl;
}
"""
con_else = """
} else
"""
final_else = """
{
switch(filterSize) {
"""
last_return = """
}
return {gradInput, gradWeight};
}
"""
with open("dynamicconv_cuda_backward.cu", "w") as backward:
backward.write(head)
for seq in seqs:
backward.write(sequence_if.format(seq=seq))
for k, t, m in zip(kernels, thresh, min_block):
backward.write(case_k.format(k=k))
if seq <= t:
b_size = seq
else:
b_size = m
backward.write(chunks_reset.format(b_size=b_size))
for p in [k // 2, k - 1]:
backward.write(main_block.format(k=k, b_size=b_size, p=p))
backward.write(bad_padding)
backward.write(bad_filter)
backward.write(con_else)
backward.write(final_else)
for k, m in zip(kernels, min_block):
backward.write(case_k.format(k=k))
backward.write(chunks_reset.format(b_size=m))
for p in [k // 2, k - 1]:
backward.write(main_block.format(k=k, b_size=m, p=p))
backward.write(bad_padding)
backward.write(bad_filter)
backward.write(last_return)
if __name__ == "__main__":
gen_forward()
gen_backward()
| COCO-LM/fairseq/fairseq/modules/dynamicconv_layer/cuda_function_gen.py/0 | {
"file_path": "COCO-LM/fairseq/fairseq/modules/dynamicconv_layer/cuda_function_gen.py",
"repo_id": "COCO-LM",
"token_count": 3476
} | 193 |
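The script above is a source generator rather than a runtime module: invoking it writes the two `.cu` files consumed by the dynamicconv extension build. A minimal driver, assuming the script is importable from the working directory:

# Sketch: running the generator produces the CUDA sources in the current directory.
import os

from cuda_function_gen import gen_forward, gen_backward  # assumes the file is on sys.path

gen_forward()
gen_backward()
for fname in ("dynamicconv_cuda_forward.cu", "dynamicconv_cuda_backward.cu"):
    print(fname, os.path.getsize(fname), "bytes")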
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules.utils import _pair
class PQConv2d(nn.Module):
"""
    Quantized counterpart of the nn.Conv2d module. Stores the centroids, the assignments
and the non-quantized biases. The full weight is re-instantiated at each forward
pass and autograd automatically computes the gradients with respect to the
centroids.
Args:
- centroids: centroids of size n_centroids x block_size
- assignments: assignments of the centroids to the subvectors
of size self.out_channels x n_blocks
- bias: the non-quantized bias, must be either torch.Tensor or None
Remarks:
- We refer the reader to the official documentation of the nn.Conv2d module
for the other arguments and the behavior of the module.
- Performance tests on GPU show that this implementation is 10% slower than
the non-quantized nn.Conv2d module for a standard training loop.
- During the backward, the gradients are averaged by cluster and not summed.
This explains the hook registered to the centroids.
"""
def __init__(
self,
centroids,
assignments,
bias,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
dilation=1,
groups=1,
padding_mode="zeros",
):
super(PQConv2d, self).__init__()
self.block_size = centroids.size(1)
self.n_centroids = centroids.size(0)
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = _pair(kernel_size)
self.stride = _pair(stride)
self.padding = _pair(padding)
self.dilation = _pair(dilation)
self.groups = groups
self.padding_mode = padding_mode
# check compatibility
if in_channels // groups * np.prod(self.kernel_size) % self.block_size != 0:
raise ValueError("Wrong PQ sizes")
if len(assignments) % out_channels != 0:
raise ValueError("Wrong PQ sizes")
if in_channels % groups != 0:
raise ValueError("in_channels must be divisible by groups")
if out_channels % groups != 0:
raise ValueError("out_channels must be divisible by groups")
# define parameters
self.centroids = nn.Parameter(centroids, requires_grad=True)
self.register_buffer("assignments", assignments)
self.register_buffer("counts", torch.bincount(assignments).type_as(centroids))
if bias is not None:
self.bias = nn.Parameter(bias)
else:
self.register_parameter("bias", None)
# register hook for averaging gradients per centroids instead of summing
self.centroids.register_hook(lambda x: x / self.counts[:, None])
@property
def weight(self):
return (
self.centroids[self.assignments]
.reshape(-1, self.out_channels, self.block_size)
.permute(1, 0, 2)
.reshape(
self.out_channels, self.in_channels // self.groups, *self.kernel_size
)
)
def forward(self, x):
return F.conv2d(
x,
self.weight,
self.bias,
self.stride,
self.padding,
self.dilation,
self.groups,
)
def extra_repr(self):
s = "{in_channels}, {out_channels}, kernel_size={kernel_size}, stride={stride}"
if self.padding != (0,) * len(self.padding):
s += ", padding={padding}"
if self.dilation != (1,) * len(self.dilation):
s += ", dilation={dilation}"
if self.groups != 1:
s += ", groups={groups}"
if self.bias is None:
s += ", bias=False"
if self.padding_mode != "zeros":
s += ", padding_mode={padding_mode}"
s += ", n_centroids={n_centroids}, block_size={block_size}"
return s.format(**self.__dict__)
| COCO-LM/fairseq/fairseq/modules/quantization/pq/modules/qconv.py/0 | {
"file_path": "COCO-LM/fairseq/fairseq/modules/quantization/pq/modules/qconv.py",
"repo_id": "COCO-LM",
"token_count": 1860
} | 194 |
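A hedged construction example for `PQConv2d`: toy centroids and assignments whose shapes satisfy the constructor's divisibility checks, followed by a forward pass. All sizes are illustrative; in practice they come from product-quantizing a trained conv layer.

# Sketch: toy centroids/assignments with shapes that satisfy PQConv2d's checks.
import torch
from fairseq.modules.quantization.pq.modules.qconv import PQConv2d

in_channels, out_channels, kernel_size, block_size, n_centroids = 8, 4, 3, 8, 4
# total subvectors = out_channels * (per-filter elements / block_size) = 4 * (72 / 8) = 36
n_assignments = out_channels * (in_channels * kernel_size * kernel_size // block_size)

centroids = torch.randn(n_centroids, block_size)
assignments = torch.arange(n_assignments) % n_centroids  # every centroid gets used
bias = torch.zeros(out_channels)

conv = PQConv2d(centroids, assignments, bias, in_channels, out_channels,
                kernel_size, padding=1)
x = torch.randn(2, in_channels, 16, 16)
print(conv(x).shape)      # torch.Size([2, 4, 16, 16])
print(conv.weight.shape)  # reconstructed on the fly: torch.Size([4, 8, 3, 3])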
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from typing import Any, Optional
import torch
import torch.onnx.operators
from fairseq import utils
from torch import Tensor, nn
class SinusoidalPositionalEmbedding(nn.Module):
"""This module produces sinusoidal positional embeddings of any length.
Padding symbols are ignored.
"""
def __init__(self, embedding_dim, padding_idx, init_size=1024):
super().__init__()
self.embedding_dim = embedding_dim
self.padding_idx = padding_idx if padding_idx is not None else 0
self.weights = SinusoidalPositionalEmbedding.get_embedding(
init_size, embedding_dim, padding_idx
)
self.onnx_trace = False
self.register_buffer("_float_tensor", torch.FloatTensor(1))
self.max_positions = int(1e5)
def prepare_for_onnx_export_(self):
self.onnx_trace = True
@staticmethod
def get_embedding(
num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None
):
"""Build sinusoidal embeddings.
This matches the implementation in tensor2tensor, but differs slightly
from the description in Section 3.5 of "Attention Is All You Need".
"""
half_dim = embedding_dim // 2
emb = math.log(10000) / (half_dim - 1)
emb = torch.exp(torch.arange(half_dim, dtype=torch.float) * -emb)
emb = torch.arange(num_embeddings, dtype=torch.float).unsqueeze(
1
) * emb.unsqueeze(0)
emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(
num_embeddings, -1
)
if embedding_dim % 2 == 1:
# zero pad
emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1)
if padding_idx is not None:
emb[padding_idx, :] = 0
return emb
def forward(
self,
input,
incremental_state: Optional[Any] = None,
timestep: Optional[Tensor] = None,
positions: Optional[Any] = None,
):
"""Input is expected to be of size [bsz x seqlen]."""
bspair = torch.onnx.operators.shape_as_tensor(input)
bsz, seq_len = bspair[0], bspair[1]
max_pos = self.padding_idx + 1 + seq_len
if self.weights is None or max_pos > self.weights.size(0):
# recompute/expand embeddings if needed
self.weights = SinusoidalPositionalEmbedding.get_embedding(
max_pos, self.embedding_dim, self.padding_idx
)
self.weights = self.weights.to(self._float_tensor)
if incremental_state is not None:
# positions is the same for every token when decoding a single step
pos = timestep.view(-1)[0] + 1 if timestep is not None else seq_len
if self.onnx_trace:
return (
self.weights.index_select(index=self.padding_idx + pos, dim=0)
.unsqueeze(1)
.repeat(bsz, 1, 1)
)
return self.weights[self.padding_idx + pos, :].expand(bsz, 1, -1)
positions = utils.make_positions(
input, self.padding_idx, onnx_trace=self.onnx_trace
)
if self.onnx_trace:
flat_embeddings = self.weights.detach().index_select(0, positions.view(-1))
embedding_shape = torch.cat(
(bsz.view(1), seq_len.view(1), torch.tensor([-1], dtype=torch.long))
)
embeddings = torch.onnx.operators.reshape_from_tensor_shape(
flat_embeddings, embedding_shape
)
return embeddings
return (
self.weights.index_select(0, positions.view(-1))
.view(bsz, seq_len, -1)
.detach()
)
| COCO-LM/fairseq/fairseq/modules/sinusoidal_positional_embedding.py/0 | {
"file_path": "COCO-LM/fairseq/fairseq/modules/sinusoidal_positional_embedding.py",
"repo_id": "COCO-LM",
"token_count": 1835
} | 195 |
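A standalone check of the table construction in `get_embedding` above: the first `half_dim` columns of each row are sines and the last `half_dim` are cosines of the same scaled positions, and the padding row is zeroed.

# Standalone recomputation of the sinusoidal table (mirrors get_embedding above).
import math
import torch

num_embeddings, embedding_dim, padding_idx = 16, 8, 1
half_dim = embedding_dim // 2
scale = math.log(10000) / (half_dim - 1)
inv_freq = torch.exp(torch.arange(half_dim, dtype=torch.float) * -scale)
angles = torch.arange(num_embeddings, dtype=torch.float).unsqueeze(1) * inv_freq.unsqueeze(0)
emb = torch.cat([torch.sin(angles), torch.cos(angles)], dim=1)
emb[padding_idx, :] = 0  # padding symbol gets an all-zero embedding

print(emb.shape)                                                    # torch.Size([16, 8])
print(torch.allclose(emb[2, :half_dim], torch.sin(2 * inv_freq)))   # True
print(torch.allclose(emb[2, half_dim:], torch.cos(2 * inv_freq)))   # True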
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import math
from collections.abc import Collection
from dataclasses import dataclass, field
from typing import List
import torch
import torch.distributed as dist
import torch.optim
from fairseq.dataclass import FairseqDataclass
from fairseq.optim import FairseqOptimizer, register_optimizer
from fairseq.optim.fused_adam import get_fused_adam_class
from omegaconf import II, DictConfig
logger = logging.getLogger(__name__)
@dataclass
class FairseqAdamConfig(FairseqDataclass):
adam_betas: str = field(
default="(0.9, 0.999)", metadata={"help": "betas for Adam optimizer"}
)
adam_eps: float = field(
default=1e-8, metadata={"help": "epsilon for Adam optimizer"}
)
weight_decay: float = field(default=0.0, metadata={"help": "weight decay"})
use_old_adam: bool = field(
default=False, metadata={"help": "Use fairseq.optim.adam.Adam"}
)
# TODO common vars below in parent
tpu: bool = II("common.tpu")
lr: List[float] = II("optimization.lr")
@register_optimizer("adam", dataclass=FairseqAdamConfig)
class FairseqAdam(FairseqOptimizer):
"""Adam optimizer for fairseq.
Important note: this optimizer corresponds to the "AdamW" variant of
Adam in its weight decay behavior. As such, it is most closely
analogous to torch.optim.AdamW from PyTorch.
"""
def __init__(self, cfg: DictConfig, params):
super().__init__(cfg)
fused_adam_cls = get_fused_adam_class()
use_fused_adam = (
not getattr(cfg, "use_old_adam", False)
and fused_adam_cls is not None
and torch.cuda.is_available()
)
if getattr(cfg, "tpu", False):
# on TPUs we use the Adam defined here, since it
# automatically casts gradients to FP32
self._optimizer = Adam(params, **self.optimizer_config)
elif use_fused_adam:
logger.info("using FusedAdam")
self._optimizer = fused_adam_cls(params, **self.optimizer_config)
else:
self._optimizer = Adam(params, **self.optimizer_config)
@property
def optimizer_config(self):
"""
Return a kwarg dictionary that will be used to override optimizer
args stored in checkpoints. This allows us to load a checkpoint and
resume training using a different set of optimizer args, e.g., with a
different learning rate.
"""
return {
"lr": self.cfg.lr[0]
if isinstance(self.cfg.lr, Collection)
else self.cfg.lr,
"betas": eval(self.cfg.adam_betas),
"eps": self.cfg.adam_eps,
"weight_decay": self.cfg.weight_decay,
}
def average_params(self):
"""Reduce Params is only used during BMUF distributed training."""
state_dict = self.optimizer.state_dict()
total_gpus = float(dist.get_world_size())
for _, value in state_dict["state"].items():
value["exp_avg"] /= total_gpus
value["exp_avg_sq"] /= total_gpus
dist.all_reduce(value["exp_avg"], op=dist.ReduceOp.SUM)
dist.all_reduce(value["exp_avg_sq"], op=dist.ReduceOp.SUM)
class Adam(torch.optim.Optimizer):
r"""Implements Adam algorithm.
This implementation is modified from torch.optim.Adam based on:
`Fixed Weight Decay Regularization in Adam`
(see https://arxiv.org/abs/1711.05101)
It has been proposed in `Adam: A Method for Stochastic Optimization`_.
Args:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): learning rate (default: 1e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve
numerical stability (default: 1e-8)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
amsgrad (boolean, optional): whether to use the AMSGrad variant of this
algorithm from the paper `On the Convergence of Adam and Beyond`_
.. _Adam\: A Method for Stochastic Optimization:
https://arxiv.org/abs/1412.6980
.. _On the Convergence of Adam and Beyond:
https://openreview.net/forum?id=ryQu7f-RZ
"""
def __init__(
self,
params,
lr=1e-3,
betas=(0.9, 0.999),
eps=1e-8,
weight_decay=0,
amsgrad=False,
):
defaults = dict(
lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, amsgrad=amsgrad
)
super(Adam, self).__init__(params, defaults)
@property
def supports_memory_efficient_fp16(self):
return True
@property
def supports_flat_params(self):
return True
def step(self, closure=None):
"""Performs a single optimization step.
Args:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group["params"]:
if p.grad is None:
continue
grad = p.grad.data
if grad.dtype in {torch.float16, torch.bfloat16}:
grad = grad.float()
if grad.is_sparse:
raise RuntimeError(
"Adam does not support sparse gradients, please consider SparseAdam instead"
)
amsgrad = group.get("amsgrad", False)
p_data_fp32 = p.data
if p.data.dtype in {torch.float16, torch.bfloat16}:
p_data_fp32 = p_data_fp32.float()
state = self.state[p]
# State initialization
if len(state) == 0:
state["step"] = 0
# Exponential moving average of gradient values
state["exp_avg"] = torch.zeros_like(p_data_fp32)
# Exponential moving average of squared gradient values
state["exp_avg_sq"] = torch.zeros_like(p_data_fp32)
if amsgrad:
# Maintains max of all exp. moving avg. of sq. grad. values
state["max_exp_avg_sq"] = torch.zeros_like(p_data_fp32)
else:
state["exp_avg"] = state["exp_avg"].to(p_data_fp32)
state["exp_avg_sq"] = state["exp_avg_sq"].to(p_data_fp32)
if amsgrad:
state["max_exp_avg_sq"] = state["max_exp_avg_sq"].to(
p_data_fp32
)
exp_avg, exp_avg_sq = state["exp_avg"], state["exp_avg_sq"]
if amsgrad:
max_exp_avg_sq = state["max_exp_avg_sq"]
beta1, beta2 = group["betas"]
state["step"] += 1
# Decay the first and second moment running average coefficient
exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
if amsgrad:
# Maintains the maximum of all 2nd moment running avg. till now
torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
# Use the max. for normalizing running avg. of gradient
denom = max_exp_avg_sq.sqrt().add_(group["eps"])
else:
denom = exp_avg_sq.sqrt().add_(group["eps"])
bias_correction1 = 1 - beta1 ** state["step"]
bias_correction2 = 1 - beta2 ** state["step"]
step_size = group["lr"] * math.sqrt(bias_correction2) / bias_correction1
if group["weight_decay"] != 0:
p_data_fp32.add_(
p_data_fp32, alpha=-group["weight_decay"] * group["lr"]
)
p_data_fp32.addcdiv_(exp_avg, denom, value=-step_size)
if p.data.dtype in {torch.float16, torch.bfloat16}:
p.data.copy_(p_data_fp32)
return loss
| COCO-LM/fairseq/fairseq/optim/adam.py/0 | {
"file_path": "COCO-LM/fairseq/fairseq/optim/adam.py",
"repo_id": "COCO-LM",
"token_count": 4083
} | 196 |
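A minimal sketch of one step with the `Adam` class above, highlighting that weight decay is applied directly to the parameter (the decoupled, AdamW-style behavior the docstring describes) rather than added to the gradient. Requires a fairseq install; the values are illustrative.

# Sketch: single optimization step with the Adam class defined in this file.
import torch
from fairseq.optim.adam import Adam  # the plain torch.optim.Optimizer subclass above

p = torch.nn.Parameter(torch.ones(3))
opt = Adam([p], lr=0.1, betas=(0.9, 0.999), eps=1e-8, weight_decay=0.01)

loss = (p ** 2).sum()
loss.backward()
opt.step()       # Adam update plus decoupled weight decay applied to p directly
opt.zero_grad()
print(p.data)    # moved toward zero by both the gradient step and the decay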
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass
from fairseq.dataclass import FairseqDataclass
from fairseq.optim.lr_scheduler import FairseqLRScheduler, register_lr_scheduler
@dataclass
class PassThroughScheduleConfig(FairseqDataclass):
pass
@register_lr_scheduler("pass_through", dataclass=PassThroughScheduleConfig)
class PassThroughScheduleSchedule(FairseqLRScheduler):
"""Delegate lr scheduling to the optimizer."""
def __init__(self, cfg: PassThroughScheduleConfig, optimizer):
super().__init__(cfg, optimizer)
assert (
hasattr(optimizer, "lr_scheduler") and optimizer.lr_scheduler is not None
), "Pass-through schedule can only be used with optimizers with their own schedulers"
def state_dict(self):
return self.optimizer.lr_scheduler.state_dict()
def load_state_dict(self, state_dict):
self.optimizer.lr_scheduler.load_state_dict(state_dict)
def step_begin_epoch(self, epoch):
"""Update the learning rate at the beginning of the given epoch."""
return self.optimizer.lr_scheduler.step_begin_epoch(epoch)
def step_update(self, num_updates):
"""Update the learning rate after each update."""
return self.optimizer.lr_scheduler.step_update(num_updates)
| COCO-LM/fairseq/fairseq/optim/lr_scheduler/pass_through.py/0 | {
"file_path": "COCO-LM/fairseq/fairseq/optim/lr_scheduler/pass_through.py",
"repo_id": "COCO-LM",
"token_count": 507
} | 197 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass, field
from fairseq.dataclass import FairseqDataclass
from fairseq.scoring import BaseScorer, register_scorer
from fairseq.scoring.tokenizer import EvaluationTokenizer
@dataclass
class WerScorerConfig(FairseqDataclass):
wer_tokenizer: EvaluationTokenizer.ALL_TOKENIZER_TYPES = field(
default="none", metadata={"help": "sacreBLEU tokenizer to use for evaluation"}
)
wer_remove_punct: bool = field(
default=False, metadata={"help": "remove punctuation"}
)
wer_char_level: bool = field(
default=False, metadata={"help": "evaluate at character level"}
)
wer_lowercase: bool = field(default=False, metadata={"help": "lowercasing"})
@register_scorer("wer", dataclass=WerScorerConfig)
class WerScorer(BaseScorer):
def __init__(self, cfg):
super().__init__(cfg)
self.reset()
try:
import editdistance as ed
except ImportError:
raise ImportError("Please install editdistance to use WER scorer")
self.ed = ed
self.tokenizer = EvaluationTokenizer(
tokenizer_type=self.cfg.wer_tokenizer,
lowercase=self.cfg.wer_lowercase,
punctuation_removal=self.cfg.wer_remove_punct,
character_tokenization=self.cfg.wer_char_level,
)
def reset(self):
self.distance = 0
self.ref_length = 0
def add_string(self, ref, pred):
ref_items = self.tokenizer.tokenize(ref).split()
pred_items = self.tokenizer.tokenize(pred).split()
self.distance += self.ed.eval(ref_items, pred_items)
self.ref_length += len(ref_items)
def result_string(self):
return f"WER: {self.score():.2f}"
def score(self):
return 100.0 * self.distance / self.ref_length if self.ref_length > 0 else 0
| COCO-LM/fairseq/fairseq/scoring/wer.py/0 | {
"file_path": "COCO-LM/fairseq/fairseq/scoring/wer.py",
"repo_id": "COCO-LM",
"token_count": 796
} | 198 |
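A sketch of the accumulation `WerScorer` performs, using the `editdistance` package directly with plain whitespace tokenization (the scorer above additionally applies the `EvaluationTokenizer` options).

# Sketch: corpus-level WER = 100 * total edit distance / total reference length.
# Requires `pip install editdistance`.
import editdistance

refs = ["the cat sat on the mat", "hello world"]
hyps = ["the cat sat on mat", "hello word"]

distance, ref_length = 0, 0
for ref, hyp in zip(refs, hyps):
    ref_items, hyp_items = ref.split(), hyp.split()
    distance += editdistance.eval(ref_items, hyp_items)
    ref_length += len(ref_items)

print(f"WER: {100.0 * distance / ref_length:.2f}")  # word error rate in percent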
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import numpy as np
from fairseq import utils
from fairseq.data import (
ConcatSentencesDataset,
Dictionary,
IdDataset,
NestedDictionaryDataset,
NumelDataset,
NumSamplesDataset,
OffsetTokensDataset,
PrependTokenDataset,
RawLabelDataset,
RightPadDataset,
RollDataset,
SortDataset,
StripTokenDataset,
data_utils,
)
from fairseq.data.shorten_dataset import maybe_shorten_dataset
from fairseq.tasks import LegacyFairseqTask, register_task
logger = logging.getLogger(__name__)
@register_task("sentence_prediction")
class SentencePredictionTask(LegacyFairseqTask):
"""
Sentence (or sentence pair) prediction (classification or regression) task.
Args:
dictionary (Dictionary): the dictionary for the input of the task
"""
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
parser.add_argument("data", metavar="FILE", help="file prefix for data")
parser.add_argument(
"--num-classes",
type=int,
default=-1,
help="number of classes or regression targets",
)
parser.add_argument(
"--init-token",
type=int,
default=None,
help="add token at the beginning of each batch item",
)
parser.add_argument(
"--separator-token",
type=int,
default=None,
help="add separator token between inputs",
)
parser.add_argument("--regression-target", action="store_true", default=False)
parser.add_argument("--no-shuffle", action="store_true", default=False)
parser.add_argument(
"--shorten-method",
default="none",
choices=["none", "truncate", "random_crop"],
help="if not none, shorten sequences that exceed --tokens-per-sample",
)
parser.add_argument(
"--shorten-data-split-list",
default="",
help="comma-separated list of dataset splits to apply shortening to, "
'e.g., "train,valid" (default: all dataset splits)',
)
parser.add_argument(
"--add-prev-output-tokens",
action="store_true",
default=False,
help="add prev_output_tokens to sample, used for encoder-decoder arch",
)
def __init__(self, args, data_dictionary, label_dictionary):
super().__init__(args)
self.dictionary = data_dictionary
self._label_dictionary = label_dictionary
if not hasattr(args, "max_positions"):
self._max_positions = (
args.max_source_positions,
args.max_target_positions,
)
else:
self._max_positions = args.max_positions
args.tokens_per_sample = self._max_positions
@classmethod
def load_dictionary(cls, args, filename, source=True):
"""Load the dictionary from the filename
Args:
filename (str): the filename
"""
dictionary = Dictionary.load(filename)
dictionary.add_symbol("<mask>")
return dictionary
@classmethod
def setup_task(cls, args, **kwargs):
assert args.num_classes > 0, "Must set --num-classes"
# load data dictionary
data_dict = cls.load_dictionary(
args,
os.path.join(args.data, "input0", "dict.txt"),
source=True,
)
logger.info("[input] dictionary: {} types".format(len(data_dict)))
# load label dictionary
if not args.regression_target:
label_dict = cls.load_dictionary(
args,
os.path.join(args.data, "label", "dict.txt"),
source=False,
)
logger.info("[label] dictionary: {} types".format(len(label_dict)))
else:
label_dict = data_dict
return cls(args, data_dict, label_dict)
def load_dataset(self, split, combine=False, **kwargs):
"""Load a given dataset split (e.g., train, valid, test)."""
def get_path(key, split):
return os.path.join(self.args.data, key, split)
def make_dataset(key, dictionary):
split_path = get_path(key, split)
try:
dataset = data_utils.load_indexed_dataset(
split_path,
dictionary,
self.args.dataset_impl,
combine=combine,
)
except Exception as e:
if "StorageException: [404] Path not found" in str(e):
logger.warning(f"dataset {e} not found")
dataset = None
else:
raise e
return dataset
input0 = make_dataset("input0", self.source_dictionary)
assert input0 is not None, "could not find dataset: {}".format(
get_path("input0", split)
)
input1 = make_dataset("input1", self.source_dictionary)
if self.args.init_token is not None:
input0 = PrependTokenDataset(input0, self.args.init_token)
if input1 is None:
src_tokens = input0
else:
if self.args.separator_token is not None:
input1 = PrependTokenDataset(input1, self.args.separator_token)
src_tokens = ConcatSentencesDataset(input0, input1)
with data_utils.numpy_seed(self.args.seed):
shuffle = np.random.permutation(len(src_tokens))
src_tokens = maybe_shorten_dataset(
src_tokens,
split,
self.args.shorten_data_split_list,
self.args.shorten_method,
self.max_positions(),
self.args.seed,
)
dataset = {
"id": IdDataset(),
"net_input": {
"src_tokens": RightPadDataset(
src_tokens,
pad_idx=self.source_dictionary.pad(),
),
"src_lengths": NumelDataset(src_tokens, reduce=False),
},
"nsentences": NumSamplesDataset(),
"ntokens": NumelDataset(src_tokens, reduce=True),
}
if self.args.add_prev_output_tokens:
prev_tokens_dataset = RightPadDataset(
RollDataset(src_tokens, 1),
pad_idx=self.dictionary.pad(),
)
dataset["net_input"].update(
prev_output_tokens=prev_tokens_dataset,
)
if not self.args.regression_target:
label_dataset = make_dataset("label", self.label_dictionary)
if label_dataset is not None:
dataset.update(
target=OffsetTokensDataset(
StripTokenDataset(
label_dataset,
id_to_strip=self.label_dictionary.eos(),
),
offset=-self.label_dictionary.nspecial,
)
)
else:
label_path = "{0}.label".format(get_path("label", split))
if os.path.exists(label_path):
def parse_regression_target(i, line):
values = line.split()
assert (
len(values) == self.args.num_classes
), f'expected num_classes={self.args.num_classes} regression target values on line {i}, found: "{line}"'
return [float(x) for x in values]
with open(label_path) as h:
dataset.update(
target=RawLabelDataset(
[
parse_regression_target(i, line.strip())
for i, line in enumerate(h.readlines())
]
)
)
nested_dataset = NestedDictionaryDataset(
dataset,
sizes=[src_tokens.sizes],
)
if self.args.no_shuffle:
dataset = nested_dataset
else:
dataset = SortDataset(
nested_dataset,
# shuffle
sort_order=[shuffle],
)
logger.info("Loaded {0} with #samples: {1}".format(split, len(dataset)))
self.datasets[split] = dataset
return self.datasets[split]
def build_model(self, args):
from fairseq import models
model = models.build_model(args, self)
model.register_classification_head(
getattr(args, "classification_head_name", "sentence_classification_head"),
num_classes=self.args.num_classes,
)
return model
def max_positions(self):
return self._max_positions
@property
def source_dictionary(self):
return self.dictionary
@property
def target_dictionary(self):
return self.dictionary
@property
def label_dictionary(self):
return self._label_dictionary
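# Expected on-disk layout for `--data <dir>`, inferred from get_path()/make_dataset()
# above (binarized file extensions depend on --dataset-impl; names are illustrative):
#   <dir>/input0/dict.txt      # source dictionary
#   <dir>/input0/train.*       # binarized first input segment
#   <dir>/input1/train.*       # optional second segment for sentence pairs
#   <dir>/label/dict.txt       # label dictionary (classification)
#   <dir>/label/train.label    # raw float targets, only with --regression-target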
| COCO-LM/fairseq/fairseq/tasks/sentence_prediction.py/0 | {
"file_path": "COCO-LM/fairseq/fairseq/tasks/sentence_prediction.py",
"repo_id": "COCO-LM",
"token_count": 4712
} | 199 |
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Evaluate the perplexity of a trained language model.
"""
import logging
import math
import os
import sys
from argparse import Namespace
from typing import Iterable, List, Optional
import torch
import fairseq
from fairseq import checkpoint_utils, distributed_utils, options, tasks, utils
from fairseq.dataclass.utils import convert_namespace_to_omegaconf
from fairseq.logging import progress_bar
from fairseq.logging.meters import StopwatchMeter
from fairseq.sequence_scorer import SequenceScorer
from omegaconf import DictConfig
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
logger = logging.getLogger("fairseq_cli.eval_lm")
def eval_lm(
models: List[fairseq.models.FairseqModel],
source_dictionary: fairseq.data.Dictionary,
batch_iterator: Iterable,
post_process: Optional[str] = None,
output_word_probs: bool = False,
output_word_stats: bool = False,
target_dictionary: Optional[fairseq.data.Dictionary] = None,
softmax_batch: int = False,
remove_bos_token: bool = False,
device: Optional[torch.device] = None,
):
"""
Args:
models (List[~fairseq.models.FairseqModel]): list of models to
evaluate. Models are essentially `nn.Module` instances, but
must be compatible with fairseq's `SequenceScorer`.
source_dictionary (~fairseq.data.Dictionary): dictionary for
applying any relevant post processing or outputing word
probs/stats.
batch_iterator (Iterable): yield batches of data
post_process (Optional[str]): post-process text by removing BPE,
letter segmentation, etc. Valid options can be found in
fairseq.data.utils.post_process, although not all options
are implemented here.
output_word_probs (Optional[bool]): output words and their
predicted log probabilities
output_word_stats (Optional[bool]): output word statistics such
as word count and average probability
target_dictionary (Optional[~fairseq.data.Dictionary]): output
dictionary (defaults to *source_dictionary*)
softmax_batch (Optional[bool]): if BxT is more than this, will
batch the softmax over vocab to this amount of tokens, in
order to fit into GPU memory
remove_bos_token (Optional[bool]): if True, confirm that the
first token is the beginning-of-sentence symbol (according
to the relevant dictionary) and remove it from the output
device (Optional[torch.device]): device to use for evaluation
(defaults to device of first model parameter)
"""
if target_dictionary is None:
target_dictionary = source_dictionary
if device is None:
device = next(models[0].parameters()).device
gen_timer = StopwatchMeter()
scorer = SequenceScorer(target_dictionary, softmax_batch)
score_sum = 0.0
count = 0
if post_process is not None:
if post_process in {"subword_nmt", "@@ "}:
bpe_cont = post_process.rstrip()
bpe_toks = {
i
for i in range(len(source_dictionary))
if source_dictionary[i].endswith(bpe_cont)
}
else:
raise NotImplementedError(
"--post-process={post_process} is not implemented"
)
bpe_len = len(bpe_cont)
else:
bpe_toks = None
bpe_len = 0
word_stats = dict()
for sample in batch_iterator:
if "net_input" not in sample:
continue
sample = utils.move_to_cuda(sample, device=device)
gen_timer.start()
hypos = scorer.generate(models, sample)
gen_timer.stop(sample["ntokens"])
for i, hypos_i in enumerate(hypos):
hypo = hypos_i[0]
sample_id = sample["id"][i]
tokens = hypo["tokens"]
tgt_len = tokens.numel()
pos_scores = hypo["positional_scores"].float()
if remove_bos_token:
assert hypo["tokens"][0].item() == target_dictionary.bos()
tokens = tokens[1:]
pos_scores = pos_scores[1:]
skipped_toks = 0
if bpe_toks is not None:
for i in range(tgt_len - 1):
if tokens[i].item() in bpe_toks:
skipped_toks += 1
pos_scores[i + 1] += pos_scores[i]
pos_scores[i] = 0
inf_scores = pos_scores.eq(float("inf")) | pos_scores.eq(float("-inf"))
if inf_scores.any():
logger.info(
"skipping tokens with inf scores:",
target_dictionary.string(tokens[inf_scores.nonzero()]),
)
pos_scores = pos_scores[(~inf_scores).nonzero()]
score_sum += pos_scores.sum().cpu()
count += pos_scores.numel() - skipped_toks
if output_word_probs or output_word_stats:
w = ""
word_prob = []
is_bpe = False
for i in range(len(tokens)):
w_ind = tokens[i].item()
w += source_dictionary[w_ind]
if bpe_toks is not None and w_ind in bpe_toks:
w = w[:-bpe_len]
is_bpe = True
else:
word_prob.append((w, pos_scores[i].item()))
next_prob = None
ind = i + 1
while ind < len(tokens):
if pos_scores[ind].item() != 0:
next_prob = pos_scores[ind]
break
ind += 1
word_stats.setdefault(w, WordStat(w, is_bpe)).add(
pos_scores[i].item(), next_prob
)
is_bpe = False
w = ""
if output_word_probs:
logger.info(
str(int(sample_id))
+ " "
+ (
"\t".join(
"{} [{:2f}]".format(x[0], x[1]) for x in word_prob
)
)
)
avg_nll_loss = (
-score_sum / count / math.log(2) if count > 0 else 0
) # convert to base 2
logger.info(
"Evaluated {:,} tokens in {:.1f}s ({:.2f} tokens/s)".format(
gen_timer.n, gen_timer.sum, 1.0 / gen_timer.avg if gen_timer.avg > 0 else 0
)
)
if output_word_stats:
for ws in sorted(word_stats.values(), key=lambda x: x.count, reverse=True):
logger.info(ws)
return {
"loss": avg_nll_loss,
"perplexity": 2 ** avg_nll_loss,
}
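# Worked example of the base-2 conversion above (numbers are illustrative): with
# score_sum = -693.15 summed natural-log probabilities over count = 1000 scored
# tokens, avg_nll_loss = 693.15 / 1000 / ln(2) ~= 1.0 bit per token, and the
# reported perplexity is 2 ** 1.0 = 2.0.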
class WordStat(object):
def __init__(self, word, is_bpe):
self.word = word
self.is_bpe = is_bpe
self.log_prob = 0
self.next_word_prob = 0
self.count = 0
self.missing_next_words = 0
def add(self, log_prob, next_word_prob):
"""increments counters for the sum of log probs of current word and next
word (given context ending at current word). Since the next word might be at the end of the example,
or it might be not counted because it is not an ending subword unit,
also keeps track of how many of those we have seen"""
if next_word_prob is not None:
self.next_word_prob += next_word_prob
else:
self.missing_next_words += 1
self.log_prob += log_prob
self.count += 1
def __str__(self):
return "{}\t{}\t{}\t{}\t{}\t{}".format(
self.word,
self.count,
self.log_prob,
self.is_bpe,
self.next_word_prob,
self.count - self.missing_next_words,
)
def main(cfg: DictConfig, **unused_kwargs):
if isinstance(cfg, Namespace):
cfg = convert_namespace_to_omegaconf(cfg)
utils.import_user_module(cfg.common)
logger.info(cfg)
if cfg.eval_lm.context_window > 0:
# reduce tokens per sample by the required context window size
cfg.task.tokens_per_sample -= cfg.eval_lm.context_window
# Initialize the task using the current *cfg*
task = tasks.setup_task(cfg.task)
# Load ensemble
logger.info("loading model(s) from {}".format(cfg.common_eval.path))
models, model_args, task = checkpoint_utils.load_model_ensemble_and_task(
[cfg.common_eval.path],
arg_overrides=eval(cfg.common_eval.model_overrides),
suffix=cfg.checkpoint.checkpoint_suffix,
strict=(cfg.checkpoint.checkpoint_shard_count == 1),
num_shards=cfg.checkpoint.checkpoint_shard_count,
task=task,
)
use_fp16 = cfg.common.fp16
use_cuda = torch.cuda.is_available() and not cfg.common.cpu
if use_cuda:
torch.cuda.set_device(cfg.distributed_training.device_id)
# Optimize ensemble for generation and set the source and dest dicts on the model
# (required by scorer)
for model in models:
if use_fp16:
model.half()
if use_cuda and not cfg.distributed_training.pipeline_model_parallel:
model.cuda()
model.prepare_for_inference_(cfg)
assert len(models) > 0
logger.info(
"num. model params: {:,}".format(sum(p.numel() for p in models[0].parameters()))
)
# Load dataset splits
task.load_dataset(cfg.dataset.gen_subset)
dataset = task.dataset(cfg.dataset.gen_subset)
logger.info(
"{} {} {:,} examples".format(
cfg.task.data, cfg.dataset.gen_subset, len(dataset)
)
)
itr = task.eval_lm_dataloader(
dataset=dataset,
max_tokens=cfg.dataset.max_tokens or 36000,
batch_size=cfg.dataset.batch_size,
max_positions=utils.resolve_max_positions(
*[model.max_positions() for model in models]
),
num_shards=max(
cfg.dataset.num_shards,
cfg.distributed_training.distributed_world_size,
),
shard_id=max(
cfg.dataset.shard_id,
cfg.distributed_training.distributed_rank,
),
num_workers=cfg.dataset.num_workers,
data_buffer_size=cfg.dataset.data_buffer_size,
context_window=cfg.eval_lm.context_window,
)
itr = progress_bar.progress_bar(
itr,
log_format=cfg.common.log_format,
log_interval=cfg.common.log_interval,
default_log_format=("tqdm" if not cfg.common.no_progress_bar else "simple"),
)
results = eval_lm(
models=models,
source_dictionary=task.source_dictionary,
batch_iterator=itr,
post_process=cfg.common_eval.post_process,
output_word_probs=cfg.eval_lm.output_word_probs,
output_word_stats=cfg.eval_lm.output_word_stats,
target_dictionary=task.target_dictionary,
softmax_batch=cfg.eval_lm.softmax_batch,
remove_bos_token=getattr(cfg.task, "add_bos_token", False),
)
logger.info(
"Loss (base 2): {:.4f}, Perplexity: {:.2f}".format(
results["loss"], results["perplexity"]
)
)
return results
def cli_main():
parser = options.get_eval_lm_parser()
args = options.parse_args_and_arch(parser)
distributed_utils.call_main(convert_namespace_to_omegaconf(args), main)
if __name__ == "__main__":
cli_main()
| COCO-LM/fairseq/fairseq_cli/eval_lm.py/0 | {
"file_path": "COCO-LM/fairseq/fairseq_cli/eval_lm.py",
"repo_id": "COCO-LM",
"token_count": 5811
} | 200 |
#include <ATen/ATen.h>
#include "compat.h"
// Forward/backward compatibility hack around
// https://github.com/pytorch/pytorch/commit/3aeb78079bcd68282fe9117088e138b77318e288
// pending more future-proof guidance from upstream.
// struct TypeShim
// {
// const at::Type& payload;
// TypeShim(const at::Type& type) : payload(type) {}
// // Enable trivial conversion to a const at::Type& for pre-3aeb78
// operator const at::Type&(){ return payload; };
// // Enable dispatch switch statements to take *this directly for post-3aeb78
// //operator at::ScalarType(){ return payload.; };
// };
#define DISPATCH_FLOAT_AND_HALF(TYPE, LEVEL, NAME, ...) \
switch(TYPE) \
{ \
case at::ScalarType::Float: \
{ \
using scalar_t_##LEVEL = float; \
__VA_ARGS__; \
break; \
} \
case at::ScalarType::Half: \
{ \
using scalar_t_##LEVEL = at::Half; \
__VA_ARGS__; \
break; \
} \
default: \
AT_ERROR(#NAME, " not implemented for '", toString(TYPE), "'"); \
}
#define DISPATCH_FLOAT_AND_BF16(TYPE, LEVEL, NAME, ...) \
switch(TYPE) \
{ \
case at::ScalarType::Float: \
{ \
using scalar_t_##LEVEL = float; \
__VA_ARGS__; \
break; \
} \
case at::ScalarType::BFloat16: \
{ \
using scalar_t_##LEVEL = at::BFloat16; \
__VA_ARGS__; \
break; \
} \
default: \
AT_ERROR(#NAME, " not implemented for '", toString(TYPE), "'"); \
}
#define DISPATCH_FLOAT_AND_HALF_AND_BF16(TYPE, LEVEL, NAME, ...) \
switch(TYPE) \
{ \
case at::ScalarType::Float: \
{ \
using scalar_t_##LEVEL = float; \
__VA_ARGS__; \
break; \
} \
case at::ScalarType::Half: \
{ \
using scalar_t_##LEVEL = at::Half; \
__VA_ARGS__; \
break; \
} \
case at::ScalarType::BFloat16: \
{ \
using scalar_t_##LEVEL = at::BFloat16; \
__VA_ARGS__; \
break; \
} \
default: \
AT_ERROR(#NAME, " not implemented for '", toString(TYPE), "'"); \
}
#define DISPATCH_FLOAT_HALF_AND_BYTE(TYPE, LEVEL, NAME, ...) \
switch(TYPE) \
{ \
case at::ScalarType::Float: \
{ \
using scalar_t_##LEVEL = float; \
__VA_ARGS__; \
break; \
} \
case at::ScalarType::Half: \
{ \
using scalar_t_##LEVEL = at::Half; \
__VA_ARGS__; \
break; \
} \
case at::ScalarType::Byte: \
{ \
using scalar_t_##LEVEL = uint8_t; \
__VA_ARGS__; \
break; \
} \
default: \
AT_ERROR(#NAME, " not implemented for '", toString(TYPE), "'"); \
}
#define DISPATCH_DOUBLE_FLOAT_AND_HALF(TYPE, LEVEL, NAME, ...) \
switch(TYPE) \
{ \
case at::ScalarType::Double: \
{ \
using scalar_t_##LEVEL = double; \
__VA_ARGS__; \
break; \
} \
case at::ScalarType::Float: \
{ \
using scalar_t_##LEVEL = float; \
__VA_ARGS__; \
break; \
} \
case at::ScalarType::Half: \
{ \
using scalar_t_##LEVEL = at::Half; \
__VA_ARGS__; \
break; \
} \
default: \
AT_ERROR(#NAME, " not implemented for '", toString(TYPE), "'"); \
}
#define DISPATCH_DOUBLE_FLOAT_AND_HALF_AND_BF16(TYPE, LEVEL, NAME, ...) \
switch(TYPE) \
{ \
case at::ScalarType::Double: \
{ \
using scalar_t_##LEVEL = double; \
__VA_ARGS__; \
break; \
} \
case at::ScalarType::Float: \
{ \
using scalar_t_##LEVEL = float; \
__VA_ARGS__; \
break; \
} \
case at::ScalarType::Half: \
{ \
using scalar_t_##LEVEL = at::Half; \
__VA_ARGS__; \
break; \
} \
case at::ScalarType::BFloat16: \
{ \
using scalar_t_##LEVEL = at::BFloat16; \
__VA_ARGS__; \
break; \
} \
default: \
AT_ERROR(#NAME, " not implemented for '", toString(TYPE), "'"); \
}
#define DISPATCH_DOUBLE_AND_FLOAT(TYPE, LEVEL, NAME, ...) \
switch(TYPE) \
{ \
case at::ScalarType::Double: \
{ \
using scalar_t_##LEVEL = double; \
__VA_ARGS__; \
break; \
} \
case at::ScalarType::Float: \
{ \
using scalar_t_##LEVEL = float; \
__VA_ARGS__; \
break; \
} \
default: \
AT_ERROR(#NAME, " not implemented for '", toString(TYPE), "'"); \
}
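// Illustrative use of the dispatch macros above (sketch only; `my_op_launcher`
// is a hypothetical templated launcher, not part of this repository):
//
//   void my_op(at::Tensor input) {
//     DISPATCH_FLOAT_AND_HALF(input.scalar_type(), 0, "my_op",
//         my_op_launcher<scalar_t_0>(input.data_ptr<scalar_t_0>(), input.numel()));
//   }
//
// Each macro binds the runtime dtype to the alias scalar_t_<LEVEL>, runs the body,
// and raises AT_ERROR for any dtype the switch does not cover.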
template<typename T>
__device__ __forceinline__ T reduce_block_into_lanes
(T *x,
T val,
int lanes=1,
bool share_result=false) // lanes is intended to be <= 32.
{
int tid = threadIdx.x + threadIdx.y*blockDim.x;
int blockSize = blockDim.x*blockDim.y; // blockSize is intended to be a multiple of 32.
if(blockSize >= 64)
{
x[tid] = val;
__syncthreads();
}
#pragma unroll
for(int i = (blockSize >> 1); i >= 64; i >>= 1)
{
if(tid < i)
x[tid] = x[tid] + x[tid+i];
__syncthreads();
}
T final;
if(tid < 32)
{
if(blockSize >= 64)
final = x[tid] + x[tid+32];
else
final = val;
// __SYNCWARP();
#pragma unroll
for(int i = 16; i >= lanes; i >>= 1)
final = final + __shfl_down_sync(0xffffffff, final, i);
}
if(share_result)
{
if(tid < lanes)
x[tid] = final; // EpilogueOp
// Make sure the smem result is visible to all warps.
__syncthreads();
}
return final;
}
template<typename T>
__device__ __forceinline__ T reduce_block_into_lanes_max_op
(T *x,
T val,
int lanes=1,
bool share_result=false) // lanes is intended to be <= 32.
{
int tid = threadIdx.x + threadIdx.y*blockDim.x;
int blockSize = blockDim.x*blockDim.y; // blockSize is intended to be a multiple of 32.
if(blockSize >= 64)
{
x[tid] = val;
__syncthreads();
}
#pragma unroll
for(int i = (blockSize >> 1); i >= 64; i >>= 1)
{
if(tid < i)
x[tid] = fmaxf(fabsf(x[tid]), fabsf(x[tid+i]));
__syncthreads();
}
T final;
if(tid < 32)
{
if(blockSize >= 64)
final = fmaxf(fabsf(x[tid]), fabsf(x[tid+32]));
else
final = val;
// __SYNCWARP();
#pragma unroll
for(int i = 16; i >= lanes; i >>= 1)
final = fmaxf(fabsf(final), fabsf(__shfl_down_sync(0xffffffff, final, i)));
}
if(share_result)
{
if(tid < lanes)
x[tid] = final; // EpilogueOp
// Make sure the smem result is visible to all warps.
__syncthreads();
}
return final;
}
| COCO-LM/fairseq/fused_ops/csrc/type_shim.h/0 | {
"file_path": "COCO-LM/fairseq/fused_ops/csrc/type_shim.h",
"repo_id": "COCO-LM",
"token_count": 3042
} | 201 |
#!/usr/bin/env bash
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
# GLUE task name, from ['MNLI', 'QQP', 'QNLI', 'SST-2', 'CoLA', 'RTE', 'MRPC', 'STS-B']
TASK=$1
# Path to pretrained COCO-LM checkpoints
PRETRAINED_MODEL_PATH=$2
# Path to processed GLUE dataset (containing binary files) 'path/to/glue_data'
GLUE_DATA_DIR=$3
# Output path for results and fine-tuned model
OUTPUT_PATH=$4
# Set pretrained model name, from ['cocolm_base', 'cocolm_large']
ARCH=$5
# Set the hyperparameters for the run
N_EPOCH=$6
WARMUP_RATIO=$7
BSZ=$8
LR=$9
SEED=${10}
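# Illustrative invocation (hyperparameter values are placeholders, not tuned
# recommendations):
#   bash run_glue.sh RTE /path/to/cocolm_base/model.pt /path/to/glue_data \
#       /path/to/outputs cocolm_base 10 16 16 2e-5 1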
if [ "$ARCH" = "cocolm_base" ]
then
BINS=64
MAX_DIST=128
else
BINS=128
MAX_DIST=256
fi
BETAS="(0.9,0.98)"
CLIP=0.0
WEIGHT_DECAY=0.01
MAX_TOKENS=2200
if [ ! -e $PRETRAINED_MODEL_PATH ]; then
echo "Checkpoint ${PRETRAINED_MODEL_PATH} doesn't exist"
exit 0
fi
TASK_DATA_DIR=$GLUE_DATA_DIR/$TASK-bin
OPTION=""
METRIC=accuracy
N_CLASSES=2
task_type=SMALL
if [ "$TASK" = "MNLI" ]
then
N_CLASSES=3
OPTION="--valid-subset valid,valid1"
EPOCH_ITER=12452
task_type=LARGE
fi
if [ "$TASK" = "QNLI" ]
then
EPOCH_ITER=3320
task_type=LARGE
fi
if [ "$TASK" = "QQP" ]
then
EPOCH_ITER=11392
task_type=LARGE
fi
if [ "$TASK" = "SST-2" ]
then
EPOCH_ITER=2105
task_type=LARGE
fi
if [ "$TASK" = "MRPC" ]
then
EPOCH_ITER=115
fi
if [ "$TASK" = "RTE" ]
then
EPOCH_ITER=101
fi
if [ "$TASK" = "CoLA" ]
then
METRIC=mcc
EPOCH_ITER=268
fi
if [ "$TASK" = "STS-B" ]
then
METRIC=pearson_spearman
N_CLASSES=1
OPTION="--regression-target"
EPOCH_ITER=180
fi
if [ "$task_type" = "LARGE" ]
then
if [ "$N_EPOCH" = "10" ]
then
echo 'skip'
exit 0
fi
if [ "$WARMUP_RATIO" = "10" ]
then
echo 'skip'
exit 0
fi
# if [ "$BSZ" = "16" ]
# then
# echo 'skip'
# exit 0
# fi
fi
EPOCH_ITER=$((EPOCH_ITER*2)) # convert to the iteration count for bsz=16
BSZ_EXPAND=$((BSZ/16))
MAX_TOKENS=$((MAX_TOKENS*BSZ/16)) # scale max tokens with the actual batch size
EPOCH_ITER=$((EPOCH_ITER/BSZ_EXPAND))
TOTAL_STEPS=$((EPOCH_ITER*N_EPOCH))
WARMUP_STEPS=$((TOTAL_STEPS/WARMUP_RATIO))
VALIDATE_INTERVAL=$((EPOCH_ITER/2))
OUTPUT_PATH=$OUTPUT_PATH/$TASK/$N_EPOCH-$WARMUP_RATIO-$BSZ-$LR-$SEED
mkdir -p $OUTPUT_PATH
echo $OUTPUT_PATH
if [ -e $OUTPUT_PATH/train_log.txt ]; then
if grep -q 'done training' $OUTPUT_PATH/train_log.txt && grep -q 'Loaded checkpoint' $OUTPUT_PATH/train_log.txt; then
echo "Training log existed"
exit 0
fi
fi
python train.py $TASK_DATA_DIR --fp16 --fp16-init-scale 4 --threshold-loss-scale 1 --fp16-scale-window 128 \
--restore-file $PRETRAINED_MODEL_PATH \
--max-positions 512 \
--max-sentences $BSZ \
--max-tokens $MAX_TOKENS \
--update-freq 1 \
--task sentence_prediction \
--reset-optimizer --reset-dataloader --reset-meters \
--required-batch-size-multiple 1 \
--init-token 0 --separator-token 2 \
--arch $ARCH \
--criterion sentence_prediction $OPTION \
--num-classes $N_CLASSES \
--dropout 0.1 --attention-dropout 0.1 --pooler-dropout 0.1 \
--weight-decay $WEIGHT_DECAY --optimizer adam --adam-betas "$BETAS" --adam-eps 1e-06 \
--clip-norm $CLIP \
--lr-scheduler polynomial_decay --lr $LR --total-num-update $TOTAL_STEPS --warmup-updates $WARMUP_STEPS \
--max-update $TOTAL_STEPS --seed $SEED --save-dir $OUTPUT_PATH --no-progress-bar --log-interval 100 --no-epoch-checkpoints --no-last-checkpoints \
--find-unused-parameters --skip-invalid-size-inputs-valid-test --rel-pos 1 --max-rel-pos $MAX_DIST --rel-pos-bins $BINS \
--best-checkpoint-metric $METRIC --maximize-best-checkpoint-metric --validate-interval-updates $VALIDATE_INTERVAL | tee $OUTPUT_PATH/train_log.txt
| COCO-LM/fairseq/run_glue.sh/0 | {
"file_path": "COCO-LM/fairseq/run_glue.sh",
"repo_id": "COCO-LM",
"token_count": 1738
} | 202 |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Split a large file into a train and valid set while respecting document
boundaries. Documents should be separated by a single empty line.
"""
import argparse
import random
import sys
def main():
parser = argparse.ArgumentParser()
parser.add_argument("input")
parser.add_argument("sample_output", help="train output file")
parser.add_argument("remainder_output", help="valid output file")
parser.add_argument("-k", type=int, help="remainder size")
parser.add_argument(
"--lines", action="store_true", help="split lines instead of docs"
)
args = parser.parse_args()
assert args.k is not None
sample = []
remainder = []
num_docs = [0]
def update_sample(doc):
if len(sample) < args.k:
sample.append(doc.copy())
else:
i = num_docs[0]
j = random.randrange(i + 1)
if j < args.k:
remainder.append(sample[j])
sample[j] = doc.copy()
else:
remainder.append(doc.copy())
num_docs[0] += 1
doc.clear()
with open(args.input, "r", encoding="utf-8") as h:
doc = []
for i, line in enumerate(h):
if line.strip() == "": # empty line indicates new document
update_sample(doc)
else:
doc.append(line)
if args.lines:
update_sample(doc)
if i % 1000000 == 0:
print(i, file=sys.stderr, end="", flush=True)
elif i % 100000 == 0:
print(".", file=sys.stderr, end="", flush=True)
if len(doc) > 0:
update_sample(doc)
print(file=sys.stderr, flush=True)
assert len(sample) == args.k
with open(args.sample_output, "w", encoding="utf-8") as out:
first = True
for doc in sample:
if not first and not args.lines:
out.write("\n")
first = False
for line in doc:
out.write(line)
with open(args.remainder_output, "w", encoding="utf-8") as out:
first = True
for doc in remainder:
if not first and not args.lines:
out.write("\n")
first = False
for line in doc:
out.write(line)
if __name__ == "__main__":
main()
| COCO-LM/fairseq/scripts/split_train_valid_docs.py/0 | {
"file_path": "COCO-LM/fairseq/scripts/split_train_valid_docs.py",
"repo_id": "COCO-LM",
"token_count": 1183
} | 203 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import unittest
from typing import Sequence
from fairseq.data import LanguagePairDataset, ListDataset, RoundRobinZipDatasets
from tests.test_train import mock_dict
def lang_pair_dataset(lengths: Sequence[int]) -> LanguagePairDataset:
tokens = [[i] * l for i, l in enumerate(lengths)]
return LanguagePairDataset(ListDataset(tokens), lengths, mock_dict())
def sample(id: int, length: int):
return {"id": id, "source": [id] * length, "target": None}
class TestDataset(unittest.TestCase):
def setUp(self):
logging.disable(logging.CRITICAL)
def tearDown(self):
logging.disable(logging.NOTSET)
def test_round_robin_zip_datasets(self):
long_dataset = lang_pair_dataset([10, 9, 8, 11])
short_dataset = lang_pair_dataset([11, 9])
dataset = RoundRobinZipDatasets({"a": long_dataset, "b": short_dataset})
# Dataset is now sorted by sentence length
dataset.ordered_indices()
assert dataset.longest_dataset is long_dataset
self.assertEqual(dict(dataset[0]), {"a": sample(2, 8), "b": sample(1, 9)})
# The item 2 of dataset 'a' is with item (2 % 2 = 0) of dataset 'b'
self.assertEqual(dict(dataset[2]), {"a": sample(0, 10), "b": sample(1, 9)})
def test_round_robin_zip_datasets_filtered(self):
long_dataset = lang_pair_dataset([10, 20, 8, 11, 1000, 7, 12])
short_dataset = lang_pair_dataset([11, 20, 9, 1000])
dataset = RoundRobinZipDatasets({"a": long_dataset, "b": short_dataset})
# Dataset is now sorted by sentence length
idx = dataset.ordered_indices()
idx, _ = dataset.filter_indices_by_size(idx, {"a": 19, "b": 900})
self.assertEqual(list(idx), [0, 1, 2, 3, 4])
self.assertEqual(dict(dataset[0]), {"a": sample(5, 7), "b": sample(2, 9)})
self.assertEqual(dict(dataset[2]), {"a": sample(0, 10), "b": sample(1, 20)})
self.assertEqual(dict(dataset[4]), {"a": sample(6, 12), "b": sample(0, 11)})
def test_round_robin_zip_datasets_filtered_with_tuple(self):
long_dataset = lang_pair_dataset([10, 20, 8, 11, 1000, 7, 12])
short_dataset = lang_pair_dataset([11, 20, 9, 1000])
dataset = RoundRobinZipDatasets({"a": long_dataset, "b": short_dataset})
# Dataset is now sorted by sentence length
idx = dataset.ordered_indices()
idx, _ = dataset.filter_indices_by_size(idx, 19)
self.assertEqual(list(idx), [0, 1, 2, 3, 4])
self.assertEqual(dict(dataset[0]), {"a": sample(5, 7), "b": sample(2, 9)})
self.assertEqual(dict(dataset[2]), {"a": sample(0, 10), "b": sample(2, 9)})
self.assertEqual(dict(dataset[4]), {"a": sample(6, 12), "b": sample(2, 9)})
| COCO-LM/fairseq/tests/test_dataset.py/0 | {
"file_path": "COCO-LM/fairseq/tests/test_dataset.py",
"repo_id": "COCO-LM",
"token_count": 1257
} | 204 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from typing import Dict, List
import tests.utils as test_utils
import torch
from fairseq import utils
from fairseq.data import (
Dictionary,
LanguagePairDataset,
TransformEosDataset,
data_utils,
noising,
)
class TestDataNoising(unittest.TestCase):
def _get_test_data_with_bpe_cont_marker(self, append_eos=True):
"""
Args:
append_eos: if True, each input sentence in the source tokens tensor
will have an EOS appended to the end.
Returns:
vocabs: BPE vocab with continuation markers as suffixes to denote
non-end of word tokens. This is the standard BPE format used in
fairseq's preprocessing.
x: input tensor containing numberized source tokens, with EOS at the
end if append_eos is true
src_lengths: and source lengths.
"""
vocab = Dictionary()
vocab.add_symbol("he@@")
vocab.add_symbol("llo")
vocab.add_symbol("how")
vocab.add_symbol("are")
vocab.add_symbol("y@@")
vocab.add_symbol("ou")
vocab.add_symbol("n@@")
vocab.add_symbol("ew")
vocab.add_symbol("or@@")
vocab.add_symbol("k")
src_tokens = [
["he@@", "llo", "n@@", "ew", "y@@", "or@@", "k"],
["how", "are", "y@@", "ou"],
]
        x, src_lengths = self._convert_src_tokens_to_tensor(
vocab=vocab, src_tokens=src_tokens, append_eos=append_eos
)
return vocab, x, src_lengths
def _get_test_data_with_bpe_end_marker(self, append_eos=True):
"""
Args:
append_eos: if True, each input sentence in the source tokens tensor
will have an EOS appended to the end.
Returns:
vocabs: BPE vocab with end-of-word markers as suffixes to denote
tokens at the end of a word. This is an alternative to fairseq's
standard preprocessing framework and is not generally supported
within fairseq.
x: input tensor containing numberized source tokens, with EOS at the
end if append_eos is true
src_lengths: and source lengths.
"""
vocab = Dictionary()
vocab.add_symbol("he")
vocab.add_symbol("llo_EOW")
vocab.add_symbol("how_EOW")
vocab.add_symbol("are_EOW")
vocab.add_symbol("y")
vocab.add_symbol("ou_EOW")
vocab.add_symbol("n")
vocab.add_symbol("ew_EOW")
vocab.add_symbol("or")
vocab.add_symbol("k_EOW")
src_tokens = [
["he", "llo_EOW", "n", "ew_EOW", "y", "or", "k_EOW"],
["how_EOW", "are_EOW", "y", "ou_EOW"],
]
        x, src_lengths = self._convert_src_tokens_to_tensor(
vocab=vocab, src_tokens=src_tokens, append_eos=append_eos
)
return vocab, x, src_lengths
def _get_test_data_with_word_vocab(self, append_eos=True):
"""
Args:
append_eos: if True, each input sentence in the source tokens tensor
will have an EOS appended to the end.
Returns:
vocabs: word vocab
x: input tensor containing numberized source tokens, with EOS at the
end if append_eos is true
src_lengths: and source lengths.
"""
vocab = Dictionary()
vocab.add_symbol("hello")
vocab.add_symbol("how")
vocab.add_symbol("are")
vocab.add_symbol("you")
vocab.add_symbol("new")
vocab.add_symbol("york")
src_tokens = [
["hello", "new", "york", "you"],
["how", "are", "you", "new", "york"],
]
x, src_lengths = self._convert_src_tokens_to_tensor(
vocab=vocab, src_tokens=src_tokens, append_eos=append_eos
)
return vocab, x, src_lengths
def _convert_src_tokens_to_tensor(
self, vocab: Dictionary, src_tokens: List[List[str]], append_eos: bool
):
src_len = [len(x) for x in src_tokens]
# If we have to append EOS, we include EOS in counting src length
if append_eos:
src_len = [length + 1 for length in src_len]
x = torch.LongTensor(len(src_tokens), max(src_len)).fill_(vocab.pad())
for i in range(len(src_tokens)):
for j in range(len(src_tokens[i])):
x[i][j] = vocab.index(src_tokens[i][j])
if append_eos:
x[i][j + 1] = vocab.eos()
x = x.transpose(1, 0)
return x, torch.LongTensor(src_len)
def assert_eos_at_end(self, x, x_len, eos):
"""Asserts last token of every sentence in x is EOS """
for i in range(len(x_len)):
self.assertEqual(
x[x_len[i] - 1][i],
eos,
(
"Expected eos (token id {eos}) at the end of sentence {i} "
"but got {other} instead"
).format(i=i, eos=eos, other=x[i][-1]),
)
def assert_word_dropout_correct(self, x, x_noised, x_len, l_noised):
# Expect only the first word (2 bpe tokens) of the first example
# was dropped out
self.assertEqual(x_len[0] - 2, l_noised[0])
for i in range(l_noised[0]):
self.assertEqual(x_noised[i][0], x[i + 2][0])
def test_word_dropout_with_eos(self):
vocab, x, x_len = self._get_test_data_with_bpe_cont_marker(append_eos=True)
with data_utils.numpy_seed(1234):
noising_gen = noising.WordDropout(vocab)
x_noised, l_noised = noising_gen.noising(x, x_len, 0.2)
self.assert_word_dropout_correct(
x=x, x_noised=x_noised, x_len=x_len, l_noised=l_noised
)
self.assert_eos_at_end(x=x_noised, x_len=l_noised, eos=vocab.eos())
def assert_word_blanking_correct(self, x, x_noised, x_len, l_noised, unk):
# Expect only the first word (2 bpe tokens) of the first example
# was blanked out
self.assertEqual(x_len[0], l_noised[0])
for i in range(l_noised[0]):
if i < 2:
self.assertEqual(x_noised[i][0], unk)
else:
self.assertEqual(x_noised[i][0], x[i][0])
def test_word_blank_with_eos(self):
vocab, x, x_len = self._get_test_data_with_bpe_cont_marker(append_eos=True)
with data_utils.numpy_seed(1234):
noising_gen = noising.WordDropout(vocab)
x_noised, l_noised = noising_gen.noising(x, x_len, 0.2, vocab.unk())
self.assert_word_blanking_correct(
x=x, x_noised=x_noised, x_len=x_len, l_noised=l_noised, unk=vocab.unk()
)
self.assert_eos_at_end(x=x_noised, x_len=l_noised, eos=vocab.eos())
def generate_unchanged_shuffle_map(self, length):
return {i: i for i in range(length)}
def assert_word_shuffle_matches_expected(
self,
x,
x_len,
max_shuffle_distance: int,
vocab: Dictionary,
expected_shufle_maps: List[Dict[int, int]],
expect_eos_at_end: bool,
bpe_end_marker=None,
):
"""
This verifies that with a given x, x_len, max_shuffle_distance, and
vocab, we get the expected shuffle result.
Args:
x: Tensor of shape (T x B) = (sequence_length, batch_size)
x_len: Tensor of length B = batch_size
max_shuffle_distance: arg to pass to noising
expected_shuffle_maps: List[mapping] where mapping is a
Dict[old_index, new_index], mapping x's elements from their
old positions in x to their new positions in x.
expect_eos_at_end: if True, check the output to make sure there is
an EOS at the end.
bpe_end_marker: str denoting the BPE end token. If this is not None, we
set the BPE cont token to None in the noising classes.
"""
bpe_cont_marker = None
if bpe_end_marker is None:
bpe_cont_marker = "@@"
with data_utils.numpy_seed(1234):
word_shuffle = noising.WordShuffle(
vocab, bpe_cont_marker=bpe_cont_marker, bpe_end_marker=bpe_end_marker
)
x_noised, l_noised = word_shuffle.noising(
x, x_len, max_shuffle_distance=max_shuffle_distance
)
# For every example, we have a different expected shuffle map. We check
# that each example is shuffled as expected according to each
# corresponding shuffle map.
for i in range(len(expected_shufle_maps)):
shuffle_map = expected_shufle_maps[i]
for k, v in shuffle_map.items():
self.assertEqual(x[k][i], x_noised[v][i])
# Shuffling should not affect the length of each example
for pre_shuffle_length, post_shuffle_length in zip(x_len, l_noised):
self.assertEqual(pre_shuffle_length, post_shuffle_length)
if expect_eos_at_end:
self.assert_eos_at_end(x=x_noised, x_len=l_noised, eos=vocab.eos())
def test_word_shuffle_with_eos(self):
vocab, x, x_len = self._get_test_data_with_bpe_cont_marker(append_eos=True)
# Assert word shuffle with max shuffle distance 0 causes input to be
# unchanged
self.assert_word_shuffle_matches_expected(
x=x,
x_len=x_len,
max_shuffle_distance=0,
vocab=vocab,
expected_shufle_maps=[
self.generate_unchanged_shuffle_map(example_len)
for example_len in x_len
],
expect_eos_at_end=True,
)
# Assert word shuffle with max shuffle distance 3 matches our expected
# shuffle order
self.assert_word_shuffle_matches_expected(
x=x,
x_len=x_len,
vocab=vocab,
max_shuffle_distance=3,
expected_shufle_maps=[
self.generate_unchanged_shuffle_map(x_len[0]),
{0: 0, 1: 3, 2: 1, 3: 2},
],
expect_eos_at_end=True,
)
def test_word_shuffle_with_eos_nonbpe(self):
"""The purpose of this is to test shuffling logic with word vocabs"""
vocab, x, x_len = self._get_test_data_with_word_vocab(append_eos=True)
# Assert word shuffle with max shuffle distance 0 causes input to be
# unchanged
self.assert_word_shuffle_matches_expected(
x=x,
x_len=x_len,
max_shuffle_distance=0,
vocab=vocab,
expected_shufle_maps=[
self.generate_unchanged_shuffle_map(example_len)
for example_len in x_len
],
expect_eos_at_end=True,
)
# Assert word shuffle with max shuffle distance 3 matches our expected
# shuffle order
self.assert_word_shuffle_matches_expected(
x=x,
x_len=x_len,
vocab=vocab,
max_shuffle_distance=3,
expected_shufle_maps=[
{0: 0, 1: 1, 2: 3, 3: 2},
{0: 0, 1: 2, 2: 1, 3: 3, 4: 4},
],
expect_eos_at_end=True,
)
def test_word_shuffle_without_eos(self):
"""Same result as word shuffle with eos except no EOS at end"""
vocab, x, x_len = self._get_test_data_with_bpe_cont_marker(append_eos=False)
# Assert word shuffle with max shuffle distance 0 causes input to be
# unchanged
self.assert_word_shuffle_matches_expected(
x=x,
x_len=x_len,
max_shuffle_distance=0,
vocab=vocab,
expected_shufle_maps=[
self.generate_unchanged_shuffle_map(example_len)
for example_len in x_len
],
expect_eos_at_end=False,
)
# Assert word shuffle with max shuffle distance 3 matches our expected
# shuffle order
self.assert_word_shuffle_matches_expected(
x=x,
x_len=x_len,
vocab=vocab,
max_shuffle_distance=3,
expected_shufle_maps=[
self.generate_unchanged_shuffle_map(x_len[0]),
{0: 0, 1: 3, 2: 1, 3: 2},
],
expect_eos_at_end=False,
)
def test_word_shuffle_without_eos_with_bpe_end_marker(self):
"""Same result as word shuffle without eos except using BPE end token"""
vocab, x, x_len = self._get_test_data_with_bpe_end_marker(append_eos=False)
# Assert word shuffle with max shuffle distance 0 causes input to be
# unchanged
self.assert_word_shuffle_matches_expected(
x=x,
x_len=x_len,
max_shuffle_distance=0,
vocab=vocab,
expected_shufle_maps=[
self.generate_unchanged_shuffle_map(example_len)
for example_len in x_len
],
expect_eos_at_end=False,
bpe_end_marker="_EOW",
)
# Assert word shuffle with max shuffle distance 3 matches our expected
# shuffle order
self.assert_word_shuffle_matches_expected(
x=x,
x_len=x_len,
vocab=vocab,
max_shuffle_distance=3,
expected_shufle_maps=[
self.generate_unchanged_shuffle_map(x_len[0]),
{0: 0, 1: 3, 2: 1, 3: 2},
],
expect_eos_at_end=False,
bpe_end_marker="_EOW",
)
def assert_no_eos_at_end(self, x, x_len, eos):
"""Asserts that the last token of each sentence in x is not EOS """
for i in range(len(x_len)):
self.assertNotEqual(
x[x_len[i] - 1][i],
eos,
"Expected no eos (token id {eos}) at the end of sentence {i}.".format(
eos=eos, i=i
),
)
def test_word_dropout_without_eos(self):
"""Same result as word dropout with eos except no EOS at end"""
vocab, x, x_len = self._get_test_data_with_bpe_cont_marker(append_eos=False)
with data_utils.numpy_seed(1234):
noising_gen = noising.WordDropout(vocab)
x_noised, l_noised = noising_gen.noising(x, x_len, 0.2)
self.assert_word_dropout_correct(
x=x, x_noised=x_noised, x_len=x_len, l_noised=l_noised
)
self.assert_no_eos_at_end(x=x_noised, x_len=l_noised, eos=vocab.eos())
def test_word_blank_without_eos(self):
"""Same result as word blank with eos except no EOS at end"""
vocab, x, x_len = self._get_test_data_with_bpe_cont_marker(append_eos=False)
with data_utils.numpy_seed(1234):
noising_gen = noising.WordDropout(vocab)
x_noised, l_noised = noising_gen.noising(x, x_len, 0.2, vocab.unk())
self.assert_word_blanking_correct(
x=x, x_noised=x_noised, x_len=x_len, l_noised=l_noised, unk=vocab.unk()
)
self.assert_no_eos_at_end(x=x_noised, x_len=l_noised, eos=vocab.eos())
def _get_noising_dataset_batch(
self,
src_tokens_no_pad,
src_dict,
append_eos_to_tgt=False,
):
"""
Constructs a NoisingDataset and the corresponding
``LanguagePairDataset(NoisingDataset(src), src)``. If
*append_eos_to_tgt* is True, wrap the source dataset in
:class:`TransformEosDataset` to append EOS to the clean source when
using it as the target.
"""
src_dataset = test_utils.TestDataset(data=src_tokens_no_pad)
noising_dataset = noising.NoisingDataset(
src_dataset=src_dataset,
src_dict=src_dict,
seed=1234,
max_word_shuffle_distance=3,
word_dropout_prob=0.2,
word_blanking_prob=0.2,
noising_class=noising.UnsupervisedMTNoising,
)
tgt = src_dataset
language_pair_dataset = LanguagePairDataset(
src=noising_dataset, tgt=tgt, src_sizes=None, src_dict=src_dict
)
language_pair_dataset = TransformEosDataset(
language_pair_dataset,
src_dict.eos(),
append_eos_to_tgt=append_eos_to_tgt,
)
dataloader = torch.utils.data.DataLoader(
dataset=language_pair_dataset,
batch_size=2,
collate_fn=language_pair_dataset.collater,
)
denoising_batch_result = next(iter(dataloader))
return denoising_batch_result
def test_noising_dataset_with_eos(self):
src_dict, src_tokens, _ = self._get_test_data_with_bpe_cont_marker(
append_eos=True
)
# Format data for src_dataset
src_tokens = torch.t(src_tokens)
src_tokens_no_pad = []
for src_sentence in src_tokens:
src_tokens_no_pad.append(
utils.strip_pad(tensor=src_sentence, pad=src_dict.pad())
)
denoising_batch_result = self._get_noising_dataset_batch(
src_tokens_no_pad=src_tokens_no_pad, src_dict=src_dict
)
eos, pad = src_dict.eos(), src_dict.pad()
# Generated noisy source as source
expected_src = torch.LongTensor(
[[4, 5, 10, 11, 8, 12, 13, eos], [pad, pad, pad, 6, 8, 9, 7, eos]]
)
# Original clean source as target (right-padded)
expected_tgt = torch.LongTensor(
[[4, 5, 10, 11, 8, 12, 13, eos], [6, 7, 8, 9, eos, pad, pad, pad]]
)
generated_src = denoising_batch_result["net_input"]["src_tokens"]
tgt_tokens = denoising_batch_result["target"]
self.assertTensorEqual(expected_src, generated_src)
self.assertTensorEqual(expected_tgt, tgt_tokens)
def test_noising_dataset_without_eos(self):
"""
Similar to test noising dataset with eos except that we have to set
*append_eos_to_tgt* to ``True``.
"""
src_dict, src_tokens, _ = self._get_test_data_with_bpe_cont_marker(
append_eos=False
)
# Format data for src_dataset
src_tokens = torch.t(src_tokens)
src_tokens_no_pad = []
for src_sentence in src_tokens:
src_tokens_no_pad.append(
utils.strip_pad(tensor=src_sentence, pad=src_dict.pad())
)
denoising_batch_result = self._get_noising_dataset_batch(
src_tokens_no_pad=src_tokens_no_pad,
src_dict=src_dict,
append_eos_to_tgt=True,
)
eos, pad = src_dict.eos(), src_dict.pad()
# Generated noisy source as source
expected_src = torch.LongTensor(
[[4, 5, 10, 11, 8, 12, 13], [pad, pad, pad, 6, 8, 9, 7]]
)
# Original clean source as target (right-padded)
expected_tgt = torch.LongTensor(
[[4, 5, 10, 11, 8, 12, 13, eos], [6, 7, 8, 9, eos, pad, pad, pad]]
)
generated_src = denoising_batch_result["net_input"]["src_tokens"]
tgt_tokens = denoising_batch_result["target"]
self.assertTensorEqual(expected_src, generated_src)
self.assertTensorEqual(expected_tgt, tgt_tokens)
def assertTensorEqual(self, t1, t2):
self.assertEqual(t1.size(), t2.size(), "size mismatch")
self.assertEqual(t1.ne(t2).long().sum(), 0)
if __name__ == "__main__":
unittest.main()
| COCO-LM/fairseq/tests/test_noising.py/0 | {
"file_path": "COCO-LM/fairseq/tests/test_noising.py",
"repo_id": "COCO-LM",
"token_count": 10290
} | 205 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
# The script is largely adapted from the huggingface transformers library
import re
import os
import unicodedata
from transformers.tokenization_utils import PreTrainedTokenizer
from cocolm.tokenization_utils import Dictionary
def _is_punctuation(char):
"""Checks whether `chars` is a punctuation character."""
cp = ord(char)
# We treat all non-letter/number ASCII as punctuation.
# Characters such as "^", "$", and "`" are not in the Unicode
# Punctuation class but we treat them as punctuation anyways, for
# consistency.
if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or
(cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):
return True
cat = unicodedata.category(char)
if cat.startswith("P"):
return True
return False
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
class SentencepiecePreTokenizer(object):
def __init__(self):
self.transl_table = dict( [ (ord(x), ord(y)) for x,y in zip( u"‘’´“”—–-", u"'''\"\"---") ] )
def handle_single_quote(self, tokens):
line = ' '.join(tokens)
line = re.sub(r"' ([smdSMDtT])\b", r"'\1", line)
line = re.sub(r"' ll\b", "'ll", line)
line = re.sub(r"' re\b", "'re", line)
line = re.sub(r"' ve\b", "'ve", line)
line = re.sub(r"' LL\b", "'LL ", line)
line = re.sub(r"' RE\b", "'RE ", line)
line = re.sub(r"' VE\b", "'VE ", line)
return line.split()
def split_on_cont_punc(self, tokens):
new_tokens = []
for token in tokens:
if len(token) > 1:
last_j = 0
pre_is_punc = _is_punctuation(token[0])
for j, ch in enumerate(token):
is_punc = _is_punctuation(ch)
if is_punc != pre_is_punc:
new_tokens.append(token[last_j: j])
last_j = j
pre_is_punc = is_punc
if last_j < len(token):
new_tokens.append(token[last_j:])
else:
new_tokens.append(token)
return new_tokens
def split_pre_and_post_punc(self, tokens):
def pre_punc(token):
last_j = 0
for j in range(1, len(token)):
if not _is_punctuation(token[j]):
last_j = j
break
return token[:last_j], token[last_j:]
def post_punc(token):
last_j = len(token)
for j in range(len(token) - 2, -1, -1):
                if not _is_punctuation(token[j]):
last_j = j + 1
break
return token[:last_j], token[last_j:]
new_tokens = []
for token in tokens:
if len(token) > 1 and _is_punctuation(token[0]):
a, b = pre_punc(token)
if a:
new_tokens.append(a)
if b:
if _is_punctuation(b[-1]):
c, d = post_punc(b)
if c:
new_tokens.append(c)
if d:
new_tokens.append(d)
else:
new_tokens.append(b)
elif len(token) > 1 and _is_punctuation(token[-1]):
a, b = post_punc(token)
if a:
new_tokens.append(a)
if b:
new_tokens.append(b)
else:
new_tokens.append(token)
return new_tokens
def tokenize(self, line):
line = line.strip()
line = line.replace("``", '"').replace("''", '"')
line = line.translate(self.transl_table)
tokens = line.split()
tokens = self.split_pre_and_post_punc(tokens)
tokens = self.handle_single_quote(tokens)
return tokens
COCOLM_VOCAB_FILES_NAMES = {"vocab_file": "sp.model", "dict_file": "dict.txt"}
COCOLM_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"cocolm-cased": "https://huggingface.co/microsoft/cocolm-base/resolve/main/sp.model",
},
"dict_file": {
"cocolm-cased": "https://huggingface.co/microsoft/cocolm-base/resolve/main/dict.txt"
}
}
COCOLM_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"cocolm-cased": 512,
}
class COCOLMTokenizer(PreTrainedTokenizer):
vocab_files_names = COCOLM_VOCAB_FILES_NAMES
pretrained_vocab_files_map = COCOLM_PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = COCOLM_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__(self, vocab_file, dict_file, **kwargs):
super(COCOLMTokenizer, self).__init__(**kwargs)
if not os.path.exists(vocab_file):
raise EnvironmentError("file {} not found".format(vocab_file))
try:
import sentencepiece as spm
self.sp = spm.SentencePieceProcessor()
self.sp.Load(vocab_file)
self.pre_tokenizer = SentencepiecePreTokenizer()
self.dictionary = Dictionary.load(dict_file)
except ImportError:
raise ImportError('Please install sentencepiece with: pip install sentencepiece')
self.dictionary.add_symbol('<mask>')
@property
def cls_token(self):
return self.dictionary.alias_mapper[self.dictionary.bos_word]
@property
def sep_token(self):
return self.dictionary.alias_mapper[self.dictionary.eos_word]
@property
def pad_token(self):
return self.dictionary.alias_mapper[self.dictionary.pad_word]
@property
def unk_token(self):
return self.dictionary.alias_mapper[self.dictionary.unk_word]
@property
def cls_token_id(self):
return self.dictionary.bos_index
@property
def sep_token_id(self):
return self.dictionary.eos_index
@property
def pad_token_id(self):
return self.dictionary.pad_index
@property
def mask_token_id(self):
return self.dictionary.index('<mask>')
@property
def unk_token_id(self):
return self.dictionary.unk_index
def encode_plus(self, text_a, text_b=None, add_special_tokens=True, max_length=512):
tokens_a = self.tokenize(text_a)
if text_b is not None:
tokens_b = self.tokenize(text_b)
_truncate_seq_pair(tokens_a, tokens_b, max_length - 4)
else:
if len(tokens_a) > max_length-2:
tokens_a = tokens_a[:max_length-2]
if add_special_tokens:
tokens = [self.dictionary.bos_word] + tokens_a + [self.dictionary.eos_word]
if text_b is not None:
tokens += [self.dictionary.eos_word] + tokens_b + [self.dictionary.eos_word]
else:
            tokens = tokens_a + tokens_b if text_b is not None else tokens_a
ids = self.convert_tokens_to_ids(tokens)
return {"input_ids": ids}
def encode(self, x: str, add_special_tokens=False) -> str:
tokens = self.tokenize(x)
return self.convert_tokens_to_ids(tokens)
def decode(self, ids: list) -> str:
x = "".join([self._convert_id_to_token(token_id) for token_id in ids])
return x.replace(' ', '').replace('\u2581', ' ').strip()
def skip_space(self, tokens):
new_tokens = []
for i, token in enumerate(tokens):
skip = False
# skip single space, to reduce total length
if token == '\u2581':
if i == len(tokens) - 1 or _is_punctuation(tokens[i + 1][0]):
skip = True
if not skip:
new_tokens.append(token)
return new_tokens
def tokenize(self, x):
x = ' '.join(self.pre_tokenizer.tokenize(x))
tokens = self.sp.EncodeAsPieces(x)
tokens = self.skip_space(tokens)
return tokens
def convert_tokens_to_ids(self, tokens: list):
ret = []
if isinstance(tokens, str):
return self.dictionary.index(tokens)
for token in tokens:
ret.append(self.dictionary.index(token))
return ret
def _convert_id_to_token(self, index):
""" Converts a token (str) in an id using the vocab. """
token = self.dictionary[index]
return token
def convert_tokens_to_string(self, tokens: list):
x = " ".join(tokens)
return x.replace(' ', '').replace('\u2581', ' ').strip()
def is_beginning_of_word(self, x: str) -> bool:
if x in ["<unk>", "<s>", "</s>", "<pad>", "[CLS]", "[PAD]", "[SEP]", "[UNK]"]:
# special elements are always considered beginnings
# HACK: this logic is already present in fairseq/tasks/masked_lm.py
# but these special tokens are also contained in the sentencepiece
# vocabulary which causes duplicate special tokens. This hack makes
# sure that they are all taken into account.
return True
return x.startswith("\u2581")
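# Illustrative usage sketch; the hub id mirrors the vocab URLs above and is an
# assumption, not verified here:
#   tokenizer = COCOLMTokenizer.from_pretrained("microsoft/cocolm-base")
#   batch = tokenizer.encode_plus("Hello world!", "How are you?")
#   ids = batch["input_ids"]   # [CLS] A [SEP] [SEP] B [SEP] as dictionary indices
#   text = tokenizer.decode(ids)  # detokenized string (special tokens included)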
| COCO-LM/huggingface/cocolm/tokenization_cocolm.py/0 | {
"file_path": "COCO-LM/huggingface/cocolm/tokenization_cocolm.py",
"repo_id": "COCO-LM",
"token_count": 4756
} | 206 |
# ------------------------------------------
# CSWin Transformer
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# written By Xiaoyi Dong
# ------------------------------------------
from torch.utils.data import Dataset
import numpy as np
import io
from PIL import Image
import os
import json
import random
def load_img(filepath):
img = Image.open(filepath).convert('RGB')
return img
class McDataset(Dataset):
def __init__(self, data_root, file_list, phase = 'train', transform=None):
self.transform = transform
self.root = os.path.join(data_root, phase)
temp_label = json.load(open('./dataset/imagenet_class_index.json', 'r'))
self.labels = {}
for i in range(1000):
self.labels[temp_label[str(i)][0]] = i
self.A_paths = []
self.A_labels = []
with open(file_list, 'r') as f:
temp_path = f.readlines()
for path in temp_path:
label = self.labels[path.split('/')[0]]
self.A_paths.append(os.path.join(self.root, path.strip()))
self.A_labels.append(label)
self.num = len(self.A_paths)
self.A_size = len(self.A_paths)
def __len__(self):
return self.num
def __getitem__(self, index):
try:
return self.load_img(index)
        except Exception:
return self.__getitem__(random.randint(0, self.__len__()-1))
def load_img(self, index):
A_path = self.A_paths[index % self.A_size]
A = load_img(A_path)
if self.transform is not None:
A = self.transform(A)
A_label = self.A_labels[index % self.A_size]
return A, A_label
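# Illustrative usage sketch (paths are placeholders). `file_list` is a plain text
# file of ImageNet-style relative paths such as "n01440764/n01440764_10026.JPEG";
# the leading synset id is mapped to a class index via
# dataset/imagenet_class_index.json as in __init__ above.
#   from torchvision import transforms
#   train_set = McDataset(
#       data_root="/path/to/imagenet",
#       file_list="/path/to/train_list.txt",
#       phase="train",
#       transform=transforms.Compose(
#           [transforms.RandomResizedCrop(224), transforms.ToTensor()]),
#   )
#   img, label = train_set[0]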
| CSWin-Transformer/labeled_memcached_dataset.py/0 | {
"file_path": "CSWin-Transformer/labeled_memcached_dataset.py",
"repo_id": "CSWin-Transformer",
"token_count": 764
} | 207 |
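# Illustrative launch command (script path assumed from the repository layout,
# not verified here):
#   python src/climax/climate_projection/train.py --config configs/climate_projection.yaml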
seed_everything: 42
# ---------------------------- TRAINER -------------------------------------------
trainer:
default_root_dir: ${oc.env:AMLT_OUTPUT_DIR,/home/tungnd/ClimaX/exps/climate_projection_climax}
precision: 16
gpus: null
num_nodes: 1
accelerator: gpu
strategy: ddp
min_epochs: 1
max_epochs: 50
enable_progress_bar: true
sync_batchnorm: True
enable_checkpointing: True
resume_from_checkpoint: null
# debugging
fast_dev_run: false
logger:
class_path: pytorch_lightning.loggers.tensorboard.TensorBoardLogger
init_args:
save_dir: ${trainer.default_root_dir}/logs
name: null
version: null
log_graph: False
default_hp_metric: True
prefix: ""
callbacks:
- class_path: pytorch_lightning.callbacks.LearningRateMonitor
init_args:
logging_interval: "step"
- class_path: pytorch_lightning.callbacks.ModelCheckpoint
init_args:
dirpath: "${trainer.default_root_dir}/checkpoints/"
monitor: "val/w_mse" # name of the logged metric which determines when model is improving
mode: "min" # "max" means higher metric value is better, can be also "min"
save_top_k: 1 # save k best models (determined by above metric)
save_last: True # additionaly always save model from last epoch
verbose: False
filename: "epoch_{epoch:03d}"
auto_insert_metric_name: False
- class_path: pytorch_lightning.callbacks.EarlyStopping
init_args:
monitor: "val/w_mse" # name of the logged metric which determines when model is improving
mode: "min" # "max" means higher metric value is better, can be also "min"
patience: 5 # how many validation epochs of not improving until training stops
min_delta: 0. # minimum change in the monitored metric needed to qualify as an improvement
- class_path: pytorch_lightning.callbacks.RichModelSummary
init_args:
max_depth: -1
- class_path: pytorch_lightning.callbacks.RichProgressBar
# ---------------------------- MODEL -------------------------------------------
model:
lr: 5e-4
beta_1: 0.9
beta_2: 0.999
weight_decay: 1e-5
warmup_epochs: 60
max_epochs: 600
warmup_start_lr: 1e-8
eta_min: 1e-8
pretrained_path: "https://huggingface.co/tungnd/climax/resolve/main/5.625deg.ckpt"
net:
class_path: climax.climate_projection.arch.ClimaXClimateBench
init_args:
default_vars: [
'CO2',
'SO2',
'CH4',
'BC'
]
out_vars: "tas" # diurnal_temperature_range, tas, pr, pr90
img_size: [32, 64]
time_history: 10
patch_size: 2
embed_dim: 1024
depth: 8
num_heads: 16
mlp_ratio: 4
drop_path: 0.1
drop_rate: 0.1
parallel_patch_embed: False
freeze_encoder: True
# ---------------------------- DATA -------------------------------------------
data:
root_dir: /home/data/datasets/climate-learn/climatebench/5.625deg/
history: 10
list_train_simu: [
'ssp126',
'ssp370',
'ssp585',
'historical',
'hist-GHG',
'hist-aer'
]
list_test_simu: ['ssp245']
variables: [
'CO2',
'SO2',
'CH4',
'BC'
]
out_variables: 'tas'
train_ratio: 0.9
batch_size: 1
num_workers: 1
pin_memory: False
| ClimaX/configs/climate_projection.yaml/0 | {
"file_path": "ClimaX/configs/climate_projection.yaml",
"repo_id": "ClimaX",
"token_count": 1367
} | 208 |
# Installation Guide
```bash title="clone the repo"
git clone https://github.com/microsoft/ClimaX
```
=== "`conda`"
```bash title="create and activate env"
cd ClimaX
conda env create --file docker/environment.yml
conda activate climaX
```
```bash title="install this package"
# install so the project is in PYTHONPATH
pip install -e .
```
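    If the editable install succeeded, the package should be importable. A quick,
    optional sanity check (this assumes the package name `climax`, matching the
    configs in this repo):
    ```bash title="verify the install (optional)"
    python -c "import climax; print(climax.__name__)"
    ```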
=== "`docker`"
```bash title="build docker container"
cd docker
docker build -t ClimaX .
```
```bash title="run docker container"
cd ClimaX
docker run --gpus all -it --rm --user $(id -u):$(id -g) \
-v $(pwd):/code -v /mnt/data:/data --workdir /code -e PYTHONPATH=/code/src \
ClimaX:latest
```
!!! note
    - `--gpus all -it --rm --user $(id -u):$(id -g)`: enables using all GPUs and runs an interactive session with the current user's UID/GID to avoid `docker` writing files as root.
    - `-v $(pwd):/code -v /mnt/data:/data --workdir /code`: mounts the current directory (i.e. the cloned git repo) to `/code` and the data directory to `/data`, and uses `/code` as the working directory.
| ClimaX/docs/install.md/0 | {
"file_path": "ClimaX/docs/install.md",
"repo_id": "ClimaX",
"token_count": 440
} | 209 |
datadir: /data/CMIP6/AWI-ESM
name: v_component_of_wind
cmip_name: va
era_name: v
run: r1i1p1f1
res:
- 1.40625
# - 5.625 | ClimaX/snakemake_configs/AWI-ESM/config_v_component_of_wind.yml/0 | {
"file_path": "ClimaX/snakemake_configs/AWI-ESM/config_v_component_of_wind.yml",
"repo_id": "ClimaX",
"token_count": 67
} | 210 |
datadir: /data/CMIP6/MPI-ESM
server_prefix: https://esgf.ceda.ac.uk/thredds/fileServer/esg_cmip6/CMIP6/CMIP
name: 10m_u_component_of_wind
cmip_name: uas
era_name: u10
output_type: 6hrPlevPt
run: r1i1p1f1
version: v20190710
res:
- 1.40625
# - 5.625 | ClimaX/snakemake_configs/MPI-ESM/config_10m_u_component_of_wind.yml/0 | {
"file_path": "ClimaX/snakemake_configs/MPI-ESM/config_10m_u_component_of_wind.yml",
"repo_id": "ClimaX",
"token_count": 128
} | 211 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from functools import lru_cache
import numpy as np
import torch
import torch.nn as nn
from timm.models.vision_transformer import Block, PatchEmbed, trunc_normal_
from climax.utils.pos_embed import (
get_1d_sincos_pos_embed_from_grid,
get_2d_sincos_pos_embed,
)
from .parallelpatchembed import ParallelVarPatchEmbed
class ClimaX(nn.Module):
"""Implements the ClimaX model as described in the paper,
https://arxiv.org/abs/2301.10343
Args:
default_vars (list): list of default variables to be used for training
img_size (list): image size of the input data
patch_size (int): patch size of the input data
embed_dim (int): embedding dimension
depth (int): number of transformer layers
decoder_depth (int): number of decoder layers
num_heads (int): number of attention heads
mlp_ratio (float): ratio of mlp hidden dimension to embedding dimension
drop_path (float): stochastic depth rate
drop_rate (float): dropout rate
parallel_patch_embed (bool): whether to use parallel patch embedding
"""
def __init__(
self,
default_vars,
img_size=[32, 64],
patch_size=2,
embed_dim=1024,
depth=8,
decoder_depth=2,
num_heads=16,
mlp_ratio=4.0,
drop_path=0.1,
drop_rate=0.1,
parallel_patch_embed=False,
):
super().__init__()
# TODO: remove time_history parameter
self.img_size = img_size
self.patch_size = patch_size
self.default_vars = default_vars
self.parallel_patch_embed = parallel_patch_embed
# variable tokenization: separate embedding layer for each input variable
if self.parallel_patch_embed:
self.token_embeds = ParallelVarPatchEmbed(len(default_vars), img_size, patch_size, embed_dim)
self.num_patches = self.token_embeds.num_patches
else:
self.token_embeds = nn.ModuleList(
[PatchEmbed(img_size, patch_size, 1, embed_dim) for i in range(len(default_vars))]
)
self.num_patches = self.token_embeds[0].num_patches
# variable embedding to denote which variable each token belongs to
# helps in aggregating variables
self.var_embed, self.var_map = self.create_var_embedding(embed_dim)
# variable aggregation: a learnable query and a single-layer cross attention
self.var_query = nn.Parameter(torch.zeros(1, 1, embed_dim), requires_grad=True)
self.var_agg = nn.MultiheadAttention(embed_dim, num_heads, batch_first=True)
# positional embedding and lead time embedding
self.pos_embed = nn.Parameter(torch.zeros(1, self.num_patches, embed_dim), requires_grad=True)
self.lead_time_embed = nn.Linear(1, embed_dim)
# --------------------------------------------------------------------------
# ViT backbone
self.pos_drop = nn.Dropout(p=drop_rate)
dpr = [x.item() for x in torch.linspace(0, drop_path, depth)] # stochastic depth decay rule
self.blocks = nn.ModuleList(
[
Block(
embed_dim,
num_heads,
mlp_ratio,
qkv_bias=True,
drop_path=dpr[i],
norm_layer=nn.LayerNorm,
drop=drop_rate,
)
for i in range(depth)
]
)
self.norm = nn.LayerNorm(embed_dim)
# --------------------------------------------------------------------------
# prediction head
self.head = nn.ModuleList()
for _ in range(decoder_depth):
self.head.append(nn.Linear(embed_dim, embed_dim))
self.head.append(nn.GELU())
self.head.append(nn.Linear(embed_dim, len(self.default_vars) * patch_size**2))
self.head = nn.Sequential(*self.head)
# --------------------------------------------------------------------------
self.initialize_weights()
def initialize_weights(self):
# initialize pos_emb and var_emb
pos_embed = get_2d_sincos_pos_embed(
self.pos_embed.shape[-1],
int(self.img_size[0] / self.patch_size),
int(self.img_size[1] / self.patch_size),
cls_token=False,
)
self.pos_embed.data.copy_(torch.from_numpy(pos_embed).float().unsqueeze(0))
var_embed = get_1d_sincos_pos_embed_from_grid(self.var_embed.shape[-1], np.arange(len(self.default_vars)))
self.var_embed.data.copy_(torch.from_numpy(var_embed).float().unsqueeze(0))
# token embedding layer
if self.parallel_patch_embed:
for i in range(len(self.token_embeds.proj_weights)):
w = self.token_embeds.proj_weights[i].data
trunc_normal_(w.view([w.shape[0], -1]), std=0.02)
else:
for i in range(len(self.token_embeds)):
w = self.token_embeds[i].proj.weight.data
trunc_normal_(w.view([w.shape[0], -1]), std=0.02)
# initialize nn.Linear and nn.LayerNorm
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=0.02)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
def create_var_embedding(self, dim):
var_embed = nn.Parameter(torch.zeros(1, len(self.default_vars), dim), requires_grad=True)
# TODO: create a mapping from var --> idx
var_map = {}
idx = 0
for var in self.default_vars:
var_map[var] = idx
idx += 1
return var_embed, var_map
@lru_cache(maxsize=None)
def get_var_ids(self, vars, device):
ids = np.array([self.var_map[var] for var in vars])
return torch.from_numpy(ids).to(device)
def get_var_emb(self, var_emb, vars):
ids = self.get_var_ids(vars, var_emb.device)
return var_emb[:, ids, :]
def unpatchify(self, x: torch.Tensor, h=None, w=None):
"""
x: (B, L, V * patch_size**2)
return imgs: (B, V, H, W)
"""
p = self.patch_size
c = len(self.default_vars)
h = self.img_size[0] // p if h is None else h // p
w = self.img_size[1] // p if w is None else w // p
assert h * w == x.shape[1]
x = x.reshape(shape=(x.shape[0], h, w, p, p, c))
x = torch.einsum("nhwpqc->nchpwq", x)
imgs = x.reshape(shape=(x.shape[0], c, h * p, w * p))
return imgs
def aggregate_variables(self, x: torch.Tensor):
"""
x: B, V, L, D
"""
b, _, l, _ = x.shape
x = torch.einsum("bvld->blvd", x)
x = x.flatten(0, 1) # BxL, V, D
var_query = self.var_query.repeat_interleave(x.shape[0], dim=0)
x, _ = self.var_agg(var_query, x, x) # BxL, D
x = x.squeeze()
x = x.unflatten(dim=0, sizes=(b, l)) # B, L, D
return x
def forward_encoder(self, x: torch.Tensor, lead_times: torch.Tensor, variables):
# x: `[B, V, H, W]` shape.
if isinstance(variables, list):
variables = tuple(variables)
# tokenize each variable separately
embeds = []
var_ids = self.get_var_ids(variables, x.device)
if self.parallel_patch_embed:
x = self.token_embeds(x, var_ids) # B, V, L, D
else:
for i in range(len(var_ids)):
id = var_ids[i]
embeds.append(self.token_embeds[id](x[:, i : i + 1]))
x = torch.stack(embeds, dim=1) # B, V, L, D
# add variable embedding
var_embed = self.get_var_emb(self.var_embed, variables)
x = x + var_embed.unsqueeze(2) # B, V, L, D
# variable aggregation
x = self.aggregate_variables(x) # B, L, D
# add pos embedding
x = x + self.pos_embed
# add lead time embedding
lead_time_emb = self.lead_time_embed(lead_times.unsqueeze(-1)) # B, D
lead_time_emb = lead_time_emb.unsqueeze(1)
x = x + lead_time_emb # B, L, D
x = self.pos_drop(x)
# apply Transformer blocks
for blk in self.blocks:
x = blk(x)
x = self.norm(x)
return x
def forward(self, x, y, lead_times, variables, out_variables, metric, lat):
"""Forward pass through the model.
Args:
x: `[B, Vi, H, W]` shape. Input weather/climate variables
y: `[B, Vo, H, W]` shape. Target weather/climate variables
lead_times: `[B]` shape. Forecasting lead times of each element of the batch.
Returns:
loss (list): Different metrics.
preds (torch.Tensor): `[B, Vo, H, W]` shape. Predicted weather/climate variables.
"""
out_transformers = self.forward_encoder(x, lead_times, variables) # B, L, D
preds = self.head(out_transformers) # B, L, V*p*p
preds = self.unpatchify(preds)
out_var_ids = self.get_var_ids(tuple(out_variables), preds.device)
preds = preds[:, out_var_ids]
if metric is None:
loss = None
else:
loss = [m(preds, y, out_variables, lat) for m in metric]
return loss, preds
def evaluate(self, x, y, lead_times, variables, out_variables, transform, metrics, lat, clim, log_postfix):
_, preds = self.forward(x, y, lead_times, variables, out_variables, metric=None, lat=lat)
return [m(preds, y, transform, out_variables, lat, clim, log_postfix) for m in metrics]
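if __name__ == "__main__":
    # Minimal shape-check sketch (illustrative only, not part of the training
    # pipeline). The variable names, batch size and reduced model width below
    # are placeholder assumptions chosen so the example runs quickly on CPU.
    _vars = ["2m_temperature", "10m_u_component_of_wind", "10m_v_component_of_wind"]
    _model = ClimaX(default_vars=_vars, img_size=[32, 64], patch_size=2,
                    embed_dim=128, depth=2, decoder_depth=1, num_heads=4)
    _x = torch.randn(2, len(_vars), 32, 64)  # B, V, H, W
    _y = torch.randn(2, len(_vars), 32, 64)
    _lead_times = torch.rand(2)
    _, _preds = _model.forward(_x, _y, _lead_times, _vars, _vars, metric=None, lat=None)
    print(_preds.shape)  # expected: torch.Size([2, 3, 32, 64])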
| ClimaX/src/climax/arch.py/0 | {
"file_path": "ClimaX/src/climax/arch.py",
"repo_id": "ClimaX",
"token_count": 4699
} | 212 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import os
from pytorch_lightning.cli import LightningCLI
from climax.pretrain.datamodule import MultiSourceDataModule
from climax.pretrain.module import PretrainModule
def main():
# Initialize Lightning with the model and data modules, and instruct it to parse the config yml
cli = LightningCLI(
model_class=PretrainModule,
datamodule_class=MultiSourceDataModule,
seed_everything_default=42,
save_config_overwrite=True,
run=False,
auto_registry=True,
parser_kwargs={"parser_mode": "omegaconf", "error_handler": None},
)
os.makedirs(cli.trainer.default_root_dir, exist_ok=True)
cli.model.set_lat_lon(*cli.datamodule.get_lat_lon())
# fit() runs the training
cli.trainer.fit(cli.model, datamodule=cli.datamodule)
if __name__ == "__main__":
main()
| ClimaX/src/climax/pretrain/train.py/0 | {
"file_path": "ClimaX/src/climax/pretrain/train.py",
"repo_id": "ClimaX",
"token_count": 349
} | 213 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import importlib
import torch.utils.data
from data.base_dataset import BaseDataset
def find_dataset_using_name(dataset_name):
dataset_filename = "data." + dataset_name + "_dataset"
datasetlib = importlib.import_module(dataset_filename)
dataset = None
target_dataset_name = dataset_name.replace('_', '') + 'dataset'
for name, cls in datasetlib.__dict__.items():
if name.lower() == target_dataset_name.lower() \
and issubclass(cls, BaseDataset):
dataset = cls
if dataset is None:
raise ValueError("In %s.py, there should be a subclass of BaseDataset "
"with class name that matches %s in lowercase." %
(dataset_filename, target_dataset_name))
return dataset
def get_option_setter(dataset_name):
dataset_class = find_dataset_using_name(dataset_name)
return dataset_class.modify_commandline_options
def create_dataloader(opt):
dataset = find_dataset_using_name(opt.dataset_mode)
instance = dataset()
instance.initialize(opt)
print("Dataset [%s] of size %d was created" % (type(instance).__name__, len(instance)))
dataloader = torch.utils.data.DataLoader(
instance,
batch_size=opt.batchSize,
shuffle=(opt.phase=='train'),
num_workers=int(opt.nThreads),
drop_last=(opt.phase=='train')
)
return dataloader
| CoCosNet-v2/data/__init__.py/0 | {
"file_path": "CoCosNet-v2/data/__init__.py",
"repo_id": "CoCosNet-v2",
"token_count": 608
} | 214 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Function
from models.networks.base_network import BaseNetwork
from models.networks.architecture import SPADEResnetBlock
class SPADEGenerator(BaseNetwork):
@staticmethod
def modify_commandline_options(parser, is_train):
parser.set_defaults(norm_G='spectralspadesyncbatch3x3')
return parser
def __init__(self, opt):
super().__init__()
self.opt = opt
nf = opt.ngf
self.sw, self.sh = self.compute_latent_vector_size(opt)
ic = 4*3+opt.label_nc
self.fc = nn.Conv2d(ic, 8 * nf, 3, padding=1)
self.head_0 = SPADEResnetBlock(8 * nf, 8 * nf, opt)
self.G_middle_0 = SPADEResnetBlock(8 * nf, 8 * nf, opt)
self.G_middle_1 = SPADEResnetBlock(8 * nf, 8 * nf, opt)
self.up_0 = SPADEResnetBlock(8 * nf, 8 * nf, opt)
self.up_1 = SPADEResnetBlock(8 * nf, 4 * nf, opt)
self.up_2 = SPADEResnetBlock(4 * nf, 2 * nf, opt)
self.up_3 = SPADEResnetBlock(2 * nf, 1 * nf, opt)
final_nc = nf
self.conv_img = nn.Conv2d(final_nc, 3, 3, padding=1)
self.up = nn.Upsample(scale_factor=2)
def compute_latent_vector_size(self, opt):
num_up_layers = 5
sw = opt.crop_size // (2**num_up_layers)
sh = round(sw / opt.aspect_ratio)
return sw, sh
def forward(self, input, warp_out=None):
seg = torch.cat((F.interpolate(warp_out[0], size=(512, 512)), F.interpolate(warp_out[1], size=(512, 512)), F.interpolate(warp_out[2], size=(512, 512)), warp_out[3], input), dim=1)
x = F.interpolate(seg, size=(self.sh, self.sw))
x = self.fc(x)
x = self.head_0(x, seg)
x = self.up(x)
x = self.G_middle_0(x, seg)
x = self.G_middle_1(x, seg)
x = self.up(x)
x = self.up_0(x, seg)
x = self.up(x)
x = self.up_1(x, seg)
x = self.up(x)
x = self.up_2(x, seg)
x = self.up(x)
x = self.up_3(x, seg)
x = self.conv_img(F.leaky_relu(x, 2e-1))
x = torch.tanh(x)
return x
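# Shape note (inferred from the forward pass above, for readers of this file):
# `input` is the label map with `opt.label_nc` channels and `warp_out` is a
# sequence of four warped feature maps; the first three are resized to 512x512
# before being concatenated with `warp_out[3]` and `input` to form `seg`.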
| CoCosNet-v2/models/networks/generator.py/0 | {
"file_path": "CoCosNet-v2/models/networks/generator.py",
"repo_id": "CoCosNet-v2",
"token_count": 1128
} | 215 |
# -*- coding: utf-8 -*-
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import os
import numpy as np
from more_itertools import chunked
import argparse
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--test_batch_size', type=int, default=1000)
args = parser.parse_args()
languages = ['ruby', 'go', 'php', 'python', 'java', 'javascript']
MRR_dict = {}
for language in languages:
file_dir = './results/{}'.format(language)
ranks = []
num_batch = 0
for file in sorted(os.listdir(file_dir)):
print(os.path.join(file_dir, file))
with open(os.path.join(file_dir, file), encoding='utf-8') as f:
batched_data = chunked(f.readlines(), args.test_batch_size)
for batch_idx, batch_data in enumerate(batched_data):
num_batch += 1
correct_score = float(batch_data[batch_idx].strip().split('<CODESPLIT>')[-1])
scores = np.array([float(data.strip().split('<CODESPLIT>')[-1]) for data in batch_data])
rank = np.sum(scores >= correct_score)
ranks.append(rank)
mean_mrr = np.mean(1.0 / np.array(ranks))
print("{} mrr: {}".format(language, mean_mrr))
MRR_dict[language] = mean_mrr
for key, val in MRR_dict.items():
print("{} mrr: {}".format(key, val))
if __name__ == "__main__":
main()
| CodeBERT/CodeBERT/codesearch/mrr.py/0 | {
"file_path": "CodeBERT/CodeBERT/codesearch/mrr.py",
"repo_id": "CodeBERT",
"token_count": 676
} | 216 |
PER_NODE_GPU=8
python -m torch.distributed.launch --nproc_per_node=${PER_NODE_GPU} run.py \
--output_dir ../saved_models/pretrain_codeexecutor_stage_3 \
--data_cache_dir ../saved_models/pretrain_codeexecutor_stage_3 \
--train_data_path /drive/pretrain_codenetmut.json \
--another_train_data_path /drive/pretrain_tutorial.json \
--third_train_data_path /drive/single_line_hard_3_million.json \
--eval_data_path ../data/codenetmut_test.json \
--model_name_or_path ../saved_models/pretrain_codeexecutor_stage_2 \
--block_size 1024 \
--per_gpu_train_batch_size 4 \
--per_gpu_eval_batch_size 8 \
--gradient_accumulation_steps 8 \
--learning_rate 4e-4 \
--node_index=0 \
--gpu_per_node $PER_NODE_GPU \
--weight_decay 0.01 \
--adam_epsilon 1e-6 \
--max_grad_norm 1.0 \
--max_steps 1000000 \
--warmup_steps 10000 \
--save_steps 5000 \
--seed 123 | CodeBERT/CodeExecutor/pretrain/run.sh/0 | {
"file_path": "CodeBERT/CodeExecutor/pretrain/run.sh",
"repo_id": "CodeBERT",
"token_count": 398
} | 217 |
# Natural Language Toolkit: Utility functions
#
# Copyright (C) 2001-2020 NLTK Project
# Author: Steven Bird <stevenbird1@gmail.com>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
from itertools import chain
def pad_sequence(
sequence,
n,
pad_left=False,
pad_right=False,
left_pad_symbol=None,
right_pad_symbol=None,
):
"""
Returns a padded sequence of items before ngram extraction.
>>> list(pad_sequence([1,2,3,4,5], 2, pad_left=True, pad_right=True, left_pad_symbol='<s>', right_pad_symbol='</s>'))
['<s>', 1, 2, 3, 4, 5, '</s>']
>>> list(pad_sequence([1,2,3,4,5], 2, pad_left=True, left_pad_symbol='<s>'))
['<s>', 1, 2, 3, 4, 5]
>>> list(pad_sequence([1,2,3,4,5], 2, pad_right=True, right_pad_symbol='</s>'))
[1, 2, 3, 4, 5, '</s>']
:param sequence: the source data to be padded
:type sequence: sequence or iter
:param n: the degree of the ngrams
:type n: int
:param pad_left: whether the ngrams should be left-padded
:type pad_left: bool
:param pad_right: whether the ngrams should be right-padded
:type pad_right: bool
:param left_pad_symbol: the symbol to use for left padding (default is None)
:type left_pad_symbol: any
:param right_pad_symbol: the symbol to use for right padding (default is None)
:type right_pad_symbol: any
:rtype: sequence or iter
"""
sequence = iter(sequence)
if pad_left:
sequence = chain((left_pad_symbol,) * (n - 1), sequence)
if pad_right:
sequence = chain(sequence, (right_pad_symbol,) * (n - 1))
return sequence
# add a flag to pad the sequence so we get peripheral ngrams?
def ngrams(
sequence,
n,
pad_left=False,
pad_right=False,
left_pad_symbol=None,
right_pad_symbol=None,
):
"""
Return the ngrams generated from a sequence of items, as an iterator.
For example:
>>> from nltk.util import ngrams
>>> list(ngrams([1,2,3,4,5], 3))
[(1, 2, 3), (2, 3, 4), (3, 4, 5)]
Wrap with list for a list version of this function. Set pad_left
or pad_right to true in order to get additional ngrams:
>>> list(ngrams([1,2,3,4,5], 2, pad_right=True))
[(1, 2), (2, 3), (3, 4), (4, 5), (5, None)]
>>> list(ngrams([1,2,3,4,5], 2, pad_right=True, right_pad_symbol='</s>'))
[(1, 2), (2, 3), (3, 4), (4, 5), (5, '</s>')]
>>> list(ngrams([1,2,3,4,5], 2, pad_left=True, left_pad_symbol='<s>'))
[('<s>', 1), (1, 2), (2, 3), (3, 4), (4, 5)]
>>> list(ngrams([1,2,3,4,5], 2, pad_left=True, pad_right=True, left_pad_symbol='<s>', right_pad_symbol='</s>'))
[('<s>', 1), (1, 2), (2, 3), (3, 4), (4, 5), (5, '</s>')]
:param sequence: the source data to be converted into ngrams
:type sequence: sequence or iter
:param n: the degree of the ngrams
:type n: int
:param pad_left: whether the ngrams should be left-padded
:type pad_left: bool
:param pad_right: whether the ngrams should be right-padded
:type pad_right: bool
:param left_pad_symbol: the symbol to use for left padding (default is None)
:type left_pad_symbol: any
:param right_pad_symbol: the symbol to use for right padding (default is None)
:type right_pad_symbol: any
:rtype: sequence or iter
"""
sequence = pad_sequence(
sequence, n, pad_left, pad_right, left_pad_symbol, right_pad_symbol
)
history = []
while n > 1:
# PEP 479, prevent RuntimeError from being raised when StopIteration bubbles out of generator
try:
next_item = next(sequence)
except StopIteration:
# no more data, terminate the generator
return
history.append(next_item)
n -= 1
for item in sequence:
history.append(item)
yield tuple(history)
del history[0] | CodeBERT/CodeReviewer/code/evaluator/CodeBLEU/utils.py/0 | {
"file_path": "CodeBERT/CodeReviewer/code/evaluator/CodeBLEU/utils.py",
"repo_id": "CodeBERT",
"token_count": 1699
} | 218 |
# batch size 6 for 16 GB GPU
mnt_dir="/home/codereview"
MASTER_HOST=localhost && echo MASTER_HOST: ${MASTER_HOST}
MASTER_PORT=23333 && echo MASTER_PORT: ${MASTER_PORT}
RANK=0 && echo RANK: ${RANK}
PER_NODE_GPU=1 && echo PER_NODE_GPU: ${PER_NODE_GPU}
WORLD_SIZE=1 && echo WORLD_SIZE: ${WORLD_SIZE}
NODES=1 && echo NODES: ${NODES}
NCCL_DEBUG=INFO
# change break_cnt to truncate the number of examples (useful at debug time maybe)
# --break_cnt -1 \ will keep the whole dataset
python -m torch.distributed.launch --nproc_per_node ${PER_NODE_GPU} --node_rank=${RANK} --nnodes=${NODES} --master_addr=${MASTER_HOST} --master_port=${MASTER_PORT} ../run_infer_msg.py \
--model_name_or_path microsoft/codereviewer \
--output_dir ../../save/gen \
--load_model_path ../../save/gen/checkpoint \
--output_dir empty \
--eval_file test.jsonl \
--out_file test_out.jsonl \
--max_source_length 512 \
--max_target_length 128 \
--eval_batch_size 12 \
--beam_size 10 \
--gpu_per_node=${PER_NODE_GPU} \
--node_index=${RANK} \
--seed 2233 \
--raw_input \
--break_cnt 20
| CodeBERT/CodeReviewer/code/sh/infer-json.sh/0 | {
"file_path": "CodeBERT/CodeReviewer/code/sh/infer-json.sh",
"repo_id": "CodeBERT",
"token_count": 442
} | 219 |
# Code Generation
## Data Download
```bash
mkdir dataset
cd dataset
wget https://github.com/microsoft/CodeXGLUE/raw/main/Text-Code/text-to-code/dataset/concode/train.json
wget https://github.com/microsoft/CodeXGLUE/raw/main/Text-Code/text-to-code/dataset/concode/dev.json
wget https://github.com/microsoft/CodeXGLUE/raw/main/Text-Code/text-to-code/dataset/concode/test.json
cd ..
```
## Dependency
- pip install torch
- pip install transformers
## Fine-Tune Setting
Here we provide fine-tune settings for code generation, whose results are reported in the paper.
```shell
# Training
python run.py \
--do_train \
--do_eval \
--model_name_or_path microsoft/unixcoder-base \
--train_filename dataset/train.json \
--dev_filename dataset/dev.json \
--output_dir saved_models \
--max_source_length 350 \
--max_target_length 150 \
--beam_size 3 \
--train_batch_size 32 \
--eval_batch_size 32 \
--learning_rate 5e-5 \
--gradient_accumulation_steps 1 \
--num_train_epochs 30
# Output results
python run.py \
--do_test \
--model_name_or_path microsoft/unixcoder-base \
--test_filename dataset/test.json \
--output_dir saved_models \
--max_source_length 350 \
--max_target_length 150 \
--beam_size 3 \
--train_batch_size 32 \
--eval_batch_size 32 \
--learning_rate 5e-5 \
--gradient_accumulation_steps 1 \
--num_train_epochs 30
```
Prediction results on the test set are saved to ```saved_models/predictions.txt```. To obtain the score on the test set, you need to send the predictions to codexglue@microsoft.com.
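
If you only want a rough local sanity check before submitting, you can re-run the test command above with `--test_filename dataset/dev.json` and compare the resulting `predictions.txt` against the dev references. The snippet below is a sketch, not the official scorer, and it assumes each line of `dev.json` is a JSON object whose `code` field holds the reference:

```python
import json

with open("saved_models/predictions.txt") as f:
    hyps = [line.strip() for line in f]
refs = [json.loads(line)["code"].strip() for line in open("dataset/dev.json")]

em = sum(h.split() == r.split() for h, r in zip(hyps, refs)) / len(refs)
print(f"Exact match on dev: {em:.4f}")
```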
| CodeBERT/UniXcoder/downstream-tasks/code-generation/README.md/0 | {
"file_path": "CodeBERT/UniXcoder/downstream-tasks/code-generation/README.md",
"repo_id": "CodeBERT",
"token_count": 553
} | 220 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import statistics
import numpy as np
from collections import defaultdict
import logging
from typing import List, Union
import itertools
logging.basicConfig(
format="SystemLog: [%(asctime)s][%(name)s][%(levelname)s] - %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=logging.INFO,
)
logger = logging.getLogger(__name__)
def _dictionized_ground_truth_results(ground_truth_exec_results):
ground_truth_results_by_task_and_solution = defaultdict(defaultdict)
for result in ground_truth_exec_results:
ground_truth_results_by_task_and_solution[result['task_id']][result['completion']] = result['passed']
return ground_truth_results_by_task_and_solution
def _turn_solution_scores_into_choose_count(sorted_solution_scores, topk):
# sorted_solution_scores: list of (solution, score)
# if wrapped, sorted_solution_scores is list of ([solutions], score)
# return list of (solution, choose_count)
wrapped = True if type(sorted_solution_scores[0][0]) == list else False
result = []
if wrapped:
last_score = sorted_solution_scores[0][1]
merged_solutions_and_score = [sorted_solution_scores[0]]
for solutions, score in sorted_solution_scores[1:]:
if score == last_score:
last_solutions = merged_solutions_and_score[-1][0]
merged_solutions_and_score[-1] = (last_solutions + solutions, score)
else:
merged_solutions_and_score.append((solutions, score))
last_score = score
for solutions_and_score in merged_solutions_and_score:
result.append((solutions_and_score[0], 1)) # choose one from solutions_and_score
else:
topk_scores = sorted(list(set([i[1] for i in sorted_solution_scores])), reverse=True)
for score in topk_scores:
solutions = [s[0] for s in sorted_solution_scores if s[1] == score]
result.append((solutions, 1))
if len(result) >= topk:
return result[:topk]
else:
intial_choose_count = [1]*len(result)
for i in range(topk-len(result)):
intial_choose_count[i%len(result)] += 1
for i, choose_count in enumerate(intial_choose_count):
result[i] = (result[i][0], choose_count)
return result
def get_result_of_sorted_solutions(ground_truth_results_list, sorted_solutions_by_task, topks=[1,2,10]):
# sorted_solutions_by_task {task_id: [([solutions], score), ...]}
def _count_correct(solutions: list, ground_truth_results: dict) -> int:
return sum([ground_truth_results[s] for s in solutions])
ground_truth_results = _dictionized_ground_truth_results(ground_truth_results_list)
topk_results = dict()
for topk in topks:
random_pass_at_k_by_task = pass_at_K_by_task(ground_truth_results_list, k=topk)
pass_rates = []
for task_id in ground_truth_results.keys():
all_wrong_probability = 1
if task_id in sorted_solutions_by_task and sorted_solutions_by_task[task_id]:
solutions_and_probability = _turn_solution_scores_into_choose_count(sorted_solutions_by_task[task_id], topk)
for solutions, choose_count in solutions_and_probability:
current_wrong_prob = _estimator(len(solutions), _count_correct(solutions, ground_truth_results[task_id]), 1)
repeat_current_wrong_prob = pow(current_wrong_prob, choose_count)
all_wrong_probability *= repeat_current_wrong_prob
pass_rates.append(1-all_wrong_probability)
else:
pass_rates.append(random_pass_at_k_by_task[task_id])
# the avg rate of all tasks
topk_results[f'pass@{topk}'] = round(statistics.mean(pass_rates), 4)
logger.info(topk_results)
def pass_at_K_by_task(results, k):
result_dict = defaultdict(list)
for line in results:
result_dict[line['task_id']].append(line['passed'])
result = dict()
for task_id in result_dict.keys():
total = len(result_dict[task_id])
correct = sum(result_dict[task_id])
score = _estimate_pass_at_k(total, [correct], k)[0]
result[task_id] = score
return result
def pass_at_K(results, k = [1, 10, 100]):
def _turn_list_into_dict(result_lines):
result_dict = defaultdict(list)
for line in result_lines:
result_dict[line['task_id']].append(line['passed'])
return result_dict
# Calculate pass@k.
total, correct = [], []
for passed in _turn_list_into_dict(results).values():
total.append(len(passed))
correct.append(sum(passed))
total = np.array(total)
correct = np.array(correct)
ks = k
pass_at_k = {f"pass@{k}": round(_estimate_pass_at_k(total, correct, k).mean(), 4)
for k in ks if (total >= k).all()}
logger.info(pass_at_k)
def _estimator(n: int, c: int, k: int) -> float:
"""
Calculates comb(n - c, k) / comb(n, k).
"""
if n - c < k:
return 0
return np.prod(1.0 - k / np.arange(n - c + 1, n + 1))
def _estimate_pass_at_k(
num_samples: Union[int, List[int], np.ndarray],
num_correct: Union[List[int], np.ndarray],
k: int
) -> np.ndarray:
"""
Estimates pass@k of each problem and returns them in an array.
"""
if isinstance(num_samples, int):
num_samples_it = itertools.repeat(num_samples, len(num_correct))
else:
assert len(num_samples) == len(num_correct)
num_samples_it = iter(num_samples)
return np.array([1.0 - _estimator(int(n), int(c), k) for n, c in zip(num_samples_it, num_correct)]) | CodeT/CodeT/src/evaluation.py/0 | {
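if __name__ == "__main__":
    # Illustrative usage sketch only. The toy records below are placeholders
    # mirroring the fields read above: each entry needs a 'task_id' and a
    # boolean 'passed' flag.
    toy_results = [
        {"task_id": "task_0", "passed": True},
        {"task_id": "task_0", "passed": False},
        {"task_id": "task_1", "passed": False},
        {"task_id": "task_1", "passed": False},
    ]
    pass_at_K(toy_results, k=[1, 2])            # logs averaged pass@1 / pass@2
    print(pass_at_K_by_task(toy_results, k=1))  # per-task pass@1 estimates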
"file_path": "CodeT/CodeT/src/evaluation.py",
"repo_id": "CodeT",
"token_count": 2525
} | 221 |
import os
import json
import random
import argparse
from tqdm import tqdm
import re
import utils_io
from utils import (
GSM8KCase,
TextEntailmentCase,
GSM8KExample,
TextEntailmentExample,
compute_top1_and_recall,
post_process_answer_clutrr_mapping,
post_process_answer_clutrr_cutoff,
)
from transformers import (
AutoTokenizer,
AutoModelForSequenceClassification,
)
import torch
import pdb
import logging
logger = logging.getLogger(__name__)
case_class_map = {
"GSM8K": GSM8KCase,
"CLUTRR": TextEntailmentCase,
"strategyQA": TextEntailmentCase,
}
example_class_map = {
"GSM8K": GSM8KExample,
"CLUTRR": TextEntailmentExample,
"strategyQA": TextEntailmentExample,
}
relation_reverse_map = {
'sister': ['brother'],
'son': ['father', 'mother'],
'aunt': ['nephew', 'niece'],
'granddaughter': ['grandfather', 'grandmother'],
'father': ['son', 'daughter'],
'grandfather': ['grandson', 'granddaughter'],
'grandmother': ['grandson', 'granddaughter'],
'mother-in-law': ['son-in-law', 'daughter-in-law'],
'uncle': ['nephew', 'niece'],
'niece': ['uncle', 'aunt'],
'mother': ['son', 'daughter'],
'brother': ['sister'],
'daughter': ['father', 'mother'],
'nephew': ['uncle', 'aunt'],
'grandson': ['grandfather', 'grandmother'],
'son-in-law': ['father-in-law', 'mother-in-law'],
'father-in-law': ['son-in-law', 'daughter-in-law'],
'daughter-in-law': ['father-in-law', 'mother-in-law'],
}
device = "cuda" if torch.cuda.is_available() else "cpu"
# device = "cpu"
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--generator_result_file", type=str, default=None, help="generator output file in .jsonl format")
parser.add_argument("--output_dir", type=str, default=None, help="output dir")
parser.add_argument("--random_seed", type=int, default=233, help="random_seed")
parser.add_argument("--split", type=str, default="train", help="split (train or test)")
parser.add_argument("--dataset_name", type=str, default="GSM8K", help="GSM8K, CLUTRR, strategyQA")
parser.add_argument("--text_entailment_model_name", type=str, default="roberta-large-mnli", help="roberta-large-mnli, facebook/bart-large-mnli, etc.")
parser.add_argument("--text_entailment_batch_size", type=int, default=512, help="text entailment batch size")
args = parser.parse_args()
random.seed(args.random_seed)
if args.dataset_name != "GSM8K":
logger.info("Loading textual entailment models...")
model = AutoModelForSequenceClassification.from_pretrained(args.text_entailment_model_name).to(device)
model.eval()
tokenizer = AutoTokenizer.from_pretrained(args.text_entailment_model_name)
else:
model = None
tokenizer = None
# loading data from generator output result file
generator_outputs = [json.loads(line) for line in open(utils_io.get_file(args.generator_result_file))]
question_to_ground_truth = {}
# prompt data make up
prompt_data = []
for generator_output in generator_outputs:
context = generator_output["context"]
samples = generator_output["samples"]
for sample in samples:
metadata = generator_output["metadata"]
prompt_data.append({"context": context, "sample": sample, "metadata": metadata})
prompt_data_dict = {}
# some pre-processing about formulas and answers for GSM8K and other datasets
for obj in tqdm(prompt_data):
question = obj["metadata"]["question"].strip().replace("\n", "")
def extract_solution(sample):
sample = sample.strip()
if '####' in sample:
stop = sample.find('\n\n', sample.index('####'))
if stop >= 0:
sample = sample[:stop]
sample = sample.replace('\n\n', '\n')
return sample
sample = extract_solution(obj["sample"])
sample = sample.strip().replace("\n", "%%") # for sequence labeling
ground_truth = obj["metadata"]["ground_truth"].strip().replace("\n\n", "\n").replace("\n", "%%") # for sequence labeling
if args.dataset_name == "GSM8K":
if "####" not in sample:
reg = "<<.+>>[\d\.]+"
eqs = re.findall(reg, sample)
if len(eqs) > 0:
final_answer = eqs[-1].split(">>")[-1].strip()
if final_answer and len(final_answer) > 0 and final_answer[-1] == '.':
final_answer = final_answer[:-1]
if sample[-2:] == "%%":
sample = sample + "####" + final_answer
else:
sample = sample + "%%####" + final_answer
elif args.dataset_name == "CLUTRR":
pass
if "####" not in sample:
reg = "the.+?of"
eqs = re.findall(reg, sample)
if len(eqs) > 0:
final_answer = eqs[-1].replace("the ", "").replace(" of", "")
if sample[-2:] == "%%":
sample = sample + "####" + final_answer
else:
sample = sample + "%%####" + final_answer
if question not in prompt_data_dict:
prompt_data_dict[question] = []
sample = sample.replace("\n", "%%") # for sequence labeling
ground_truth = ground_truth.replace("\n", "%%") # for sequence labeling
question_to_ground_truth[question] = ground_truth
prompt_data_dict[question].append(sample)
# # code change
# if args.dataset_name == "CLUTRR":
# if "####" not in sample:
# continue
# sample_body, sample_answer = sample.split("####")[0].strip(), sample.split("####")[-1].strip()
# # pdb.set_trace()
# if sample_answer in relation_reverse_map:
# for reverse in relation_reverse_map[sample_answer]:
# prompt_data_dict[question].append(sample_body + "####" + reverse)
# check the least sample num among all the cases
min_sample_num_per_case = 99999999
for k in prompt_data_dict:
min_sample_num_per_case = min(min_sample_num_per_case, len(prompt_data_dict[k]))
# converting data into Case
prompt_cases = []
for k in prompt_data_dict:
case = case_class_map[args.dataset_name]("", [])
case.question = k
case.ground_truth = example_class_map[args.dataset_name](question_to_ground_truth[k])
case.entailment_batch_size = args.text_entailment_batch_size
for sample_idx, x in enumerate(prompt_data_dict[k]):
if sample_idx >= min_sample_num_per_case:
break
pred = example_class_map[args.dataset_name](x)
case.preds.append(pred)
prompt_cases.append(case)
print(f"Total cases: {len(prompt_cases)}".replace("\n", "\\n"))
print(f"Case 0's question: {prompt_cases[0].question}".replace("\n", "\\n"))
print(f"Case 0's ground truth: {prompt_cases[0].ground_truth.content}".replace("\n", "\\n"))
print(f"Case 0's sample0: {prompt_cases[0].preds[0].content}".replace("\n", "\\n"))
# print the random top1 and recall of the data
print("*********** Data statistics ***********")
res = compute_top1_and_recall(data=prompt_cases)
for k in res:
print(f"{k}: {res[k]}")
print("")
if args.dataset_name == "CLUTRR":
prompt_cases = post_process_answer_clutrr_cutoff(prompt_cases)
# print the random top1 and recall of the data
print("*********** Data statistics (after post processing for CLUTRR) ***********")
res = compute_top1_and_recall(data=prompt_cases)
for k in res:
print(f"{k}: {res[k]}")
print("")
# Step-wise Labeling
for j, case in enumerate(tqdm(prompt_cases)):
case.do_step_labeling(model=model, tokenizer=tokenizer)
# pdb.set_trace()
for case_idx, case in enumerate(tqdm(prompt_cases)):
case.ground_truth.sequence_labels = example_class_map[args.dataset_name].get_sequence_labels(case.question, case.ground_truth)
for pred_idx, pred in enumerate(case.preds):
pred.sequence_labels = example_class_map[args.dataset_name].get_sequence_labels(case.question, pred)
# pdb.set_trace()
# pdb.set_trace()
sequence_data = []
for case_idx, case in enumerate(tqdm(prompt_cases)):
sequence_data.append(case.ground_truth.sequence_labels)
for pred_idx, pred in enumerate(case.preds):
sequence_data.append(pred.sequence_labels)
# pdb.set_trace()
# Train file is shuffled, but test file is not
if args.split == "train":
random.shuffle(sequence_data)
with open(os.path.join(args.output_dir, '{}.txt'.format(args.split)), "w") as f:
for i, arr in enumerate(tqdm(sequence_data)):
for lhs, rhs in arr:
f.write(f"{lhs} {rhs}\n")
f.write("\n")
if __name__ == '__main__':
main() | CodeT/DIVERSE/code/src/verifier_data_prepare.py/0 | {
"file_path": "CodeT/DIVERSE/code/src/verifier_data_prepare.py",
"repo_id": "CodeT",
"token_count": 4067
} | 222 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import functools
import os
from utils import Tools, FilePathBuilder, CodexTokenizer, CodeGenTokenizer, CONSTANTS
class PromptBuilder:
def __init__(self, query_lines_with_retrieval_results, task_path, log_message, tokenizer):
self.query_lines_with_retrieval_results = query_lines_with_retrieval_results
self.log_message = log_message
if tokenizer == CodexTokenizer:
self.tokenizer = CodexTokenizer()
self.max_retrieval_length = 2000 # half of the max length of the model
elif tokenizer == CodeGenTokenizer:
self.tokenizer = CodeGenTokenizer()
self.max_retrieval_length = 1000
tasks = Tools.load_jsonl(task_path)
self.tasks_by_task_id = {task['metadata']['task_id']: task for task in tasks}
self.seperator = '# ' + '-' * 50
self.max_examples = 10 # maximum number of examples to be included in the prompt
def _make_a_block(self, retrieved_context):
content, sim_score = retrieved_context
metadata = content['metadata']
# put the file path in the comment
assert metadata[0]['fpath_tuple'][0] == metadata[0]['repo']
f_paths = ['/'.join(x['fpath_tuple'][1:]) for x in metadata]
f_paths_str = '\n'.join([f'# {f_path}' for f_path in f_paths])
f_path_comment = f'# the below code fragment can be found in:'
# put code lines in the comment
content_lines = content['context'].splitlines()
content_lines_comment = [f'# {line}' for line in content_lines]
# aggregate the comment and the code lines
block_str = '\n'.join([f_path_comment, f_paths_str, self.seperator] + content_lines_comment + [self.seperator]) + '\n'
tokenized_block = self.tokenizer.tokenize(block_str)
token_len = len(tokenized_block)
return block_str, token_len
def _make_an_extended_block(self, retrieved_context):
content, sim_score = retrieved_context
metadata = content['metadata']
# put the file path in the comment
assert metadata[0]['fpath_tuple'][0] == metadata[0]['repo']
f_paths = ['/'.join(x['fpath_tuple'][1:]) for x in metadata]
f_paths_str = '\n'.join([f'# {f_path}' for f_path in f_paths])
f_path_comment = f'# the below code fragment can be found in:'
# put code lines in the comment
original_code = Tools.read_code(os.path.join(FilePathBuilder.repo_base_dir, *metadata[0]['fpath_tuple']))
code_lines = original_code.splitlines()
end_line_no = metadata[0]['end_line_no']
window_size = metadata[0]['window_size']
slice_size = metadata[0]['slice_size']
new_end_line_no = min(end_line_no + window_size // slice_size, len(code_lines))
new_start_line_no = max(0, new_end_line_no - window_size)
content_lines = code_lines[new_start_line_no:new_end_line_no]
content_lines_comment = [f'# {line}' for line in content_lines]
# aggregate the comment and the code lines
block_str = '\n'.join([f_path_comment, f_paths_str, self.seperator] + content_lines_comment + [self.seperator]) + '\n'
tokenized_block = self.tokenizer.tokenize(block_str)
token_len = len(tokenized_block)
return block_str, token_len
def _build_prompt(self, mode, prompt, top_k_context):
prepend_context = "# Here are some relevant code fragments from other files of the repo:\n"
prepend_context += self.seperator + '\n'
current_token_length = 20 # the length of the head_prompt, same for codex and codegen tokenizer
prepend_blocks = []
chosen_context = []
make_block_func = self._make_an_extended_block if mode == CONSTANTS.rg else self._make_a_block
for retrieved_context in top_k_context[::-1]:
if len(chosen_context) >= self.max_examples:
break
block_str, token_len = make_block_func(retrieved_context)
if current_token_length + token_len < self.max_retrieval_length:
prepend_blocks.insert(0, block_str)
current_token_length += token_len
chosen_context.append(retrieved_context)
else:
continue
prepend_context += ''.join(prepend_blocks) # all the blocks already have a line break at the end
return prepend_context + '\n' + prompt, chosen_context
def build_2nd_stage_input_file(self, mode):
new_prompt_lines = []
for query_line in self.query_lines_with_retrieval_results:
task_id = query_line['metadata']['task_id']
task = self.tasks_by_task_id[task_id]
old_prompt = task['prompt']
top_k_context = query_line['top_k_context']
new_prompt, chosen_context = self._build_prompt(mode, old_prompt, top_k_context)
new_prompt_line = {
'prompt': new_prompt,
'metadata': task['metadata'],
}
new_prompt_line['metadata']['query_window'] = {
'context': query_line['context'],
'metadata': query_line['metadata'],
}
new_prompt_line['metadata']['top_k_context'] = [
{
'context': x[0]['context'],
'metadata': x[0]['metadata'],
'sim_score': x[1],
} for x in chosen_context
]
new_prompt_line['metadata']['window_size'] = query_line['metadata']['window_size']
new_prompt_line['metadata']['slice_size'] = chosen_context[0][0]['metadata'][0]['slice_size']
new_prompt_lines.append(new_prompt_line)
print('done! ' + self.log_message)
return new_prompt_lines
class BuildPromptWrapper:
def __init__(self, vectorizer, benchmark, repos, window_size, slice_size, tokenizer):
if vectorizer == 'one-gram':
self.vector_path_builder = FilePathBuilder.one_gram_vector_path
elif vectorizer == 'ada002':
self.vector_path_builder = FilePathBuilder.ada002_vector_path
self.max_top_k = 20
self.repos = repos
self.window_size = window_size
self.slice_size = slice_size
if benchmark == CONSTANTS.line_benchmark:
self.task_path = FilePathBuilder.random_line_completion_benchmark
elif benchmark == CONSTANTS.api_benchmark:
self.task_path = FilePathBuilder.api_completion_benchmark
elif benchmark == CONSTANTS.short_api_benchmark:
self.task_path = FilePathBuilder.short_api_completion_benchmark
elif benchmark == CONSTANTS.short_line_benchmark:
self.task_path = FilePathBuilder.short_random_line_completion_benchmark
self.benchmark = benchmark
self.tokenizer = tokenizer
def _run(self, mode, query_window_path_builder, output_file_path):
workers = []
for repo in self.repos:
query_window_path = query_window_path_builder(repo, self.window_size)
query_line_path = self.vector_path_builder(query_window_path)
repo_window_path = FilePathBuilder.repo_windows_path(repo, self.window_size, self.slice_size)
repo_embedding_path = self.vector_path_builder(repo_window_path)
retrieval_results = FilePathBuilder.retrieval_results_path(query_line_path, repo_embedding_path, self.max_top_k)
query_lines_with_retrieval_results = Tools.load_pickle(retrieval_results)
log_message = f'repo: {repo}, window: {self.window_size}, slice: {self.slice_size}'
worker = PromptBuilder(query_lines_with_retrieval_results, self.task_path, log_message, self.tokenizer)
workers.append(worker)
lines = []
for worker in workers:
lines += worker.build_2nd_stage_input_file(mode)
Tools.dump_jsonl(lines, output_file_path)
def build_first_search_prompt(self, mode, output_path):
query_line_path_temp = functools.partial(FilePathBuilder.search_first_window_path, self.benchmark, mode)
self._run(mode, query_line_path_temp, output_path)
def build_prediction_prompt(self, mode, prediction_path, output_path):
query_line_path_temp = functools.partial(FilePathBuilder.gen_first_window_path, self.benchmark, mode, prediction_path)
self._run(mode, query_line_path_temp, output_path)
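# Illustrative usage sketch (the repo name, window/slice sizes and output path
# are placeholders, not values prescribed by this module):
#
#   wrapper = BuildPromptWrapper('one-gram', CONSTANTS.api_benchmark,
#                                repos=['my_repo'], window_size=20, slice_size=2,
#                                tokenizer=CodexTokenizer)
#   wrapper.build_first_search_prompt(CONSTANTS.rg, 'prompts/rg-one-gram-api.jsonl')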
| CodeT/RepoCoder/build_prompt.py/0 | {
"file_path": "CodeT/RepoCoder/build_prompt.py",
"repo_id": "CodeT",
"token_count": 3709
} | 223 |
#!/bin/zsh
#
# A shell script to clean up the setup of Codex CLI for zsh
#
set -e
CODEX_CLI_PATH="$( cd "$( dirname "$0" )" && cd .. && pwd )"
openAIConfigPath="$CODEX_CLI_PATH/src/openaiapirc"
zshrcPath="$HOME/.zshrc"
# 1. Remove settings in .zshrc
sed -i '' '/### Codex CLI setup - start/,/### Codex CLI setup - end/d' $zshrcPath
echo "Removed settings in $zshrcPath if present"
# 2. Remove openaiapirc
rm -f $openAIConfigPath
echo "Removed $openAIConfigPath"
echo "Codex CLI clean up completed. Please open a new zsh to continue." | Codex-CLI/scripts/zsh_cleanup.sh/0 | {
"file_path": "Codex-CLI/scripts/zsh_cleanup.sh",
"repo_id": "Codex-CLI",
"token_count": 212
} | 224 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
File: large_person_group.py
Description: Large Person Group section of the Cognitive Face API.
"""
from . import util
def create(large_person_group_id, name=None, user_data=None):
"""Create a new large person group with specified `large_person_group_id`,
`name` and user-provided `user_data`.
Args:
large_person_group_id: User-provided `large_person_group_id` as a
string. The valid characters include numbers, English letters in
lower case, '-' and '_'. The maximum length is 64.
name: Name of the created large person group, maximum length is 128.
user_data: Optional user defined data for the large person group.
Length should not exceed 16KB.
Returns:
An empty response body.
"""
name = name or large_person_group_id
url = 'largepersongroups/{}'.format(large_person_group_id)
json = {
'name': name,
'userData': user_data,
}
return util.request('PUT', url, json=json)
def delete(large_person_group_id):
"""Delete an existing large person group. Persisted face images of all
people in the large person group will also be deleted.
Args:
large_person_group_id: The `large_person_group_id` of the large person
group to be deleted.
Returns:
An empty response body.
"""
url = 'largepersongroups/{}'.format(large_person_group_id)
return util.request('DELETE', url)
def get(large_person_group_id):
"""Retrieve the information of a large person group, including its `name`
and `user_data`.
Args:
large_person_group_id: `large_person_group_id` of the target large
person group.
Returns:
The large person group's information.
"""
url = 'largepersongroups/{}'.format(large_person_group_id)
return util.request('GET', url)
def get_status(large_person_group_id):
"""Retrieve the training status of the large person group (completed or
ongoing). Training can be triggered by `large_person_group.train`. The
training will process for a while on the server side.
Args:
large_person_group_id: `large_person_group_id` of the target large
person group.
Returns:
The large person group's training status.
"""
url = 'largepersongroups/{}/training'.format(large_person_group_id)
return util.request('GET', url)
def list(start=None, top=None):
"""List large person groups and their information.
Args:
        start: Optional parameter. Lists large person groups starting from the
            smallest `large_person_group_id` greater than "start". It contains
            no more than 64 characters. Default is empty.
top: The number of large person groups to list, ranging in [1, 1000].
Default is 1000.
Returns:
An array of large person groups and their information
(`large_person_group_id`, `name` and `user_data`).
"""
url = 'largepersongroups'
params = {
'start': start,
'top': top,
}
return util.request('GET', url, params=params)
def train(large_person_group_id):
"""Queue a large person group training task, the training task may not be
started immediately.
Args:
large_person_group_id: Target large person group to be trained.
Returns:
An empty JSON body.
"""
url = 'largepersongroups/{}/train'.format(large_person_group_id)
return util.request('POST', url)
def update(large_person_group_id, name=None, user_data=None):
"""Update an existing large person group's `name` and `user_data`. The
    properties which do not appear in the request body will not be updated.
Args:
large_person_group_id: `large_person_group_id` of the large person
group to be updated.
name: Optional parameter. Large person group display name. The maximum
length is 128.
user_data: Optional parameter. User-provided data attached to the large
person group. The size limit is 16KB.
Returns:
An empty response body.
"""
url = 'largepersongroups/{}'.format(large_person_group_id)
json = {
'name': name,
'userData': user_data,
}
return util.request('PATCH', url, json=json)
| Cognitive-Face-Python/cognitive_face/large_person_group.py/0 | {
"file_path": "Cognitive-Face-Python/cognitive_face/large_person_group.py",
"repo_id": "Cognitive-Face-Python",
"token_count": 1613
} | 225 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
File: util.py
Description: Unittest shared utilities for Python SDK of the Cognitive Face
API.
"""
import time
import uuid
import cognitive_face as CF
from . import config
# Base URL of online images.
BASE_URL_IMAGE = ('https://raw.githubusercontent.com/'
'Microsoft/Cognitive-Face-Windows/master/Data/')
# Notification of wait.
MSG_WAIT = 'Wait for {} seconds so as to avoid exceeding free quote.'
def wait():
"""Wait for some interval to avoid exceeding quote."""
print(MSG_WAIT.format(config.TIME_SLEEP))
time.sleep(config.TIME_SLEEP)
class DataStore(object):
"""Store the needed data for unittests."""
@classmethod
def setup_face(cls):
"""Setup Face related data."""
image = '{}PersonGroup/Family1-Dad/Family1-Dad3.jpg'.format(
BASE_URL_IMAGE)
res = CF.face.detect(image)
print('[face_id] res: {}'.format(res))
cls.face_id = res[0]['faceId']
print('[face_id]: {}'.format(cls.face_id))
wait()
image = '{}PersonGroup/Family1-Mom/Family1-Mom3.jpg'.format(
BASE_URL_IMAGE)
res = CF.face.detect(image)
print('[another_face_id] res: {}'.format(res))
cls.another_face_id = res[0]['faceId']
print('[another_face_id]: {}'.format(cls.another_face_id))
wait()
image = '{}identification1.jpg'.format(BASE_URL_IMAGE)
res = CF.face.detect(image)
cls.face_ids = []
print('[face_ids] res: {}'.format(res))
for face in res:
cls.face_ids.append(face['faceId'])
print('[face_ids]: {}'.format(cls.face_ids))
wait()
@classmethod
def setup_face_list(cls):
"""Setup Face List related data."""
cls.face_list_id = str(uuid.uuid1())
res = CF.face_list.create(cls.face_list_id)
print('[face_list_id] res: {}'.format(res))
print('[face_list_id]: {}'.format(cls.face_list_id))
wait()
cls.face_persisted_face_id = {}
for name in ['Dad', 'Daughter', 'Mom', 'Son']:
cls.face_persisted_face_id[name] = []
for idx in range(1, 3):
image = '{}PersonGroup/Family1-{}/Family1-{}{}.jpg'.format(
BASE_URL_IMAGE, name, name, idx)
res = CF.face_list.add_face(image, cls.face_list_id)
cls.face_persisted_face_id[name].append(res['persistedFaceId'])
print('[face_persisted_face_id.{}.{}] res: {}'.format(
name, idx, res))
print('[face_persisted_face_id.{}]: {}'.format(
name, cls.face_persisted_face_id[name]))
wait()
@classmethod
def setup_person_group(cls):
"""Setup Person and Person Group related data."""
cls.person_group_id = str(uuid.uuid1())
res = CF.person_group.create(cls.person_group_id)
print('[person_group_id] res: {}'.format(res))
print('[person_group_id]: {}'.format(cls.person_group_id))
wait()
cls.person_id = {}
cls.person_persisted_face_id = {}
for name in ['Dad', 'Daughter', 'Mom', 'Son']:
res = CF.person.create(cls.person_group_id, name)
cls.person_id[name] = res['personId']
print('[person_id.{}] res: {}'.format(name, res))
print('[person_id.{}]: {}'.format(name, cls.person_id[name]))
wait()
cls.person_persisted_face_id[name] = []
for idx in range(1, 3):
image = '{}PersonGroup/Family1-{}/Family1-{}{}.jpg'.format(
BASE_URL_IMAGE, name, name, idx)
res = CF.person.add_face(image, cls.person_group_id,
cls.person_id[name])
cls.person_persisted_face_id[name].append(
res['persistedFaceId'])
print('[person_persisted_face_id.{}.{}] res: {}'.format(
name, idx, res))
print('[person_persisted_face_id.{}]: {}'.format(
name, cls.person_persisted_face_id[name]))
wait()
res = CF.person_group.train(cls.person_group_id)
print('[person_group.train]res: {}', res)
wait()
@classmethod
def setup_large_face_list(cls):
"""Setup Large Face List related data."""
cls.large_face_list_id = str(uuid.uuid1())
res = CF.large_face_list.create(cls.large_face_list_id)
print('[large_face_list_id] res: {}'.format(res))
print('[large_face_list_id]: {}'.format(cls.large_face_list_id))
wait()
cls.large_face_list_face_id = {}
for name in ['Dad', 'Daughter', 'Mom', 'Son']:
cls.large_face_list_face_id[name] = []
for idx in range(1, 3):
image = '{}PersonGroup/Family1-{}/Family1-{}{}.jpg'.format(
BASE_URL_IMAGE, name, name, idx)
res = CF.large_face_list_face.add(image,
cls.large_face_list_id)
cls.large_face_list_face_id[name].append(
res['persistedFaceId'])
print('[large_face_list_face_id.{}.{}] res: {}'.format(
name, idx, res))
print('[large_face_list_face_id.{}]: {}'.format(
name, cls.large_face_list_face_id[name]))
wait()
res = CF.large_face_list.train(cls.large_face_list_id)
print('[large_face_list.train]res: {}', res)
wait()
@classmethod
def setup_large_person_group(cls):
"""Setup Large Person Group related data."""
cls.large_person_group_id = str(uuid.uuid1())
res = CF.large_person_group.create(cls.large_person_group_id)
print('[large_person_group_id] res: {}'.format(res))
print('[large_person_group_id]: {}'.format(cls.large_person_group_id))
wait()
cls.large_person_group_person_id = {}
cls.large_person_group_person_face_id = {}
for name in ['Dad', 'Daughter', 'Mom', 'Son']:
res = CF.large_person_group_person.create(
cls.large_person_group_id, name)
cls.large_person_group_person_id[name] = res['personId']
print(
'[large_person_group_person_id.{}] res: {}'.format(name, res))
print('[large_person_group_person_id.{}]: {}'.format(
name, cls.large_person_group_person_id[name]))
wait()
cls.large_person_group_person_face_id[name] = []
for idx in range(1, 3):
image = '{}PersonGroup/Family1-{}/Family1-{}{}.jpg'.format(
BASE_URL_IMAGE, name, name, idx)
res = CF.large_person_group_person_face.add(
image, cls.large_person_group_id,
cls.large_person_group_person_id[name])
cls.large_person_group_person_face_id[name].append(
res['persistedFaceId'])
print('[large_person_group_person_face_id.{}.{}] res: {}'.
format(name, idx, res))
print('[large_person_group_person_face_id.{}]: {}'.format(
name, cls.large_person_group_person_face_id[name]))
wait()
res = CF.large_person_group.train(cls.large_person_group_id)
print('[large_person_group.train]res: {}', res)
wait()
| Cognitive-Face-Python/cognitive_face/tests/util.py/0 | {
"file_path": "Cognitive-Face-Python/cognitive_face/tests/util.py",
"repo_id": "Cognitive-Face-Python",
"token_count": 3869
} | 226 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
File: setup.py
Description: Setup script to build and distribute cognitive_face module.
"""
import io
from setuptools import find_packages
from setuptools import setup
README = 'README.md'
def readme():
"""Parse README for long_description."""
return io.open(README, encoding='utf-8').read()
setup(
name='cognitive_face',
version='1.5.0',
packages=find_packages(exclude=['tests']),
install_requires=['requests'],
author='Microsoft',
description='Python SDK for the Cognitive Face API',
long_description=readme(),
long_description_content_type='text/markdown',
license='MIT',
url='https://github.com/Microsoft/Cognitive-Face-Python',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Topic :: Scientific/Engineering :: Image Recognition',
],
test_suite='nose.collector',
tests_require=['nose'])
| Cognitive-Face-Python/setup.py/0 | {
"file_path": "Cognitive-Face-Python/setup.py",
"repo_id": "Cognitive-Face-Python",
"token_count": 406
} | 227 |
# Does Deep Learning Learn to Abstract?
This is the official repo for the paper
*'Does Deep Learning Learn to Abstract? A Systematic Probing Framework'*.
This work has been accepted at ICLR 2023.
[OpenReview](https://openreview.net/forum?id=QB1dMPEXau5)
This repo contains data and main code used in this work.
We hope this work can facilitate understanding of the abstraction capability of deep learning models.
## Data
```shell
|-- data
|-- Com
|-- set1
|-- pretrain.json
|-- finetune.json
|-- test.json
|-- pretrain_contrast.json
|-- set2
|-- set3
|-- Mod
```
`./data` contains our two probing tasks Com and Mod.
Each probing task contain 3 different sets.
The difference among sets is that they use different terminals.
Our reported results are averaged on 3 sets.
Each set contain 4 data files.
Each line in the file is one example that has an `input` sequence and `output` sequence.
`pretrain.json` is for MainExp pretraning.
`pretrain_contrast.json` is for ContrastExp pretraining.
`finetune.json` and `test.json` is for finetuning and testing in all three Exps.
## Code
We provide the code for T5 models.
Code for GPT2 models is on the way.
### Requirements
The main dependency is `pytorch` and `transformers`.
```bash
pip install -r requirements.txt
```
### MainExp
```bash
sh Com_MainExp_pretrain.sh
```
This will start training the T5-Base model on `./data/Com/set1/pretrain.json`.
You can change the subtask, subset, and other hyper-parameters
in `Com_MainExp_pretrain.sh` and `t5_run_train.py`.
After the training has finished, the model will be saved in `./code/t5_code/checkpoint/Com/MainExp_pretrain_set1_seed1/checkpoint-100000/`.
```bash
sh Com_MainExp_finetune.sh
```
This will load the pretrained checkpoint and finetune on `./data/Com/set1/finetune.json`.
The model will be saved in `./code/t5_code/checkpoint/Com/MainExp_finetune_set1_seed1/checkpoint-100000/`.
```bash
sh Com_MainExp_test.sh
```
This will test the finetuned model on `./data/Com/set1/test.json`.
The testing results will be logged in `./code/t5_code/checkpoint/Com/MainExp_finetune_set1_seed1/checkpoint-50000_test_beam5.txt`
### ControlExp
```bash
sh Com_ControlExp_finetune.sh
sh Com_ControlExp_test.sh
```
### ContrastExp
```bash
sh Com_ContrastExp_pretrain.sh
sh Com_ContrastExp_finetune.sh
sh Com_ContrastExp_test.sh
```
## Citation
```bibtex
@inproceedings{
an2023does,
title={Does Deep Learning Learn to Abstract? A Systematic Probing Framework},
author={Shengnan An and Zeqi Lin and Bei Chen and Qiang Fu and Nanning Zheng and Jian-Guang Lou},
booktitle={International Conference on Learning Representations},
year={2023},
url={https://openreview.net/forum?id=QB1dMPEXau5}
}
```
| ContextualSP/abstraction_probing/README.md/0 | {
"file_path": "ContextualSP/abstraction_probing/README.md",
"repo_id": "ContextualSP",
"token_count": 976
} | 228 |
export CUDA_VISIBLE_DEVICES=4
python t5_run_eval.py \
--model_name_or_path ./checkpoint/Mod/MainExp_finetune_set1_seed1/checkpoint-50000 \
--subtask Mod \
--validation_file test \
--ebatch_size 16 \
--set set1 | ContextualSP/abstraction_probing/code/t5_code/Mod_MainExp_test.sh/0 | {
"file_path": "ContextualSP/abstraction_probing/code/t5_code/Mod_MainExp_test.sh",
"repo_id": "ContextualSP",
"token_count": 84
} | 229 |
import numpy as np
def update_roberta_keys(state, nlayer=24):
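    """Split the fused `self_attn.in_proj_weight`/`in_proj_bias` entries of a fairseq
    RoBERTa state dict into separate q/k/v projection weights and biases.
    Returns the state unchanged if it already uses separate q/k/v projections."""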
for key in state.keys():
if "self_attn.q_proj" in key:
return state
new_dict = {}
for key, val in state.items():
if not "self_attn.in_proj_" in key:
new_dict[key] = val
for i in range(nlayer):
mhaw = "decoder.sentence_encoder.layers.{}.self_attn.in_proj_weight".format(i)
mhab = "decoder.sentence_encoder.layers.{}.self_attn.in_proj_bias".format(i)
weight = state[mhaw]
        bias = state[mhab]
size = int(weight.size(0) / 3)
# query, key, value
qw = "decoder.sentence_encoder.layers.{}.self_attn.q_proj.weight".format(i)
kw = "decoder.sentence_encoder.layers.{}.self_attn.k_proj.weight".format(i)
vw = "decoder.sentence_encoder.layers.{}.self_attn.v_proj.weight".format(i)
new_dict[qw] = weight[:size, :]
new_dict[kw] = weight[size : size * 2, :]
new_dict[vw] = weight[size * 2 :, :]
# reconstruct weight
rweight = np.concatenate(
(
new_dict[qw].cpu().numpy(),
new_dict[kw].cpu().numpy(),
new_dict[vw].cpu().numpy(),
),
axis=0,
)
assert np.array_equal(rweight, weight.cpu().numpy())
qb = "decoder.sentence_encoder.layers.{}.self_attn.q_proj.bias".format(i)
kb = "decoder.sentence_encoder.layers.{}.self_attn.k_proj.bias".format(i)
vb = "decoder.sentence_encoder.layers.{}.self_attn.v_proj.bias".format(i)
        new_dict[qb] = bias[:size]
        new_dict[kb] = bias[size : size * 2]
        new_dict[vb] = bias[size * 2 :]
        rbias = np.concatenate(
            (
                new_dict[qb].cpu().numpy(),
                new_dict[kb].cpu().numpy(),
                new_dict[vb].cpu().numpy(),
            ),
            axis=0,
        )
        assert np.array_equal(rbias, bias.cpu().numpy())
return new_dict
def patch_name_dict(state):
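    """Prefix sentence-encoder and classification-head parameter names with `bert.`
    so that they match the parameter naming expected by this codebase."""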
new_state = {}
for key, val in state.items():
if key.startswith("decoder.sentence_encoder"):
key = "bert.{}".format(key)
new_state[key] = val
elif key.startswith("classification_heads"):
key = "bert.{}".format(key)
new_state[key] = val
else:
new_state[key] = val
return new_state
| ContextualSP/adaptershare/data_utils/roberta_utils.py/0 | {
"file_path": "ContextualSP/adaptershare/data_utils/roberta_utils.py",
"repo_id": "ContextualSP",
"token_count": 1281
} | 230 |
import os
import argparse
import random
from sys import path
path.append(os.getcwd())
from experiments.common_utils import dump_rows
from data_utils.task_def import DataFormat
from data_utils.log_wrapper import create_logger
from experiments.superglue.superglue_utils import *
logger = create_logger(__name__, to_disk=True, log_file="superglue_prepro.log")
def parse_args():
parser = argparse.ArgumentParser(
description="Preprocessing SuperGLUE dataset."
)
parser.add_argument("--seed", type=int, default=13)
parser.add_argument("--root_dir", type=str, default="data")
args = parser.parse_args()
return args
def main(args):
root = args.root_dir
assert os.path.exists(root)
######################################
# SuperGLUE tasks
######################################
cb_train_path = os.path.join(root, "CB/train.jsonl")
cb_dev_path = os.path.join(root, "CB/val.jsonl")
cb_test_path = os.path.join(root, "CB/test.jsonl")
boolq_train_path = os.path.join(root, "BoolQ/train.jsonl")
boolq_dev_path = os.path.join(root, "BoolQ/val.jsonl")
boolq_test_path = os.path.join(root, "BoolQ/test.jsonl")
copa_train_path = os.path.join(root, "COPA/train.jsonl")
copa_dev_path = os.path.join(root, "COPA/val.jsonl")
copa_test_path = os.path.join(root, "COPA/test.jsonl")
record_train_path = os.path.join(root, "ReCoRD/train.jsonl")
record_dev_path = os.path.join(root, "ReCoRD/val.jsonl")
record_test_path = os.path.join(root, "ReCoRD/test.jsonl")
wic_train_path = os.path.join(root, "WiC/train.jsonl")
wic_dev_path = os.path.join(root, "WiC/val.jsonl")
wic_test_path = os.path.join(root, "WiC/test.jsonl")
multirc_train_path = os.path.join(root, "MultiRC/train.jsonl")
multirc_dev_path = os.path.join(root, "MultiRC/val.jsonl")
multirc_test_path = os.path.join(root, "MultiRC/test.jsonl")
######################################
# Loading DATA
######################################
cb_train_data = load_cb(cb_train_path)
cb_dev_data = load_cb(cb_dev_path)
cb_test_data = load_cb(cb_test_path)
logger.info("Loaded {} CB train samples".format(len(cb_train_data)))
logger.info("Loaded {} CB dev samples".format(len(cb_dev_data)))
logger.info("Loaded {} CB test samples".format(len(cb_test_data)))
boolq_train_data = load_boolq(boolq_train_path)
boolq_dev_data = load_boolq(boolq_dev_path)
boolq_test_data = load_boolq(boolq_test_path)
logger.info("Loaded {} BoolQ train samples".format(len(boolq_train_data)))
logger.info("Loaded {} BoolQ dev samples".format(len(boolq_dev_data)))
logger.info("Loaded {} BoolQ test samples".format(len(boolq_test_data)))
copa_train_data = load_copa_mtdnn(copa_train_path)
copa_dev_data = load_copa_mtdnn(copa_dev_path)
copa_test_data = load_copa_mtdnn(copa_test_path)
logger.info("Loaded {} COPA train samples".format(len(copa_train_data)))
logger.info("Loaded {} COPA dev samples".format(len(copa_dev_data)))
logger.info("Loaded {} COPA test samples".format(len(copa_test_data)))
record_train_data = load_record_mtdnn(record_train_path)
record_dev_data = load_record_mtdnn(record_dev_path)
record_test_data = load_record_mtdnn(record_test_path)
logger.info("Loaded {} Record train samples".format(len(record_train_data)))
logger.info("Loaded {} Record dev samples".format(len(record_dev_data)))
logger.info("Loaded {} Record test samples".format(len(record_test_data)))
wic_train_data = load_wic_mtdnn(wic_train_path)
wic_dev_data = load_wic_mtdnn(wic_dev_path)
wic_test_data = load_wic_mtdnn(wic_test_path)
logger.info("Loaded {} WiC train samples".format(len(wic_train_data)))
logger.info("Loaded {} WiC dev samples".format(len(wic_dev_data)))
logger.info("Loaded {} WiC test samples".format(len(wic_test_data)))
multirc_train_data = load_multirc_mtdnn(multirc_train_path)
multirc_dev_data = load_multirc_mtdnn(multirc_dev_path)
multirc_test_data = load_multirc_mtdnn(multirc_test_path)
logger.info("Loaded {} MultiRC train samples".format(len(multirc_train_data)))
logger.info("Loaded {} MultiRC dev samples".format(len(multirc_dev_data)))
logger.info("Loaded {} MultiRC test samples".format(len(multirc_test_data)))
canonical_data_suffix = "canonical_data"
canonical_data_root = os.path.join(root, canonical_data_suffix)
if not os.path.isdir(canonical_data_root):
os.mkdir(canonical_data_root)
cb_train_fout = os.path.join(canonical_data_root, "cb_train.tsv")
cb_dev_fout = os.path.join(canonical_data_root, "cb_dev.tsv")
cb_test_fout = os.path.join(canonical_data_root, "cb_test.tsv")
dump_rows(cb_train_data, cb_train_fout, DataFormat.PremiseAndOneHypothesis)
dump_rows(cb_dev_data, cb_dev_fout, DataFormat.PremiseAndOneHypothesis)
dump_rows(cb_test_data, cb_test_fout, DataFormat.PremiseAndOneHypothesis)
logger.info("done with CB")
boolq_train_fout = os.path.join(canonical_data_root, "boolq_train.tsv")
boolq_dev_fout = os.path.join(canonical_data_root, "boolq_dev.tsv")
boolq_test_fout = os.path.join(canonical_data_root, "boolq_test.tsv")
dump_rows(boolq_train_data, boolq_train_fout, DataFormat.PremiseAndOneHypothesis)
dump_rows(boolq_dev_data, boolq_dev_fout, DataFormat.PremiseAndOneHypothesis)
dump_rows(boolq_test_data, boolq_test_fout, DataFormat.PremiseAndOneHypothesis)
logger.info("done with boolq")
copa_train_fout = os.path.join(canonical_data_root, "copa_train.tsv")
copa_dev_fout = os.path.join(canonical_data_root, "copa_dev.tsv")
copa_test_fout = os.path.join(canonical_data_root, "copa_test.tsv")
dump_rows(copa_train_data, copa_train_fout, DataFormat.PremiseAndMultiHypothesis)
dump_rows(copa_dev_data, copa_dev_fout, DataFormat.PremiseAndMultiHypothesis)
dump_rows(copa_test_data, copa_test_fout, DataFormat.PremiseAndMultiHypothesis)
logger.info("done with record")
record_train_fout = os.path.join(canonical_data_root, "record_train.tsv")
record_dev_fout = os.path.join(canonical_data_root, "record_dev.tsv")
record_test_fout = os.path.join(canonical_data_root, "record_test.tsv")
dump_rows(record_train_data, record_train_fout, DataFormat.ClozeChoice)
dump_rows(record_dev_data, record_dev_fout, DataFormat.ClozeChoice)
dump_rows(record_test_data, record_test_fout, DataFormat.ClozeChoice)
logger.info("done with record")
wic_train_fout = os.path.join(canonical_data_root, "wic_train.tsv")
wic_dev_fout = os.path.join(canonical_data_root, "wic_dev.tsv")
wic_test_fout = os.path.join(canonical_data_root, "wic_test.tsv")
dump_rows(wic_train_data, wic_train_fout, DataFormat.PremiseAndOneHypothesis)
dump_rows(wic_dev_data, wic_dev_fout, DataFormat.PremiseAndOneHypothesis)
dump_rows(wic_test_data, wic_test_fout, DataFormat.PremiseAndOneHypothesis)
logger.info("done with WiC")
multirc_train_fout = os.path.join(canonical_data_root, "multirc_train.tsv")
multirc_dev_fout = os.path.join(canonical_data_root, "multirc_dev.tsv")
multirc_test_fout = os.path.join(canonical_data_root, "multirc_test.tsv")
dump_rows(multirc_train_data, multirc_train_fout, DataFormat.PremiseAndOneHypothesis)
dump_rows(multirc_dev_data, multirc_dev_fout, DataFormat.PremiseAndOneHypothesis)
dump_rows(multirc_test_data, multirc_test_fout, DataFormat.PremiseAndOneHypothesis)
logger.info("done with MultiRC")
if __name__ == "__main__":
args = parse_args()
main(args)
| ContextualSP/adaptershare/experiments/superglue/superglue_prepro.py/0 | {
"file_path": "ContextualSP/adaptershare/experiments/superglue/superglue_prepro.py",
"repo_id": "ContextualSP",
"token_count": 3093
} | 231 |
# coding=utf-8
# Copyright (c) Microsoft. All rights reserved.
import shutil
import os
import subprocess
import filecmp
import os.path
def compare_files(dir1, dir2, common_files, text_mode=False):
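    """Compare `common_files` across `dir1` and `dir2`, returning (same_files, diff_files):
    the files whose contents match and the files whose contents differ."""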
same_files = []
diff_files = []
for common_file in common_files:
path0 = os.path.join(dir1, common_file)
path1 = os.path.join(dir2, common_file)
open_mode = "r" if text_mode else "rb"
s0 = open(path0, open_mode).read()
s1 = open(path1, open_mode).read()
if s0 == s1:
same_files.append(common_file)
else:
diff_files.append(common_file)
return same_files, diff_files
def are_dir_trees_equal(dir1, dir2):
"""
Compare two directories recursively. Files in each directory are
assumed to be equal if their names and contents are equal.
@param dir1: First directory path
@param dir2: Second directory path
@return: True if the directory trees are the same and
there were no errors while accessing the directories or files,
False otherwise.
"""
dirs_cmp = filecmp.dircmp(dir1, dir2)
if len(dirs_cmp.left_only)>0 or len(dirs_cmp.right_only)>0 or \
len(dirs_cmp.funny_files)>0:
return False
_, diff_files = compare_files(dir1, dir2, dirs_cmp.common_files, text_mode=True)
if len(diff_files) > 0:
return False
for common_dir in dirs_cmp.common_dirs:
new_dir1 = os.path.join(dir1, common_dir)
new_dir2 = os.path.join(dir2, common_dir)
if not are_dir_trees_equal(new_dir1, new_dir2):
return False
return True
def test_prepro():
if os.access("./run_test", os.F_OK):
shutil.rmtree("./run_test")
os.mkdir("./run_test")
shutil.copytree("./tests/sample_data/input", "./run_test/sample_data")
result = subprocess.check_output("python experiments/glue/glue_prepro.py --root_dir run_test/sample_data", stderr=subprocess.STDOUT, shell=True)
result = subprocess.check_output("python prepro_std.py --model bert-base-uncased --root_dir run_test/sample_data/canonical_data --task_def experiments/glue/glue_task_def.yml", stderr=subprocess.STDOUT, shell=True)
assert are_dir_trees_equal("./run_test/sample_data/canonical_data/bert-base-uncased", "./tests/sample_data/output")
| ContextualSP/adaptershare/tests/test_prepro.py/0 | {
"file_path": "ContextualSP/adaptershare/tests/test_prepro.py",
"repo_id": "ContextualSP",
"token_count": 960
} | 232 |
from .spider_align import SpiderAlignmentModel
from .wtq_align import WTQAlignmentModel
from baseline.wtq_s2s.seq2seq import WTQSeq2SeqModel
from .model_utils import *
from .optmizers import * | ContextualSP/awakening_latent_grounding/models/__init__.py/0 | {
"file_path": "ContextualSP/awakening_latent_grounding/models/__init__.py",
"repo_id": "ContextualSP",
"token_count": 63
} | 233 |
python train.py -model SpiderAlignmentModel -bert bert-large-uncased-whole-word-masking \
-lr 5e-5 -train_bs 6 \
-acc_steps 4 -alw linear_20-30 -num_epochs 30 \
--data_dir data/spider_grounding \
--out_dir checkpoints/model_spider \
--warmup_steps 2000 | ContextualSP/awakening_latent_grounding/train_spider_ground.sh/0 | {
"file_path": "ContextualSP/awakening_latent_grounding/train_spider_ground.sh",
"repo_id": "ContextualSP",
"token_count": 111
} | 234 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
# Author: Qian Liu (SivilTaram)
# Original Repo: https://github.com/microsoft/ContextualSP
import torch
from overrides import overrides
from allennlp.modules.matrix_attention.matrix_attention import MatrixAttention
@MatrixAttention.register("ele_multiply")
class ElementWiseMatrixAttention(MatrixAttention):
"""
This similarity function simply computes the dot product between each pair of vectors, with an
optional scaling to reduce the variance of the output elements.
Parameters
----------
scale_output : ``bool``, optional
If ``True``, we will scale the output by ``math.sqrt(tensor.size(-1))``, to reduce the
variance in the result.
"""
def __init__(self) -> None:
super(ElementWiseMatrixAttention, self).__init__()
@overrides
def forward(self, tensor_1: torch.Tensor, tensor_2: torch.Tensor) -> torch.Tensor:
result = torch.einsum('iaj,ibj->ijab', [tensor_1, tensor_2])
return result
| ContextualSP/incomplete_utterance_rewriting/src/similar_functions/element_wise.py/0 | {
"file_path": "ContextualSP/incomplete_utterance_rewriting/src/similar_functions/element_wise.py",
"repo_id": "ContextualSP",
"token_count": 355
} | 235 |
# coding: utf-8
import re
import nltk
from src.utils.utils import STOP_WORD_LIST
from src.utils.external import complex_rephrase
class NLModifier(object):
def __init__(self, mode='simple'):
self.database = ''
self.utterance = ''
self.utterance_tokens = []
self.utterance_tokens_no_stopwords = []
self.utterance_pos = []
assert mode.lower() in ('simple', 'rule', 'complex')
self.mode = mode.lower()
def refresh(self, database, utterance):
self.database = database
self.utterance = utterance
self.utterance_tokens = nltk.word_tokenize(utterance)
self.utterance_pos = [_[1] for _ in nltk.pos_tag(self.utterance_tokens)]
        self.utterance_tokens_no_stopwords = []
        self.utterance_pos_no_stopwords = []
        for token_idx, token in enumerate(self.utterance_tokens):
            if token not in STOP_WORD_LIST:
                self.utterance_tokens_no_stopwords.append((token_idx, token))
                self.utterance_pos_no_stopwords.append(self.utterance_pos[token_idx])
def modify(self, token, schema_item):
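        """Rewrite the current utterance around `token` using the linked schema item
        (a column name or value), following the strategy selected by `self.mode`
        ('simple' token replacement, POS-based 'rule' rewriting, or 'complex' rephrasing)."""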
if self.mode == 'simple':
self.utterance_tokens = [ori_token if ori_token != token else schema_item.value
for ori_token in self.utterance_tokens]
elif self.mode == 'rule':
if schema_item is None:
return
column_name, token_type = schema_item.value, schema_item.type
assert token_type in ('column_name', 'value')
# find spans
column_name_tokens = re.split(' |_', column_name)
            labels = [False for _ in range(len(self.utterance_tokens_no_stopwords))]
origin_span_idxs = []
for list_idx, (token_idx, utt_token) in enumerate(self.utterance_tokens_no_stopwords):
if utt_token == token:
labels[list_idx] = True
st, ed = token_idx, token_idx
for i in range(list_idx - 1, -1, -1):
if self.utterance_tokens_no_stopwords[i][1] in column_name_tokens:
labels[i] = True
st = self.utterance_tokens_no_stopwords[i][0]
else:
break
for i in range(list_idx + 1, len(self.utterance_tokens_no_stopwords)):
                        if self.utterance_tokens_no_stopwords[i][1] in column_name_tokens:
labels[i] = True
ed = self.utterance_tokens_no_stopwords[i][0]
else:
break
origin_span_idxs.append((token_idx, st, ed))
assert len(self.utterance_tokens_no_stopwords) == len(labels)
self.utterance_tokens_no_stopwords, self.utterance_pos_no_stopwords = \
[self.utterance_tokens_no_stopwords[i] for i in range(len(labels)) if labels[i] is False], \
[self.utterance_pos_no_stopwords[i] for i in range(len(labels)) if labels[i] is False]
# adopt replacing rules
for token_idx, span_st, span_ed in origin_span_idxs:
token_pos = self.utterance_pos[token_idx]
if token_pos in ('NN', 'NNS', 'NNP', 'NNPS'): # noun
                    if token_type == 'column_name':
                        self.utterance_tokens = self.utterance_tokens[:span_st] \
                                                + [column_name.lower()] \
                                                + self.utterance_tokens[span_ed + 1:]
                    elif token_type == 'value':
                        self.utterance_tokens[token_idx] = self.utterance_tokens[token_idx].capitalize()
                elif token_pos in ('VB', 'VBD', 'VBG', 'VBN', 'VBP', 'VBZ'):  # verb
                    if token_type == 'column_name':
                        self.utterance_tokens.insert(token_idx + 1, schema_item.value)
                    elif token_type == 'value':
                        self.utterance_tokens.insert(token_idx + 1, schema_item.value)
elif token_pos in ('JJ', 'JJR', 'JJS'): # adjective, adjective comparative, adjective superlative
                    assert token_type == 'value'
# adj + n -> adj + COLUMN + n
self.utterance_tokens.insert(token_idx, column_name)
elif token_pos in ('RB', 'RBR', 'RBS'):
pass # cannot handle
else:
raise ValueError('Cannot modify words not in n, v, adj')
# remove doubled words
new_utterance = []
last_word = ''
for word in self.utterance_tokens:
if word != last_word:
new_utterance.append(word)
last_word = word
self.utterance_tokens = new_utterance
elif self.mode == 'complex':
utterance = complex_rephrase(' '.join(self.utterance_tokens), token, schema_item.value)
self.utterance_tokens = utterance.split()
def get_utterance(self):
return ' '.join(self.utterance_tokens)
| ContextualSP/interactive_text_to_sql/src/components/nl_modiifer.py/0 | {
"file_path": "ContextualSP/interactive_text_to_sql/src/components/nl_modiifer.py",
"repo_id": "ContextualSP",
"token_count": 2741
} | 236 |
# coding: utf-8
import json
from typing import List
from src.utils.utils import lemma_token
STOP_WORD_LIST = [_.strip() for _ in open('data/common/stop_words.txt', 'r', encoding='utf-8').readlines()]
def align_two_sentences_in_token_level(token_list1, token_list2, stop_word_list=[]):
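    """Align the tokens of two sentences, ignoring `stop_word_list`, by exact match first
    and then by lemma match. Returns a list of (token1, index1, token2, index2) tuples."""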
token_list1 = [(word, idx) for idx, word in enumerate(token_list1) if word not in stop_word_list]
token_list2 = [(word, idx) for idx, word in enumerate(token_list2) if word not in stop_word_list]
def find_exact_match_pairs_from_two_sentences(word_list1, word_list2):
pairs = []
for word1, idx1 in word_list1:
for word2, idx2 in word_list2:
if word1 == word2:
word_list2.remove((word2, idx2))
pairs.append((word1, idx1, word2, idx2))
return pairs
exact_match = find_exact_match_pairs_from_two_sentences(token_list1, token_list2)
exact_match_tokens_idx1 = [_[1] for _ in exact_match]
exact_match_tokens_idx2 = [_[3] for _ in exact_match]
sentence1_lemma = [(lemma_token(word), idx) for word, idx in token_list1 if idx not in exact_match_tokens_idx1]
sentence2_lemma = [(lemma_token(word), idx) for word, idx in token_list2 if idx not in exact_match_tokens_idx2]
lemma_match = find_exact_match_pairs_from_two_sentences(sentence1_lemma, sentence2_lemma)
lemma_match_tokens_idx1 = [_[1] for _ in lemma_match]
lemma_match_tokens_idx2 = [_[3] for _ in lemma_match]
return exact_match + lemma_match
def find_keyword_alignment_by_rule(nl_tokens: List, keyword: str, stop_word_list: List = STOP_WORD_LIST,
only_one_match: bool = False, aligned_mark: List[bool] = None):
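    """Find spans of `nl_tokens` that match `keyword` via exact, lemma, or partial matching.
    Returns (aligned_results, aligned_mark), where each result is a tuple of
    (start_position, end_position, match_type, keyword_tokens)."""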
aligned_results = []
position_pairs = set()
# step0: eliminate stop words, but keep position info
keyword = keyword.split()
for stop_word in stop_word_list:
if stop_word in keyword:
            keyword.remove(stop_word)
keyword_lemma = [lemma_token(_) for _ in keyword]
informative_token_pairs = []
informative_token_lemma_pairs = []
for pos, token in enumerate(nl_tokens):
if token not in stop_word_list:
informative_token_pairs.append((token, pos))
informative_token_lemma_pairs.append((lemma_token(token), pos))
if not aligned_mark:
aligned_mark = [False for _ in range(len(informative_token_pairs))]
# step1: exact match
for i in range(len(informative_token_pairs) - len(keyword) + 1):
if only_one_match and True in aligned_mark[i: i + len(keyword)]:
continue
st_position = informative_token_pairs[i][1]
ed_position = informative_token_pairs[i + len(keyword) - 1][1]
if [_[0] for _ in informative_token_pairs[i: i + len(keyword)]] == keyword \
and (st_position, ed_position) not in position_pairs:
aligned_results.append((st_position, ed_position, 'exact', keyword))
position_pairs.add((st_position, ed_position))
if only_one_match:
for j in range(i, i + len(keyword)):
aligned_mark[j] = True
# step2: lemma exactly match
for i in range(len(informative_token_lemma_pairs) - len(keyword_lemma) + 1):
if only_one_match and True in aligned_mark[i: i + len(keyword_lemma)]:
continue
st_position = informative_token_lemma_pairs[i][1]
ed_position = informative_token_lemma_pairs[i + len(keyword) - 1][1]
if [_[0] for _ in informative_token_lemma_pairs[i: i + len(keyword_lemma)]] == keyword_lemma \
and (st_position, ed_position) not in position_pairs:
aligned_results.append((st_position, ed_position, 'exact lemma', keyword))
position_pairs.add((st_position, ed_position))
if only_one_match:
for j in range(i, i + len(keyword_lemma)):
aligned_mark[j] = True
def check_in(utterance_span, keyword_tokens):
return len(set(utterance_span) & set(keyword_tokens)) == len(utterance_span) and len(keyword_tokens) <= 3
# step3: partial match
for i in range(len(informative_token_pairs) - len(keyword) + 1):
st_position = informative_token_pairs[i][1]
for end_idx in reversed(range(i + 1, len(informative_token_pairs))):
if only_one_match and True in aligned_mark[i: end_idx]:
continue
sub_tokens = [_[0] for _ in informative_token_pairs[i:end_idx]]
if not sub_tokens:
continue
else:
ed_position = informative_token_pairs[end_idx - 1][1]
if check_in(sub_tokens, keyword):
aligned_results.append((st_position, ed_position, 'partial', keyword))
if only_one_match:
for j in range(i, end_idx):
aligned_mark[j] = True
# step4: lemma partial match
for i in range(len(informative_token_lemma_pairs) - len(keyword) + 1):
for end_idx in reversed(range(i + 1, len(informative_token_lemma_pairs))):
if only_one_match and True in aligned_mark[i: end_idx]:
continue
sub_tokens = [_[0] for _ in informative_token_lemma_pairs[i:end_idx]]
if not sub_tokens:
continue
else:
if check_in(sub_tokens, keyword):
aligned_results.append((informative_token_lemma_pairs[i][1],
informative_token_lemma_pairs[end_idx - 1][1],
'partial lemma', keyword))
if only_one_match:
for j in range(i, end_idx):
aligned_mark[j] = True
return aligned_results, aligned_mark
def find_alignment_by_rule(nl_tokens: List, table_names: List, column_names: List, values: List, only_one_match=False):
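    """Align natural-language tokens with schema values, table names, and column names.
    Returns a dict with keys 'value', 'table', and 'column' mapping to the match lists."""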
aligned_mark = None
# step1: find value match
value_matches = []
for value in values:
value_match, aligned_mark = \
find_keyword_alignment_by_rule(nl_tokens, value, STOP_WORD_LIST,
only_one_match=only_one_match, aligned_mark=aligned_mark)
value_matches += value_match
# step2: find table match
table_matches = []
for table_name in table_names:
table_match, aligned_mark = \
find_keyword_alignment_by_rule(nl_tokens, table_name, STOP_WORD_LIST,
only_one_match=only_one_match, aligned_mark=aligned_mark)
table_matches += table_match
# step3 find column match
column_matches = []
for column_name in column_names:
column_match, aligned_mark = \
find_keyword_alignment_by_rule(nl_tokens, column_name, STOP_WORD_LIST,
only_one_match=only_one_match, aligned_mark=aligned_mark)
column_matches += column_match
alignment_results = {'value': value_matches, 'table': table_matches, 'column': column_matches}
return alignment_results
def test():
nl_tokens = 'show me the name of all English songs and their singers'.split()
table_names = ['singer', 'song']
column_names = ['singer name', 'song name', 'age', 'year']
values = ['English', 'Show time']
ret = find_alignment_by_rule(nl_tokens, table_names, column_names, values, only_one_match=False)
print(json.dumps(ret, indent=4))
if __name__ == '__main__':
test()
| ContextualSP/interactive_text_to_sql/src/utils/link_util.py/0 | {
"file_path": "ContextualSP/interactive_text_to_sql/src/utils/link_util.py",
"repo_id": "ContextualSP",
"token_count": 3558
} | 237 |
import json
import sys
import copy
from itertools import combinations, permutations
from random import choice, choices, shuffle
import math
import argparse
from multiprocessing import Pool
import multiprocessing
from collections import Counter
from functools import reduce
from math import gcd
from random import sample
# from corpus_generation.scene_corpus_generation import postpreprocess_scene
parser = argparse.ArgumentParser()
parser.add_argument("--max_number", type=int, default=100000, help="max number each dataset.")
parser.add_argument("--corpus_file", type=str, default='../corpus/pretraining_corpus_tangrams.txt', help="corpus file")
args = parser.parse_args()
fw = open(args.corpus_file, 'w')
def lcm(numbers):
return reduce((lambda x, y: int(x * y / gcd(x, y))), numbers)
def obtain_action_weight(actions):
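    """Weight each action inversely to the frequency of its action type, so that sampling
    with these weights draws action types roughly uniformly."""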
temp = Counter([item.split()[0] for item in actions])
lcm_value = lcm(temp.values())
temp = {item:int(lcm_value / temp[item]) for item in temp}
action_weight = [temp[item.strip().split()[0]] for item in actions]
return action_weight
def tangrams_shape_to_letter(shape):
return chr(int(shape) + 65)
def postpreprocess_tangrams(states):
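    """Drop empty slots from a state string, convert shape ids to letters, renumber the
    remaining slots from 1, pad back to 5 slots with '_', and join them with ' | '."""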
states = [item for item in states.split() if item.strip().split(':')[1]!='_']
states = ['{}:{}'.format(str(i+1), tangrams_shape_to_letter(elem)) for i, elem in enumerate([item.split(':')[1] for item in states])]
states = ['{}:{}'.format(str(i+1), elem) for i, elem in enumerate([item.split(':')[1] for item in states] + ['_'] * (5-len(states)))]
states = ' | '.join(states)
return states
def random_sampling(candidate_list, n, weights=None):
result_list = []
for _ in range(n):
result = choices(candidate_list, k=1, weights=weights)[0]
result_list.append(result)
return result_list
def tangrams_letter_to_shape(letter):
return str(ord(letter) - 65)
def tangrams_executor(slots, actions):
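    """Apply a sequence of insert/remove actions to a list of '<pos>:<shape>' slots and
    return the updated slot list, or an error string if any action is invalid."""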
content = [item.split(':')[1] for item in slots]
content = [item for item in content if item != '_']
for action in actions:
splits = action.split()
if splits[0] == 'insert':
if len(content) >= 5:
return 'Failed: sequence is too long.'
else:
if int(splits[1]) > len(content) + 1:
return 'Failed: index greater than sequence length'
else:
if tangrams_letter_to_shape(splits[2]) in content:
                        return 'Failed: tangram already in the sequence'
else:
content.insert(int(splits[1])-1, tangrams_letter_to_shape(splits[2]))
elif splits[0] == 'remove':
if len(content) <= 1:
return 'Failed: sequence is too short.'
else:
if int(splits[1]) > len(content):
return 'Failed: index greater than sequence length'
else:
del content[int(splits[1])-1]
slots = ['{}:{}'.format(str(i+1), item) for i, item in enumerate(content)]
if len(slots) < 5:
slots = ['{}:{}'.format(str(i+1), elem) for i, elem in enumerate([item.split(':')[1] for item in slots] + ['_'] * (5-len(slots)))]
return slots
def tangrams_state_generator():
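    """Sample a random full permutation of the 5 tangram shapes as '<pos>:<shape>' slots."""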
all_states = list(set(list(permutations(list(range(5)), 5))))
states = ['{}:{}'.format(str(i+1), item) for i,item in enumerate(random_sampling(all_states, 1)[0])]
# states = ' '.join(states)
return states
def obtain_valid_actions_tangrams(states):
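    """Enumerate all insert/remove actions that are valid in the given slot state
    (insert only unused shapes while fewer than 5 slots are filled; remove only
    while more than 1 slot is filled)."""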
action_list = []
states = [item.strip().split(':')[1] for item in states]
total_len = len([item for item in states if item != '_'])
if total_len < 5:
action_list.extend(['insert {} {}'.format(str(i+1), j) for i in range(total_len+1) for j in ['A','B','C','D','E'] if tangrams_letter_to_shape(j) not in states])
if total_len > 1:
action_list.extend(['remove {}'.format(str(i+1)) for i in range(total_len)])
return list(set(action_list))
def tangrams_corpus_generation(inputs):
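    """Generate `total_number` (actions, previous state, current state) triples by sampling
    random start states and random valid action sequences whose length is drawn from
    `action_number_range`, writing each triple as a tab-separated line to the corpus file."""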
total_number, action_number_range = inputs
all_states = list(set(list(permutations(list(range(5)), 5)) + list(permutations(list(range(5)), 4)) + list(permutations(list(range(5)), 3)) + list(permutations(list(range(5)), 2)) + list(permutations(list(range(5)), 1))))
all_actions = ['insert {} {}'.format(str(i+1), j) for i in range(5) for j in ['A', 'B', 'C', 'D', 'E']]
# all_actions = ['insert {} {}'.format(str(i+1), j) for i in range(5) for j in [0,1,2,3,4]]
all_actions += ['remove {}'.format(str(i+1)) for i in range(5)]
count = 0
print('Begin generating tangrams corpus.')
while True:
# prev_states = ['{}:{}'.format(str(i+1), item) for i,item in enumerate(random_sampling(all_states, 1)[0])]
prev_states = tangrams_state_generator()
if len(prev_states) < 5:
prev_states = ['{}:{}'.format(str(i+1), elem) for i, elem in enumerate([item.split(':')[1] for item in prev_states] + ['_'] * (5-len(prev_states)))]
states_this_step = prev_states
index = 0
action_list = []
step_this_case = choice(action_number_range)
while index < step_this_case:
all_valid_actions = obtain_valid_actions_tangrams(states_this_step)
action_weight = obtain_action_weight(all_valid_actions)
action = random_sampling(all_valid_actions, 1, weights=action_weight)
states_this_step = tangrams_executor(states_this_step, action)
assert isinstance(states_this_step, list)
action_list.extend(action)
index += 1
curr_states = states_this_step
prev_states = postpreprocess_tangrams(' '.join(prev_states))
actions = ' '.join(action_list)
curr_states = postpreprocess_tangrams(' '.join(curr_states))
item_row = '\t'.join([actions, prev_states, curr_states])
fw.write(item_row)
fw.write('\n')
count += 1
if count % 10000 == 0:
print('Finish generating {} cases'.format(count))
if count >= total_number:
break
if __name__ == '__main__':
total_number_list = [int(args.max_number * 0.35), int(args.max_number * 0.4), int(args.max_number * 0.15), int(args.max_number * 0.1)]
action_number_range_list = [list(range(1,6)), list(range(6,11)), list(range(11,16)), list(range(16,21))]
cores = multiprocessing.cpu_count()
print("Using {} cores".format(cores))
pool = Pool(cores)
for total_number, action_number_range in zip(total_number_list, action_number_range_list):
        res = pool.map(tangrams_corpus_generation, zip([int(total_number // cores)] * cores, [action_number_range] * cores))
# tangrams_corpus_generation(int(args.max_number * 0.35), list(range(1,6)))
# tangrams_corpus_generation(int(args.max_number * 0.4), list(range(6,11)))
# tangrams_corpus_generation(int(args.max_number * 0.15), list(range(11,16)))
# tangrams_corpus_generation(int(args.max_number * 0.1), list(range(16,21))) | ContextualSP/lemon/corpus_generation/tangrams_corpus_generation.py/0 | {
"file_path": "ContextualSP/lemon/corpus_generation/tangrams_corpus_generation.py",
"repo_id": "ContextualSP",
"token_count": 2964
} | 238 |
from abc import ABCMeta, abstractmethod
from collections.abc import Mapping
import numpy as np
from gtd.utils import EqualityMixin
class Vocab(object, metaclass=ABCMeta):
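    """Abstract base class for mapping between word tokens and integer indices."""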
@abstractmethod
def word2index(self, w):
pass
@abstractmethod
def index2word(self, i):
pass
class SimpleVocab(Vocab, EqualityMixin):
"""A simple vocabulary object."""
def __init__(self, tokens):
"""Create a vocab.
Args:
tokens (list[unicode]): a unique list of unicode tokens
If t = tokens[i], this vocab will map token t to the integer i.
"""
if not isinstance(tokens, list):
raise ValueError('tokens must be a list')
# build mapping
word2index = {}
for i, tok in enumerate(tokens):
word2index[tok] = i
if len(tokens) != len(word2index):
raise ValueError('tokens must be unique')
self._index2word = list(tokens) # make a copy
self._word2index = word2index
@property
def tokens(self):
"""Return the full list of tokens sorted by their index."""
return self._index2word
def __iter__(self):
"""Iterate through the full list of tokens."""
return iter(self._index2word)
def __len__(self):
"""Total number of tokens indexed."""
return len(self._index2word)
def __contains__(self, w):
"""Check if a token has been indexed by this vocab."""
return w in self._word2index
def word2index(self, w):
return self._word2index[w]
def index2word(self, i):
return self._index2word[i]
def words2indices(self, words):
return list(map(self.word2index, words))
def indices2words(self, indices):
return [self.index2word(i) for i in indices]
def save(self, path):
"""Save SimpleVocab to file path.
Args:
path (str)
"""
with open(path, 'w') as f:
for word in self._index2word:
f.write(word)
f.write('\n')
@classmethod
def load(cls, path):
"""Load SimpleVocab from file path.
Args:
path (str)
Returns:
SimpleVocab
"""
strip_newline = lambda s: s[:-1]
with open(path, 'r') as f:
tokens = [strip_newline(line) for line in f]
return cls(tokens)
class SimpleEmbeddings(Mapping):
def __init__(self, array, vocab):
"""Create embeddings object.
Args:
array (np.array): has shape (vocab_size, embed_dim)
vocab (SimpleVocab): a Vocab object
"""
assert len(array.shape) == 2
assert array.shape[0] == len(vocab) # entries line up
self.array = array
self.vocab = vocab
def __contains__(self, w):
return w in self.vocab
def __getitem__(self, w):
idx = self.vocab.word2index(w)
return np.copy(self.array[idx])
def __iter__(self):
return iter(self.vocab)
def __len__(self):
return len(self.vocab)
@property
def embed_dim(self):
return self.array.shape[1] | ContextualSP/lemon/executor/gtd/ml/vocab.py/0 | {
"file_path": "ContextualSP/lemon/executor/gtd/ml/vocab.py",
"repo_id": "ContextualSP",
"token_count": 1448
} | 239 |
import json
import logging
import time
from abc import ABCMeta, abstractmethod
from collections import Counter
import pytest
from gtd.persist import LazyMapping, EagerMapping, TableMapping, ORM, ORMColumn, FileSequence, FileSerializer, SimpleORM, \
ShardedSequence, CustomSerializer, LazyIterator, BatchIterator, SimpleBatchMapping, SequenceSlice
from sqlalchemy import MetaData, String, Integer, Table, create_engine, select, Column
from sqlalchemy.engine.url import URL
from sqlalchemy.exc import OperationalError
from sqlalchemy.inspection import inspect
class BatchMappingTester(object):
pass # TODO
# make sure getitem throws KeyError when appropriate
class BatchMutableMappingTester(BatchMappingTester):
pass # TODO
class MetaDataExample(MetaData):
def __init__(self):
url = URL(drivername='postgresql+psycopg2', username='Kelvin',
host='localhost', port=5432, database='test_db')
try:
engine = create_engine(url)
engine.connect()
logging.info('Using Postgres test database.')
except OperationalError:
# postgres test database not available
url = 'sqlite:///:memory:'
engine = create_engine(url)
            logging.warning('Using SQLite test database.')
super(MetaDataExample, self).__init__(engine)
class LazyMappingExample(LazyMapping):
def __init__(self, cache):
super(LazyMappingExample, self).__init__(cache)
self.computes_called = Counter()
def compute_batch(self, keys):
for key in keys:
self.computes_called[key] += 1
return [k * 2 for k in keys]
class TestLazyMapping(object):
@pytest.fixture
def lazy_dict(self):
cache = SimpleBatchMapping()
return LazyMappingExample(cache)
def test_getitem(self, lazy_dict):
d = lazy_dict
cache = d.cache
assert len(cache) == 0
assert d[3] == 6
# check that it entered cache
assert cache[3] == 6
# get the same value
assert d[3] == 6
# every computation only done once
for val in d.computes_called.values():
assert val <= 1
def test_get_batch(self, lazy_dict):
def assert_batches(xs, correct):
results = lazy_dict.get_batch(xs)
results_par = LazyMapping.compute_batch_parallel(lambda k: 2 * k, xs)
assert results == correct
assert results_par == correct
# every computation only done once
for val in lazy_dict.computes_called.values():
assert val <= 1
# WARNING: this test could fail because computes_called is a Counter, which may
# not be thread-safe.
assert_batches([0, 1, 2, 3], [0, 2, 4, 6])
assert_batches([2, 3, 4], [4, 6, 8])
class EagerMappingExample(EagerMapping):
def __init__(self, cache):
super(EagerMappingExample, self).__init__(cache)
def populate(self, cache):
cache['a'] = 1
cache['b'] = 2
def test_eager_mapping():
cd = EagerMappingExample({})
assert cd.cache == {'a': 1, 'b': 2}
# if cache is already populated, doesn't overwrite it
cd2 = EagerMappingExample({'d': 3})
assert cd2.cache == {'d': 3}
class ORMTester(object, metaclass=ABCMeta):
@pytest.fixture(scope='session')
def metadata(self):
return MetaDataExample()
@abstractmethod
def object(self):
pass
@abstractmethod
def orm(self):
pass
@pytest.yield_fixture
def table(self, orm, metadata):
metadata.drop_all() # clear the database
table_args = [c.unbound_column for c in orm.columns]
table = Table('test_table', metadata, *table_args)
metadata.create_all()
yield table
metadata.drop_all()
def test_preserve_object(self, orm, object, table, metadata):
orm.bind(table)
row = orm.to_row(object)
for key in row:
assert isinstance(key, Column)
eng = metadata.bind
with eng.begin() as conn:
conn.execute(table.insert(values=row))
result = conn.execute(select([table]))
new_row = result.first()
new_object = orm.from_row(new_row)
assert new_object == object
class ExampleKeyORM(ORM):
def __init__(self):
self.name = ORMColumn('name', String)
self.age = ORMColumn('age', Integer)
columns = [self.name, self.age]
super(ExampleKeyORM, self).__init__(columns)
def to_row(self, value):
name, age = value
return {self.name.key: name, self.age.key: age}
def from_row(self, row):
return row[self.name.key], row[self.age.key]
class TestExampleKeyORM(ORMTester):
@pytest.fixture
def object(self):
return ('bob', 4)
@pytest.fixture
def orm(self):
return ExampleKeyORM()
class ExampleValORM(ORM):
def __init__(self):
self.name = ORMColumn('json', String)
super(ExampleValORM, self).__init__([self.name])
def to_row(self, value):
return {self.name.key: json.dumps(value)}
def from_row(self, row):
return json.loads(row[self.name.key])
class TestTableMapping:
@pytest.fixture(scope='session')
def metadata(self):
return MetaDataExample()
@pytest.yield_fixture
def table_dict(self, metadata):
metadata.drop_all()
key_orm = ExampleKeyORM()
val_orm = ExampleValORM()
td = TableMapping('test_table', key_orm, val_orm, metadata)
td[('ren', 1)] = {'hobby': 'bowling'}
td[('bob', 2)] = {'hobby': 'bowling'}
yield td
metadata.drop_all()
def test_contains(self, table_dict):
assert ('ren', 1) in table_dict
assert ('ren', 2) not in table_dict
def test_contains_batch(self, table_dict):
# note that there is a duplicate
batch = [('ren', 1), ('ren', 2), ('ren', 1), ('bob', 2)]
correct = [True, False, True, True]
presence = table_dict.contains_batch(batch)
assert presence == correct
def test_correct_table(self, table_dict):
correct_columns = ['name', 'age', 'json']
correct_keys = ['name', 'age']
names = lambda cols: [col.name for col in cols]
table = table_dict.table
assert names(table.columns) == correct_columns
assert names(inspect(table).primary_key.columns) == correct_keys
def test_set_batch(self, table_dict):
bob_json = {'hobby': 'golf'}
james_json = {'hobby': 'tennis'}
ren_json = {'hobby': 'bowling'}
table_dict.set_batch([(('bob', 2), bob_json), (('james', 3), james_json)])
d = dict(table_dict)
assert d == {('bob', 2): bob_json, ('james', 3): james_json, ('ren', 1): ren_json}
# note that bob_json was overwritten from bowling to golf
def test_getitem(self, table_dict):
assert table_dict[('ren', 1)] == {'hobby': 'bowling'}
with pytest.raises(KeyError):
bob_val = table_dict[('bob', 1)]
def test_setitem(self, table_dict):
table_dict[('ren', 1)] = {'hobby': 'none'}
d = dict(table_dict)
assert d == {('ren', 1): {'hobby': 'none'},
('bob', 2): {'hobby': 'bowling'},
}
def test_delitem(self, table_dict):
del table_dict[('bob', 2)]
assert dict(table_dict) == {('ren', 1): {'hobby': 'bowling'}}
with pytest.raises(KeyError):
del table_dict[('bob', 1)]
def test_iter(self, table_dict):
assert set(iter(table_dict)) == {('ren', 1), ('bob', 2)}
def test_len(self, table_dict):
assert len(table_dict) == 2
# TODO: test iterkeys, iteritems, itervalues
class AppendableSequenceTester(object):
@abstractmethod
def empty_list(self):
"""An empty list object to be tested."""
pass
@abstractmethod
def reference_list(self):
"""A standard Python list containing at least 5 items."""
pass
def test_append_getitem(self, empty_list, reference_list):
lst = empty_list
item = reference_list[0]
lst.append(item)
assert lst[0] == item
def test_extend(self, empty_list, reference_list):
lst = empty_list
lst.extend(reference_list)
for i, item in enumerate(reference_list):
assert lst[i] == item
assert len(lst) == len(reference_list)
def test_len(self, empty_list, reference_list):
lst = empty_list
item = reference_list[0]
lst.append(item)
lst.append(item)
lst.append(item)
lst.append(item)
assert len(lst) == 4
def test_iter(self, empty_list, reference_list):
lst = empty_list
lst.extend(reference_list)
for i, item in enumerate(lst):
assert item == reference_list[i]
def test_slice(self, empty_list, reference_list):
lst = empty_list
lst.extend(reference_list)
assert list(lst[0:2:5]) == reference_list[0:2:5]
class FileSerializerExample(FileSerializer):
def to_line(self, s):
return s
def from_line(self, line):
return line
class FileSerializerTester(object, metaclass=ABCMeta):
@abstractmethod
def serializer(self):
pass
@abstractmethod
def object(self):
pass
def test_serializer(self, serializer, object):
line = serializer.to_line(object)
new_obj = serializer.from_line(line)
assert new_obj == object
class TestFileSequence(AppendableSequenceTester):
@pytest.yield_fixture
def empty_list(self, tmpdir):
path = tmpdir.join('test_file_list.txt')
# whether to use gzip
with FileSequence(str(path), FileSerializerExample()) as seq:
yield seq
@pytest.fixture
def reference_list(self):
return 'a b c d e f g'.split()
def test_json_newline(self, tmpdir):
path = str(tmpdir.join('test_json_items.txt'))
ser = CustomSerializer(lambda o: json.dumps(o), lambda l: json.loads(l))
fs = FileSequence(path, ser)
items = ['hey\nthere', 'two\nobjects serialized']
fs.extend(items)
for i, val in enumerate(fs):
assert val == items[i]
def test_reload(self, empty_list, reference_list):
empty_list.extend(reference_list)
l = empty_list
new_l = FileSequence(l.path, l._ser)
assert len(new_l) == len(l)
for i1, i2 in zip(new_l, l):
assert i1 == i2
class TestShardedSequence(AppendableSequenceTester):
@pytest.yield_fixture
def empty_list(self, tmpdir):
path = str(tmpdir)
shard_size = 3
with ShardedSequence(path, shard_size, FileSerializerExample()) as seq:
yield seq
@pytest.fixture
def reference_list(self):
return [str(i) for i in range(16)]
def test_reload(self, empty_list, reference_list):
empty_list.extend(reference_list) # populate the list
l = empty_list
# reload it
new_l = ShardedSequence(l.directory, l.shard_size, FileSerializerExample())
assert len(new_l) == len(l)
for i1, i2 in zip(new_l, l):
assert i1 == i2
class FileSequenceExample(FileSequence):
def __init__(self, path):
ser = FileSerializerExample()
super(FileSequenceExample, self).__init__(path, ser)
class TableMappingExample(TableMapping):
def __init__(self, metadata):
key_orm = SimpleORM(ORMColumn('key', Integer))
val_orm = SimpleORM(ORMColumn('val', String))
super(TableMappingExample, self).__init__('tabledict_example', key_orm, val_orm, metadata)
class TestTableMappingSpeed(object):
@pytest.fixture(scope='session')
def metadata(self):
return MetaDataExample()
@pytest.yield_fixture
def file_list(self, tmpdir):
path = tmpdir.join('test_file_list.txt')
with FileSequenceExample(str(path)) as seq:
yield seq
@pytest.yield_fixture
def raw_file(self, tmpdir):
p = str(tmpdir.join('raw_file.txt'))
with open(p, 'w') as f:
yield f
@pytest.yield_fixture
def table_dict(self, metadata):
metadata.drop_all()
yield TableMappingExample(metadata)
metadata.drop_all()
def test_extend(self, raw_file, file_list, table_dict):
def time_it(fxn):
start = time.time()
fxn()
stop = time.time()
return stop - start
# 100 rows of text, each with 500,000 characters
vals = ['a' * 500000] * 100
def extend_raw():
for v in vals:
raw_file.write(v)
raw_file.write('\n')
def extend_file():
file_list.extend(vals)
def extend_dict():
d = {i: v for i, v in enumerate(vals)}
table_dict.update(d)
raw_time = time_it(extend_raw)
file_time = time_it(extend_file)
dict_time = time_it(extend_dict)
# just make sure we did the inserts
assert len(file_list) == 100
assert len(table_dict) == 100
assert file_time < raw_time * 2
# TableDict should not be more than 20x slower than file
# On average, seems to be about 15x slower
assert dict_time < file_time * 20
# should take less than two seconds
assert dict_time < 2
class LazyIteratorExample(LazyIterator):
def __init__(self):
cache = []
super(LazyIteratorExample, self).__init__(cache)
def compute_batch(self, k):
batch = []
for i in range(k):
item = self.iterated + i
if item == 15: break
batch.append(item)
return batch
class TestLazyIterator(object):
@pytest.fixture
def iterator(self):
return LazyIteratorExample()
def test_iter(self, iterator):
assert list(iterator) == list(range(15))
def test_next_batch(self, iterator):
assert iterator.next_batch(6) == [0, 1, 2, 3, 4, 5]
assert iterator.next_batch(2) == [6, 7]
assert iterator.next_batch(5) == [8, 9, 10, 11, 12]
assert iterator.next_batch(8) == [13, 14]
with pytest.raises(StopIteration):
iterator.next_batch(1)
class ExampleBatchIterator(BatchIterator):
def __init__(self, total):
self.iterated = 0
self.total = total
super(ExampleBatchIterator, self).__init__(default_batch_size=30)
def next_batch(self, k):
batch = [self.iterated + i for i in range(k)]
batch = [b for b in batch if b < self.total]
if len(batch) == 0:
raise StopIteration
self.iterated += len(batch)
return batch
class TestBatchIterator(object):
@pytest.fixture
def iterator(self):
return ExampleBatchIterator(8)
def test_iterator(self, iterator):
assert list(iterator) == [0, 1, 2, 3, 4, 5, 6, 7]
class TestSequenceSlice(object):
@pytest.fixture
def seq(self):
return list(range(10))
def test_full(self, seq):
ss = list(SequenceSlice(seq, slice(2, 8, 3)))
assert ss == [2, 5]
def test_partial(self, seq):
ss = list(SequenceSlice(seq, slice(None, 8, 3)))
assert ss == [0, 3, 6]
ss = list(SequenceSlice(seq, slice(None, 8, None)))
assert ss == [0, 1, 2, 3, 4, 5, 6, 7]
ss = list(SequenceSlice(seq, slice(None, None, None)))
assert ss == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
def test_negative(self, seq):
ss = SequenceSlice(seq, slice(None, 8, 3))
assert ss[-1] == 6
assert ss[-2] == 3
assert ss[-3] == 0
with pytest.raises(IndexError):
ss[-4]
| ContextualSP/lemon/executor/gtd/tests/test_persist.py/0 | {
"file_path": "ContextualSP/lemon/executor/gtd/tests/test_persist.py",
"repo_id": "ContextualSP",
"token_count": 7081
} | 240 |
from abc import ABCMeta, abstractproperty
from collections.abc import Sequence
import numpy as np
from gtd.utils import set_once_attribute
class ParseCase(object, metaclass=ABCMeta):
"""Necessary and sufficient information to make a prediction about the next decision.
Attributes that must be assigned upon creation:
- context (Context): Context
- choices (list[Predicate]): List of possible Predicates
Attributes that can be assigned later:
- choice_logits (list[float]): Logit score for each choice.
Have the same length as self.choices.
- choice_log_probs (list[float]): Log of softmaxed score for each choice.
Have the same length as self.choices.
- decision (Predicate): Predicate that the model decided to predict.
Must be a member of self.choices
Implied attributes:
- denotation (object): Result of the execution on the decided Predicates up to the current self.decision
Only defined when self.decision is already assigned
- logit (float): Logit of the decision
- log_prob (float): Log probability of the decision
"""
__slots__ = ['_context', '_choices',
#'_choice_logits', '_choice_log_probs', '_decision',
# For speed, enable the following line instead of the one above ...
'choice_logits', 'choice_log_probs', 'decision', 'pretty_embed',
'_logit', '_log_prob', '_denotation']
# And comment out these 3 lines
#choice_logits = set_once_attribute('_choice_logits')
#choice_log_probs = set_once_attribute('_choice_log_probs')
#decision = set_once_attribute('_decision')
@property
def context(self):
"""The context (Context object)."""
return self._context
@property
def choices(self):
"""A list of possible choices (list[Predicate])."""
return self._choices
@abstractproperty
def _previous_cases(self):
"""A list of the previous cases (list[ParseCase])."""
pass
@property
def logit(self):
"""Logit (score) of the decision on this ParseCase only (float)."""
if not hasattr(self, '_logit'):
self._logit = self.choice_logits[self.choices.index(self.decision)]
return self._logit
@abstractproperty
def cumulative_logit(self):
"""Sum of the logits of the decisions up to this ParseCase (float)."""
pass
@property
def log_prob(self):
"""Log-Probability of the decision on this ParseCase only (float)."""
if not hasattr(self, '_log_prob'):
self._log_prob = self.choice_log_probs[self.choices.index(self.decision)]
return self._log_prob
@abstractproperty
def cumulative_log_prob(self):
"""Log-Probability of the decisions up to this ParseCase (float)."""
pass
@property
def previous_decisions(self):
"""A list of the previous decisions (List[Predicate])."""
return [c.decision for c in self._previous_cases]
def __str__(self):
return '{{{};utt {}/{};[{}];{} from {} choices}}'.format(
            '|'.join(' '.join(u)
                     for u in self.context.utterances)[:20] + '...',
self.current_utterance_idx,
len(self.context.utterances),
' '.join(pred.name for pred in self.previous_decisions),
(self.decision if hasattr(self, 'decision') else None),
len(self.choices))
__repr__ = __str__
@property
def path(self):
"""The sequence of ParseCases leading up to and including this one.
Returns:
ParsePath
"""
cases = self._previous_cases + [self]
return ParsePath(cases)
@property
def denotation(self):
"""The denotation of the decisions up to the current decision.
If the execution is successful, the denotation is an arbitrary object
returned from the executor. Otherwise, the denotation is an Exception.
"""
try:
return self._denotation
except AttributeError:
y_toks = [self.decision]
executor = self.context.executor
old_denotation = None
for case in reversed(self._previous_cases):
if hasattr(case, '_denotation'):
old_denotation = case._denotation
break
else:
y_toks.append(case.decision)
if isinstance(old_denotation, Exception):
self._denotation = old_denotation
else:
try:
self._denotation = executor.execute(y_toks[::-1], old_denotation)
except Exception as e:
self._denotation = e
return self._denotation
@property
def current_utterance_idx(self):
"""Index of the utterance we are focusing on, PRIOR to making a decision for this ParseCase."""
previous_cases = self._previous_cases
if len(previous_cases) == 0:
utterance_idx = 0 # we always start on the first utterance
else:
previous_case = previous_cases[-1]
denotation = previous_case.denotation
assert not isinstance(denotation, Exception)
utterance_idx = denotation.utterance_idx
return utterance_idx
@property
def current_utterance(self):
"""The utterance we are focusing on, PRIOR to making a decision for this ParseCase."""
return self.context.utterances[self.current_utterance_idx]
@property
def next_utterance_idx(self):
"""Index of the utterance we will focus on next, AFTER making a decision for this ParseCase.
Only callable if decision is already set.
Return len(context.utterances) if there is no utterance left.
"""
assert not isinstance(self.denotation, Exception)
return self.denotation.utterance_idx
@property
def next_utterance(self):
"""The utterance we will focus on next, AFTER making a decision for this ParseCase.
Only callable if decision is already set.
Return None if there is no utterance left.
"""
next_utterance_idx = self.next_utterance_idx
if next_utterance_idx == len(self.context.utterances):
return None
return self.context.utterances[next_utterance_idx]
@classmethod
def initial(cls, context):
"""Convenience method for creating a new InitialParseCase.
Args:
context (Context)
Returns:
InitialParseCase
"""
choices = context.predicates
return InitialParseCase(context, choices)
@classmethod
def extend(cls, previous_case):
"""Convenience method for creating a new RecursiveParseCase.
Args:
previous_case (ParseCase)
Returns:
RecursiveParseCase
"""
choices = previous_case.context.predicates
return RecursiveParseCase(previous_case, choices)
def __hash__(self):
return hash((self.context, self.choices, self.decision))
def __eq__(self, other):
if type(self) != type(other):
return False
return (self.context == other.context and
self.choices == other.choices and
self.decision == other.decision)
def valid_continuations(self, path_checker):
"""Returns all of the valid continuations of this case extending from
this path according to the path_checker. A path is valid if it is
terminated and finalizable or unterminated and checks out with the
path_checker.
Args:
path_checker (PathChecker)
Returns:
list[ParsePath]: the continuations
"""
continuations = []
for choice in self.choices:
clone = self.copy_with_decision(choice)
denotation = clone.denotation
if not isinstance(denotation, Exception):
path = clone.path
if path.terminated:
if path.finalizable:
continuations.append(path)
elif path_checker(path):
continuations.append(path)
return continuations
class InitialParseCase(ParseCase):
"""Represents the initial ParseCase."""
__slots__ = []
def __init__(self, context, choices):
self._context = context
self._choices = choices
@property
def _previous_cases(self):
return []
@property
def cumulative_logit(self):
return self.logit
@property
def cumulative_log_prob(self):
return self.log_prob
def copy_with_decision(self, decision):
"""Return a copy with a specific decision"""
clone = InitialParseCase(self._context, self._choices)
clone.choice_logits = self.choice_logits
clone.choice_log_probs = self.choice_log_probs
clone.decision = decision
clone.pretty_embed = self.pretty_embed
try:
clone._denotation = self._context.executor.execute_predicate(decision)
except Exception as e:
clone._denotation = e
return clone
class RecursiveParseCase(ParseCase):
"""Represents a non-initial ParseCase."""
__slots__ = ['_prev_case', '_cumulative_logit', '_cumulative_log_prob']
def __init__(self, previous_case, choices):
"""Create a ParseCase from a previous case.
Args:
previous_case (ParseCase): the previous ParseCase
choices (list[Predicate]): a list of possible next decisions
"""
try:
previous_case.decision
except AttributeError:
raise RuntimeError('Previous ParseCase must already have a decision.')
self._prev_case = previous_case
self._context = previous_case.context
self._choices = choices
@property
def _previous_cases(self):
case = self._prev_case
p = []
while True:
p.append(case)
if isinstance(case, RecursiveParseCase):
case = case._prev_case
else:
break
return list(reversed(p))
@property
def cumulative_logit(self):
if not hasattr(self, '_cumulative_logit'):
self._cumulative_logit = self._prev_case.cumulative_logit + self.logit
return self._cumulative_logit
@property
def cumulative_log_prob(self):
if not hasattr(self, '_cumulative_log_prob'):
self._cumulative_log_prob = self._prev_case.cumulative_log_prob + self.log_prob
return self._cumulative_log_prob
def copy_with_decision(self, decision):
"""Return a copy with a specific decision"""
clone = RecursiveParseCase(self._prev_case, self._choices)
clone.choice_logits = self.choice_logits
clone.choice_log_probs = self.choice_log_probs
clone.decision = decision
clone.pretty_embed = self.pretty_embed
try:
clone._denotation = self._context.executor.execute_predicate(decision, self._prev_case.denotation)
except Exception as e:
clone._denotation = e
return clone
class ParsePath(Sequence):
"""Represent an entire Sequence of ParseCases."""
__slots__ = ['_cases', '_context', '_finalized_denotation', '_is_zombie']
@classmethod
def empty(cls, context):
return ParsePath([], context)
def __init__(self, cases, context=None):
self._cases = cases
if not cases:
if context is None:
raise RuntimeError('Must specify context for an empty ParsePath')
self._context = context
else:
self._context = cases[0].context
self._is_zombie = False
def __getitem__(self, i):
return self._cases[i]
def __len__(self):
return len(self._cases)
def __str__(self):
return 'Path' + str(self._cases)
__repr__ = __str__
def __hash__(self):
return hash((tuple(self._cases), self._context))
def __eq__(self, other):
return self._cases == other._cases and self._context == other._context
@property
def denotation(self):
"""The intermediate denotation (Denotation object)"""
assert self._cases
return self._cases[-1].denotation
@property
def finalized_denotation(self):
"""The finalized denotation (list[Value]).
Only available when the path is terminated."""
if self._is_zombie:
return [] # Always incorrect
if not hasattr(self, '_finalized_denotation'):
assert self.terminated
executor = self.context.executor
denotation = self.denotation
self._finalized_denotation = executor.finalize(denotation)
return self._finalized_denotation
@property
def context(self):
"""The context (Context object)"""
return self._context
@property
def decisions(self):
"""The entire sequence of decisions."""
return [case.decision for case in self]
@property
def score(self):
"""The overall raw score (total logit) of the path.
All cases must already have been scored for this method to work.
"""
if not self._cases:
return 0.
return self._cases[-1].cumulative_logit
@property
def log_prob(self):
"""The overall log-probability of the path.
All cases must already have been scored for this method to work.
"""
if not self._cases:
return 0.
return self._cases[-1].cumulative_log_prob
@property
def locally_normalized_prob(self):
"""The overall locally normalized probability of the path.
All cases must already have been scored for this method to work.
"""
return np.exp(self.log_prob)
@property
def terminated(self):
"""Whether the path is terminated.
A path is terminated when all utterances were consumed
or the path is a zombie path.
"""
if not self._cases:
return False
if self._is_zombie:
return True
return self.denotation.utterance_idx == len(self.context.utterances)
def extend(self):
"""Create a new ParseCase that would continue from the path.
Return:
ParseCase
"""
if not self._cases:
return ParseCase.initial(self.context)
else:
return ParseCase.extend(self._cases[-1])
@property
def finalizable(self):
"""Takes a terminated ParsePath and checks if its denotation
can be finalized
Args:
path (ParsePath): Must be terminated
Returns:
bool: Whether the denotation can be finalized or not
"""
assert self.terminated
try:
self.finalized_denotation
return True
except ValueError as e:
return False
def zombie_clone(self):
"""Make a clone of the path but with is_zombie = True.
Used in REINFORCE for giving negative reward to futile paths.
"""
assert len(self._cases) > 0
path = ParsePath(self._cases)
path._is_zombie = True
return path
class PrettyCaseEmbedding(object):
"""Visualize how ParseModel embeds a Case."""
def __init__(self, history_hash, stack_hash):
"""
Args:
history_hash (np.ndarray): of shape [history_length]
stack_hash (np.ndarray): of shape [max_stack_size]
"""
self.history_hash = history_hash
self.stack_hash = stack_hash
def __repr__(self):
return 'history: {} stack: {}'.format(self.history_hash, self.stack_hash)
| ContextualSP/lemon/executor/strongsup/parse_case.py/0 | {
"file_path": "ContextualSP/lemon/executor/strongsup/parse_case.py",
"repo_id": "ContextualSP",
"token_count": 6805
} | 241 |